{
  "source": "joshuaskelly/wick",
  "score": 4
}
#### File: generators/csharp/filters.py
```python
def pascal_case(text):
"""Returns text converted to PascalCase"""
if text.count('_') == 0:
return text
s1 = text.split('_')
return ''.join([s.lower().capitalize() for s in s1])
def integral_type(member):
type = {
'char': 'char',
'signed char': 'sbyte',
'unsigned char': 'byte',
'short': 'short',
'unsigned short': 'ushort',
'int': 'int',
'unsigned int': 'uint',
'long': 'int',
'unsigned long': 'uint',
'long long': 'long',
'unsigned long long': 'ulong',
'float': 'float',
'double': 'double'
}[member.type]
return type
def csharp_type(member, show_length=False):
if member.type == 'char' and member.length > 1:
return 'string'
type = integral_type(member)
if member.length > 1:
return f'{type}[{member.length if show_length else ""}]'
return type
def reader_method(member):
type = integral_type(member)
method = {
'char': 'ReadChar',
'sbyte': 'ReadSByte',
'byte': 'ReadByte',
'short': 'ReadInt16',
'ushort': 'ReadUInt16',
'int': 'ReadInt32',
'uint': 'ReadUInt32',
'long': 'ReadInt64',
'ulong': 'ReadUInt64',
'float': 'ReadSingle',
'double': 'ReadDouble'
}[type]
return method
def lines(text):
ls = text.split('\n')
if not ls:
return []
if ls[0].strip() == '':
ls = ls[1:]
if not ls:
return []
if ls[-1].strip() == '':
ls = ls[:-1]
return ls
def leading_spaces(text):
return len(text) - len(text.lstrip())
def remove_minimum_whitespace(lines):
try:
minimum_whitespace = min([leading_spaces(l) for l in lines])
return [l[minimum_whitespace:] for l in lines]
except ValueError:
return []
def xml_comment(text):
ls = lines(text)
    ls = remove_minimum_whitespace(ls)
return '\n'.join([f'/// {l}' for l in ls])
def comment(text):
if text.count('\n') == 0:
return single_line_comment(text)
return multi_line_comment(text)
def single_line_comment(text):
ls = lines(text)
    ls = remove_minimum_whitespace(ls)
return '\n'.join([f'// {l}' for l in ls])
def multi_line_comment(text):
ls = lines(text)
    ls = remove_minimum_whitespace(ls)
ls = [f' * {l}' for l in ls]
ls.insert(0, '/*')
ls.append(' */')
return '\n'.join(ls)
filters = {
'pascalcase': pascal_case,
'csharptype': csharp_type,
'readermethod': reader_method,
'comment': comment,
'singlelinecomment': single_line_comment,
'multilinecomment': multi_line_comment,
'xmlcomment': xml_comment
}
```
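These functions are registered as template filters through the `filters` dict at the bottom of the file. As a rough, hedged illustration of what a few of them compute, the snippet below calls them directly; the `SimpleNamespace` stand-in for a parsed struct member is an assumption for demonstration only, not the project's real member type, and it assumes the functions above are in scope.

```python
# Illustrative only: a hypothetical member object standing in for wick's parsed members.
from types import SimpleNamespace

print(pascal_case('player_state'))               # PlayerState

name = SimpleNamespace(type='char', length=16)   # e.g. char name[16]
count = SimpleNamespace(type='unsigned short', length=1)

print(csharp_type(name))                         # string  (char arrays map to string)
print(csharp_type(count))                        # ushort
print(reader_method(count))                      # ReadUInt16
```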
#### File: generators/markdown/elements.py
```python
class MarkDownElement:
def __init__(self, text=''):
self._text = text
def __str__(self):
return self.text
def __add__(self, other):
return self.text + str(other)
def __radd__(self, other):
return str(other) + self.text
@property
def text(self):
return str(self._text)
class H1(MarkDownElement):
def __str__(self):
return f'# {self.text}'
class H2(MarkDownElement):
def __str__(self):
return f'## {self.text}'
class H3(MarkDownElement):
def __str__(self):
return f'### {self.text}'
class H4(MarkDownElement):
def __str__(self):
return f'#### {self.text}'
class PlainText(MarkDownElement):
def __str__(self):
return self.text
class BlankLine(MarkDownElement):
def __str__(self):
return ''
class Table(MarkDownElement):
def __init__(self, headers):
self._headers = tuple(str(h) for h in headers)
self._entries = []
def add_entry(self, entry):
if len(entry) != len(self._headers):
            raise RuntimeError('Number of elements per entry should match the number of headers')
self._entries.append(tuple([str(e) for e in entry]))
def __str__(self):
def columns(seq, char='|'):
return f'{char}{char.join(seq)}{char}'
widths = [len(h) for h in self._headers]
for entry in self._entries:
for i, z in enumerate(zip(widths, entry)):
w, e = z
widths[i] = max(w, len(e))
header = []
for i, h in enumerate(self._headers):
header.append(self._headers[i].center(widths[i] + 2))
header = columns(header)
divider = columns(['-' * (i + 2) for i in widths])
entries = []
for entry in self._entries:
e = [f' {i[1].ljust(i[0])} ' for i in zip(widths, entry)]
entries.append(columns(e))
result = []
result.append(header)
result.append(divider)
result+=entries
result = '\n'.join(result)
return result
```
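A brief usage sketch for the `Table` element above: entries are stringified, column widths are computed from the longest cell, and the result is a pipe-delimited Markdown table. This assumes the classes above are in scope.

```python
# Usage sketch for the Table element defined above.
table = Table(headers=('Name', 'Type'))
table.add_entry(('origin', 'float[3]'))
table.add_entry(('angle', 'float'))

print(table)
# |  Name  |   Type   |
# |--------|----------|
# | origin | float[3] |
# | angle  | float    |
```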
#### File: generators/python/filters.py
```python
import math
import re
from collections import namedtuple
def snake_case(text):
"""Returns text converted to snake_case"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', text)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def pascal_case(text):
"""Returns text converted to PascalCase"""
s1 = text.split('_')
return ''.join([s.lower().capitalize() for s in s1])
def spaces(text):
"""Returns whitespace equal to the length of the given text.
This is useful for making things line up.
"""
return ' ' * len(text)
def format_string(members):
result = ''
for prop in members:
type = prop.type
if type == 'char' and prop.length > 1:
format = f'{prop.length}s'
result += format
else:
format = {
'char': 'c',
'signed char': 'b',
'unsigned char': 'B',
'short': 'h',
'unsigned short': 'H',
'int': 'i',
'unsigned int': 'I',
'long': 'l',
'unsigned long': 'L',
'long long': 'q',
'unsigned long long': 'Q',
'ssize_t': 'n',
'size_t': 'N',
'float': 'f',
'double': 'd'
}[type]
result += format * prop.length
return simplify_format_string(result)
RepeatedChar = namedtuple('RepeatedChar', ['count', 'char'])
def simplify_format_string(input):
if not input:
return ''
pairs = [RepeatedChar(1, input[0])]
for c in input[1:]:
repeat = pairs[-1]
if c == repeat.char and c != 's':
pairs[-1] = RepeatedChar(repeat.count + 1, repeat.char)
else:
pairs.append(RepeatedChar(1, c))
return ''.join(f'{p.count if p.count > 1 else ""}{p.char}' for p in pairs)
value_generators = {}
def test_data(member):
global value_generators
if member.type == 'char' and member.length > 1:
test_characters = bytes(range(32, 127)).decode("ascii")
count = math.ceil(member.length / len(test_characters))
test_characters = test_characters * count
return f'"""{test_characters[:member.length]}"""'
try:
value_generator = value_generators.get(member.type)
return next(value_generator)
except TypeError:
interesting_values = {
'char': [bytes([i]) for i in range(128)],
'signed char': [-128, 0, 127],
'unsigned char': [0, 255],
'short': [-32768, 0, 32767],
'unsigned short': [0, 65535],
'int': [-2147483648, 0, 2147483647],
'unsigned int': [0, 4294967295],
'long': [-2147483648, 0, 2147483647],
'unsigned long': [0, 4294967295],
'long long': [-9223372036854775808, 0, 9223372036854775807],
'unsigned long long': [0, 18446744073709551615],
'ssize_t': [0],
'size_t': [0],
'float': [-1.0, 0.0, 1.0],
'double': [-1.0, 0.0, 1.0]
}[member.type]
def value_generator():
i = 0
while True:
yield interesting_values[i]
i = (i + 1) % len(interesting_values)
value_generators[member.type] = value_generator()
return next(value_generators[member.type])
```
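`format_string` builds a `struct`-module format string from a list of members, and `simplify_format_string` collapses repeated format codes into counts. A hedged example with hypothetical members, again using `SimpleNamespace` as a stand-in and assuming the functions above are in scope:

```python
from types import SimpleNamespace  # hypothetical stand-in for wick's parsed members

members = [
    SimpleNamespace(type='char', length=16),  # char name[16] -> '16s'
    SimpleNamespace(type='int', length=3),    # int origin[3] -> 'iii'
    SimpleNamespace(type='float', length=1),  # float angle   -> 'f'
]

print(format_string(members))            # 16s3if
print(simplify_format_string('iiff'))    # 2i2f
```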
#### File: generators/template/filters.py
```python
def pascal_case(text):
"""Returns text converted to PascalCase"""
s1 = text.split('_')
return ''.join([s.lower().capitalize() for s in s1])
def csharp_type(member):
    if member.type == 'char' and member.length > 1:
return 'string'
csharp_type = {
'char': 'char',
'signed char': 'sbyte',
'unsigned char': 'byte',
'short': 'short',
'unsigned short': 'ushort',
'int': 'int',
'unsigned int': 'uint',
        'long': 'int',
'unsigned long': 'uint',
'long long': 'long',
'unsigned long long': 'ulong',
'float': 'float',
'double': 'double'
}[member.type]
if member.length > 1:
return f'{csharp_type}[]'
return csharp_type
```
#### File: wick/parser/common.py
```python
class Position:
def __init__(self, line:int, character:int):
self.line = line
self.character = character
def __iter__(self):
return iter([self.line, self.character])
class Range:
def __init__(self, start, end):
self.start = Position(*start)
self.end = Position(*end)
def contains(self, position: Position):
if position.line < self.start.line:
return False
elif position.line > self.end.line:
return False
elif position.character < self.start.character:
return False
elif position.character > self.end.character:
return False
return True
```
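A quick sketch of how `Position` and `Range` compose: `Range` accepts `(line, character)` tuples and wraps them in `Position` objects. The values below are made up for illustration and assume the classes above are in scope.

```python
# Usage sketch for the Position/Range classes above.
span = Range(start=(2, 4), end=(5, 10))

print(span.contains(Position(3, 7)))    # True
print(span.contains(Position(1, 7)))    # False (before the start line)
print(span.contains(Position(5, 11)))   # False (past the end character)
```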
{
  "source": "joshua-s/kitsune",
  "score": 2
}
#### File: kitsune/dashboards/cron.py
```python
from datetime import date
from django.conf import settings
from django.db import connection
import cronjobs
from kitsune.dashboards.models import (
PERIODS, WikiDocumentVisits, WikiMetric, L10N_TOP20_CODE, L10N_ALL_CODE,
L10N_ACTIVE_CONTRIBUTORS_CODE)
from kitsune.dashboards.readouts import overview_rows
from kitsune.products.models import Product
from kitsune.sumo.redis_utils import redis_client
from kitsune.wiki.models import Document
from kitsune.wiki.utils import num_active_contributors
@cronjobs.register
def reload_wiki_traffic_stats():
if settings.STAGE:
print ('Skipped reload_wiki_traffic_stats(). '
'Set settings.STAGE to False to run it for real.')
return
for period, _ in PERIODS:
WikiDocumentVisits.reload_period_from_analytics(
period, verbose=settings.DEBUG)
@cronjobs.register
def update_l10n_coverage_metrics():
"""Calculate and store the l10n metrics for each locale/product.
The metrics are:
* Percent localized of top 20 articles
* Percent localized of all articles
"""
today = date.today()
# Loop through all locales.
for locale in settings.SUMO_LANGUAGES:
# Skip en-US, it is always 100% localized.
if locale == settings.WIKI_DEFAULT_LANGUAGE:
continue
# Loop through all enabled products, including None (really All).
for product in [None] + list(Product.objects.filter(visible=True)):
# (Ab)use the overview_rows helper from the readouts.
rows = overview_rows(locale=locale, product=product)
# % of top 20 articles
top20 = rows['top-20']
percent = 100.0 * float(top20['numerator']) / top20['denominator']
WikiMetric.objects.create(
code=L10N_TOP20_CODE,
locale=locale,
product=product,
date=today,
value=percent)
# % of all articles
all_ = rows['all']
try:
percent = 100 * float(all_['numerator']) / all_['denominator']
except ZeroDivisionError:
percent = 0.0
WikiMetric.objects.create(
code=L10N_ALL_CODE,
locale=locale,
product=product,
date=today,
value=percent)
@cronjobs.register
def update_l10n_contributor_metrics(day=None):
"""Update the number of active contributors for each locale/product.
An active contributor is defined as a user that created or reviewed a
revision in the previous calendar month.
"""
if day is None:
day = date.today()
first_of_month = date(day.year, day.month, 1)
if day.month == 1:
previous_first_of_month = date(day.year - 1, 12, 1)
else:
previous_first_of_month = date(day.year, day.month - 1, 1)
# Loop through all locales.
for locale in settings.SUMO_LANGUAGES:
# Loop through all enabled products, including None (really All).
for product in [None] + list(Product.objects.filter(visible=True)):
num = num_active_contributors(
from_date=previous_first_of_month,
to_date=first_of_month,
locale=locale,
product=product)
WikiMetric.objects.create(
code=L10N_ACTIVE_CONTRIBUTORS_CODE,
locale=locale,
product=product,
date=previous_first_of_month,
value=num)
def _get_old_unhelpful():
"""
Gets the data from 2 weeks ago and formats it as output so that we can
get a percent change.
"""
old_formatted = {}
cursor = connection.cursor()
cursor.execute(
"""SELECT doc_id, yes, no
FROM
(SELECT wiki_revision.document_id as doc_id,
SUM(limitedvotes.helpful) as yes,
SUM(NOT(limitedvotes.helpful)) as no
FROM
(SELECT * FROM wiki_helpfulvote
WHERE created <= DATE_SUB(CURDATE(), INTERVAL 1 WEEK)
AND created >= DATE_SUB(DATE_SUB(CURDATE(),
INTERVAL 1 WEEK), INTERVAL 1 WEEK)
) as limitedvotes
INNER JOIN wiki_revision ON
limitedvotes.revision_id=wiki_revision.id
INNER JOIN wiki_document ON
wiki_document.id=wiki_revision.document_id
WHERE wiki_document.locale="en-US"
GROUP BY doc_id
HAVING no > yes
) as calculated""")
old_data = cursor.fetchall()
for data in old_data:
doc_id = data[0]
yes = float(data[1])
no = float(data[2])
total = yes + no
if total == 0:
continue
old_formatted[doc_id] = {'total': total,
'percentage': yes / total}
return old_formatted
def _get_current_unhelpful(old_formatted):
"""Gets the data for the past week and formats it as return value."""
final = {}
cursor = connection.cursor()
cursor.execute(
"""SELECT doc_id, yes, no
FROM
(SELECT wiki_revision.document_id as doc_id,
SUM(limitedvotes.helpful) as yes,
SUM(NOT(limitedvotes.helpful)) as no
FROM
(SELECT * FROM wiki_helpfulvote
WHERE created >= DATE_SUB(CURDATE(), INTERVAL 1 WEEK)
) as limitedvotes
INNER JOIN wiki_revision ON
limitedvotes.revision_id=wiki_revision.id
INNER JOIN wiki_document ON
wiki_document.id=wiki_revision.document_id
WHERE wiki_document.locale="en-US"
GROUP BY doc_id
HAVING no > yes
) as calculated""")
current_data = cursor.fetchall()
for data in current_data:
doc_id = data[0]
yes = float(data[1])
no = float(data[2])
total = yes + no
if total == 0:
continue
percentage = yes / total
if doc_id in old_formatted:
final[doc_id] = {
'total': total,
'currperc': percentage,
'diffperc': percentage - old_formatted[doc_id]['percentage']
}
else:
final[doc_id] = {
'total': total,
'currperc': percentage,
'diffperc': 0.0
}
return final
@cronjobs.register
def cache_most_unhelpful_kb_articles():
"""Calculate and save the most unhelpful KB articles in the past month."""
REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY
old_formatted = _get_old_unhelpful()
final = _get_current_unhelpful(old_formatted)
if final == {}:
return
def _mean(vals):
"""Argument: List of floats"""
if len(vals) == 0:
return None
return sum(vals) / len(vals)
def _bayes_avg(C, m, R, v):
# Bayesian Average
# C = mean vote, v = number of votes,
# R = mean rating, m = minimum votes to list in topranked
return (C * m + R * v) / (m + v)
mean_perc = _mean([float(final[key]['currperc']) for key in final.keys()])
mean_total = _mean([float(final[key]['total']) for key in final.keys()])
# TODO: Make this into namedtuples
sorted_final = [(key,
final[key]['total'],
final[key]['currperc'],
final[key]['diffperc'],
_bayes_avg(mean_perc, mean_total,
final[key]['currperc'],
final[key]['total']))
for key in final.keys()]
sorted_final.sort(key=lambda entry: entry[4]) # Sort by Bayesian Avg
redis = redis_client('helpfulvotes')
redis.delete(REDIS_KEY)
max_total = max([b[1] for b in sorted_final])
for entry in sorted_final:
doc = Document.objects.get(pk=entry[0])
redis.rpush(REDIS_KEY, (u'%s::%s::%s::%s::%s::%s::%s' %
(entry[0], # Document ID
entry[1], # Total Votes
entry[2], # Current Percentage
entry[3], # Difference in Percentage
1 - (entry[1] / max_total), # Graph Color
doc.slug, # Document slug
doc.title))) # Document title
```
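The `_bayes_avg` helper ranks articles by a Bayesian average so that documents with only a handful of votes are pulled toward the global mean rather than dominating the list. A standalone illustration of the same formula with made-up numbers:

```python
def bayes_avg(C, m, R, v):
    # C = mean rating across all docs, m = mean number of votes,
    # R = this doc's rating, v = this doc's number of votes
    return (C * m + R * v) / (m + v)

# Few votes: the result stays close to the global mean (0.5).
print(bayes_avg(C=0.5, m=100.0, R=0.1, v=5.0))     # ~0.481
# Many votes: the doc's own rating dominates.
print(bayes_avg(C=0.5, m=100.0, R=0.1, v=5000.0))  # ~0.108
```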
#### File: kitsune/upload/models.py
```python
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
from kitsune.sumo.helpers import reverse
from kitsune.sumo.models import NoCacheModelBase
from kitsune.sumo.utils import auto_delete_files
@auto_delete_files
class ImageAttachment(NoCacheModelBase):
"""An image attached to an object using a generic foreign key"""
file = models.ImageField(upload_to=settings.IMAGE_UPLOAD_PATH,
max_length=settings.MAX_FILEPATH_LENGTH)
thumbnail = models.ImageField(upload_to=settings.THUMBNAIL_UPLOAD_PATH,
null=True)
creator = models.ForeignKey(User, related_name='image_attachments')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
def __unicode__(self):
return self.file.name
def get_absolute_url(self):
return self.file.url
def thumbnail_if_set(self):
"""Returns self.thumbnail, if set, else self.file"""
return self.thumbnail if self.thumbnail else self.file
def get_delete_url(self):
"""Returns the URL to delete this object. Assumes the object has an
id."""
return reverse('upload.del_image_async', args=[self.id])
```
#### File: kitsune/users/cron.py
```python
import cronjobs
from kitsune.search.models import generate_tasks
from kitsune.users.models import RegistrationProfile
@cronjobs.register
def remove_expired_registration_profiles():
""""Cleanup expired registration profiles and users that not activated."""
RegistrationProfile.objects.delete_expired_users()
generate_tasks()
```
#### File: kitsune/wiki/forms.py
```python
import re
from django import forms
from django.conf import settings
from django.template.defaultfilters import slugify
from tower import ugettext_lazy as _lazy
from kitsune.products.models import Product, Topic
from kitsune.sumo.form_fields import MultiUsernameField, StrippedCharField
from kitsune.wiki.config import SIGNIFICANCES, CATEGORIES
from kitsune.wiki.models import (
Document, Revision, MAX_REVISION_COMMENT_LENGTH)
from kitsune.wiki.widgets import (
RadioFieldRendererWithHelpText, ProductTopicsAndSubtopicsWidget)
TITLE_REQUIRED = _lazy(u'Please provide a title.')
TITLE_SHORT = _lazy(u'The title is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
TITLE_LONG = _lazy(u'Please keep the length of the title to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
SLUG_REQUIRED = _lazy(u'Please provide a slug.')
SLUG_INVALID = _lazy(u'The slug provided is not valid.')
SLUG_SHORT = _lazy(u'The slug is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SLUG_LONG = _lazy(u'Please keep the length of the slug to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
SUMMARY_REQUIRED = _lazy(u'Please provide a summary.')
SUMMARY_SHORT = _lazy(u'The summary is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SUMMARY_LONG = _lazy(u'Please keep the length of the summary to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
CONTENT_REQUIRED = _lazy(u'Please provide content.')
CONTENT_SHORT = _lazy(u'The content is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
CONTENT_LONG = _lazy(u'Please keep the length of the content to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
COMMENT_LONG = _lazy(u'Please keep the length of the comment to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
PRODUCT_REQUIRED = _lazy(u'Please select at least one product.')
TOPIC_REQUIRED = _lazy(u'Please select at least one topic.')
class DocumentForm(forms.ModelForm):
"""Form to create/edit a document."""
def __init__(self, *args, **kwargs):
# Quasi-kwargs:
can_archive = kwargs.pop('can_archive', False)
can_edit_needs_change = kwargs.pop('can_edit_needs_change', False)
initial_title = kwargs.pop('initial_title', '')
super(DocumentForm, self).__init__(*args, **kwargs)
title_field = self.fields['title']
title_field.initial = initial_title
slug_field = self.fields['slug']
slug_field.initial = slugify(initial_title)
topics_field = self.fields['topics']
topics_field.choices = Topic.objects.values_list('id', 'title')
products_field = self.fields['products']
products_field.choices = Product.objects.values_list('id', 'title')
        # If the user doesn't have permission to frob is_archived, remove the
        # field. This causes save() to skip it as well.
if not can_archive:
del self.fields['is_archived']
        # If the user doesn't have permission to mess with needs_change*, remove
        # the fields. This causes save() to skip them as well.
if not can_edit_needs_change:
del self.fields['needs_change']
del self.fields['needs_change_comment']
title = StrippedCharField(
min_length=5, max_length=255,
widget=forms.TextInput(),
label=_lazy(u'Title:'),
help_text=_lazy(u'Title of article'),
error_messages={'required': TITLE_REQUIRED,
'min_length': TITLE_SHORT,
'max_length': TITLE_LONG})
# We don't use forms.SlugField because it is too strict in
# what it allows (English/Roman alpha-numeric characters and dashes).
# Instead, we do custom validation in `clean_slug` below.
slug = StrippedCharField(
min_length=3, max_length=255,
widget=forms.TextInput(),
label=_lazy(u'Slug:'),
help_text=_lazy(u'Article URL'),
error_messages={'required': SLUG_REQUIRED,
'min_length': SLUG_SHORT,
'max_length': SLUG_LONG})
products = forms.MultipleChoiceField(
label=_lazy(u'Relevant to:'),
required=False,
widget=forms.CheckboxSelectMultiple())
is_localizable = forms.BooleanField(
initial=True,
label=_lazy(u'Allow translations:'),
required=False)
is_archived = forms.BooleanField(
label=_lazy(u'Obsolete:'),
required=False)
allow_discussion = forms.BooleanField(
label=_lazy(u'Allow discussion on this article?'),
initial=True,
required=False)
category = forms.ChoiceField(
choices=CATEGORIES,
# Required for non-translations, which is
# enforced in Document.clean().
required=False,
label=_lazy(u'Category:'),
help_text=_lazy(u'Type of article'))
topics = forms.MultipleChoiceField(
label=_lazy(u'Topics:'),
required=False,
widget=ProductTopicsAndSubtopicsWidget())
locale = forms.CharField(widget=forms.HiddenInput())
needs_change = forms.BooleanField(
label=_lazy(u'Needs change:'),
initial=False,
required=False)
needs_change_comment = forms.CharField(
label=_lazy(u'Comment:'),
widget=forms.Textarea(),
required=False)
def clean_slug(self):
slug = self.cleaned_data['slug']
# Blacklist /, ?, % and +,
if not re.compile(r'^[^/^\+^\?%]+$').match(slug):
raise forms.ValidationError(SLUG_INVALID)
return slug
def clean(self):
c = super(DocumentForm, self).clean()
locale = c.get('locale')
# Products are required for en-US
products = c.get('products')
if (locale == settings.WIKI_DEFAULT_LANGUAGE and
(not products or len(products) < 1)):
raise forms.ValidationError(PRODUCT_REQUIRED)
# Topics are required for en-US
topics = c.get('topics')
if (locale == settings.WIKI_DEFAULT_LANGUAGE and
(not topics or len(topics) < 1)):
raise forms.ValidationError(TOPIC_REQUIRED)
return c
class Meta:
model = Document
fields = ('title', 'slug', 'category', 'is_localizable', 'products',
'topics', 'locale', 'is_archived', 'allow_discussion',
'needs_change', 'needs_change_comment')
def save(self, parent_doc, **kwargs):
"""Persist the Document form, and return the saved Document."""
doc = super(DocumentForm, self).save(commit=False, **kwargs)
doc.parent = parent_doc
# If document doesn't need change, clear out the comment.
if not doc.needs_change:
doc.needs_change_comment = ''
doc.save()
self.save_m2m()
if parent_doc:
# Products are not set on translations.
doc.products.remove(*[p for p in doc.products.all()])
return doc
class RevisionForm(forms.ModelForm):
"""Form to create new revisions."""
keywords = StrippedCharField(required=False,
label=_lazy(u'Keywords:'),
help_text=_lazy(u'Affects search results'))
summary = StrippedCharField(
min_length=5, max_length=1000, widget=forms.Textarea(),
label=_lazy(u'Search result summary:'),
help_text=_lazy(u'Only displayed on search results page'),
error_messages={'required': SUMMARY_REQUIRED,
'min_length': SUMMARY_SHORT,
'max_length': SUMMARY_LONG})
content = StrippedCharField(
min_length=5, max_length=100000,
label=_lazy(u'Content:'),
widget=forms.Textarea(),
error_messages={'required': CONTENT_REQUIRED,
'min_length': CONTENT_SHORT,
'max_length': CONTENT_LONG})
comment = StrippedCharField(required=False, label=_lazy(u'Comment:'))
class Meta(object):
model = Revision
fields = ('keywords', 'summary', 'content', 'comment', 'based_on')
def __init__(self, *args, **kwargs):
super(RevisionForm, self).__init__(*args, **kwargs)
self.fields['based_on'].widget = forms.HiddenInput()
self.fields['comment'].widget = forms.TextInput(
attrs={'maxlength': MAX_REVISION_COMMENT_LENGTH})
def save(self, creator, document, based_on_id=None, base_rev=None,
**kwargs):
"""Persist me, and return the saved Revision.
Take several other necessary pieces of data that aren't from the
form.
"""
# Throws a TypeError if somebody passes in a commit kwarg:
new_rev = super(RevisionForm, self).save(commit=False, **kwargs)
new_rev.document = document
new_rev.creator = creator
if based_on_id:
new_rev.based_on_id = based_on_id
# If the document doesn't allow the revision creator to edit the
# keywords, keep the old value.
if base_rev and not document.allows(creator, 'edit_keywords'):
new_rev.keywords = base_rev.keywords
new_rev.save()
return new_rev
class ReviewForm(forms.Form):
comment = StrippedCharField(max_length=2000, widget=forms.Textarea(),
required=False, label=_lazy(u'Comment:'),
error_messages={'max_length': COMMENT_LONG})
_widget = forms.RadioSelect(renderer=RadioFieldRendererWithHelpText)
significance = forms.TypedChoiceField(
label=_lazy(u'Significance:'),
choices=SIGNIFICANCES,
initial=SIGNIFICANCES[1][0],
required=False, widget=_widget,
coerce=int, empty_value=SIGNIFICANCES[1][0])
is_ready_for_localization = forms.BooleanField(
initial=False,
label=_lazy(u'Ready for localization'),
required=False)
needs_change = forms.BooleanField(
label=_lazy(u'Needs change'),
initial=False,
required=False)
needs_change_comment = forms.CharField(
label=_lazy(u'Comment:'),
widget=forms.Textarea(),
required=False)
class AddContributorForm(forms.Form):
"""Form to add contributors to a document."""
users = MultiUsernameField(
widget=forms.TextInput(attrs={'placeholder': _lazy(u'username'),
'class': 'user-autocomplete'}))
languages = [('', 'Any')] + [(l[0], u'{1} ({0})'.format(*l))
for l in settings.LANGUAGE_CHOICES]
class RevisionFilterForm(forms.Form):
"""Form to filter a list of revisions."""
locale = forms.ChoiceField(label=_lazy(u'Locale:'), choices=languages,
required=False)
users = MultiUsernameField(label=_lazy(u'Users:'), required=False)
start = forms.DateField(label=_lazy(u'Start:'), required=False)
end = forms.DateField(label=_lazy(u'End:'), required=False)
```
#### File: wiki/tests/__init__.py
```python
from datetime import datetime
from django.template.defaultfilters import slugify
from kitsune.products.models import Product
from kitsune.products.tests import product, topic
from kitsune.sumo.tests import LocalizingClient, TestCase, with_save
from kitsune.users.tests import user
from kitsune.wiki.models import Document, Revision, HelpfulVote, Locale
from kitsune.wiki.config import CATEGORIES, SIGNIFICANCES
class TestCaseBase(TestCase):
"""Base TestCase for the wiki app test cases."""
client_class = LocalizingClient
# Model makers. These make it clearer and more concise to create objects in
# test cases. They allow the significant attribute values to stand out rather
# than being hidden amongst the values needed merely to get the model to
# validate.
@with_save
def document(**kwargs):
"""Return an empty document with enough stuff filled out that it can be
saved."""
defaults = {'category': CATEGORIES[0][0],
'title': u'đ' + str(datetime.now())}
defaults.update(kwargs)
if 'slug' not in kwargs:
defaults['slug'] = slugify(defaults['title'])
return Document(**defaults)
@with_save
def revision(**kwargs):
"""Return an empty revision with enough stuff filled out that it can be
saved.
Revision's is_approved=False unless you specify otherwise.
Requires a users fixture if no creator is provided.
"""
d = kwargs.pop('document', None) or document(save=True)
defaults = {'summary': u'đSome summary', 'content': u'đSome content',
'significance': SIGNIFICANCES[0][0],
'comment': u'đSome comment',
'creator': kwargs.get('creator', user(save=True)),
'document': d}
defaults.update(kwargs)
return Revision(**defaults)
@with_save
def helpful_vote(**kwargs):
r = kwargs.pop('revision', None) or revision(save=True)
defaults = {'created': datetime.now(), 'helpful': False, 'revision': r}
defaults.update(kwargs)
return HelpfulVote(**defaults)
@with_save
def locale(**kwargs):
defaults = {'locale': 'en-US'}
defaults.update(kwargs)
return Locale(**defaults)
def translated_revision(locale='de', save=False, **kwargs):
"""Return a revision that is the translation of a default-language one."""
parent_rev = revision(is_approved=True,
is_ready_for_localization=True,
save=True)
translation = document(parent=parent_rev.document, locale=locale,
save=True)
new_kwargs = {'document': translation, 'based_on': parent_rev}
new_kwargs.update(kwargs)
return revision(save=save, **new_kwargs)
# I don't like this thing. revision() is more flexible. All this adds is
# is_approved=True, but it doesn't even mention approval in its name.
# TODO: Remove.
def doc_rev(content=''):
"""Save a document and an approved revision with the given content."""
r = revision(content=content, is_approved=True)
r.save()
return r.document, r
# End model makers.
def new_document_data(topic_ids=None, product_ids=None):
product_ids = product_ids or [product(save=True).id]
p = Product.objects.get(id=product_ids[0])
topic_ids = topic_ids or [topic(product=p, save=True).id]
return {
'title': 'A Test Article',
'slug': 'a-test-article',
'locale': 'en-US',
'topics': topic_ids,
'products': product_ids,
'category': CATEGORIES[0][0],
'keywords': 'key1, key2',
'summary': 'lipsum',
'content': 'lorem ipsum dolor sit amet',
}
```
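A hedged sketch of how these model makers are typically combined in a test; it relies on the makers defined above and assumes a configured Django test database, as used by the surrounding suite.

```python
# Sketch only: uses the makers defined above inside a Django TestCase.
rev = translated_revision(locale='fr', is_approved=True, save=True)

assert rev.document.locale == 'fr'
assert rev.document.parent is not None   # links back to the en-US original
assert rev.based_on.is_ready_for_localization
```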
#### File: wiki/tests/test_parser.py
```python
import re
from django.conf import settings
from nose.tools import eq_
from pyquery import PyQuery as pq
import kitsune.sumo.tests.test_parser
from kitsune.gallery.models import Video
from kitsune.gallery.tests import image, video
from kitsune.sumo.tests import TestCase
from kitsune.wiki.models import Document
from kitsune.wiki.parser import (
WikiParser, ForParser, PATTERNS, RECURSION_MESSAGE, _key_split,
_build_template_params as _btp, _format_template_content as _ftc)
from kitsune.wiki.tests import document, revision
def doc_rev_parser(*args, **kwargs):
return kitsune.sumo.tests.test_parser.doc_rev_parser(
*args, parser_cls=WikiParser, **kwargs)
def doc_parse_markup(content, markup, title='Template:test'):
"""Create a doc with given content and parse given markup."""
_, _, p = doc_rev_parser(content, title)
doc = pq(p.parse(markup))
return (doc, p)
class SimpleSyntaxTestCase(TestCase):
"""Simple syntax regexing, like {note}...{/note}, {key Ctrl+K}"""
def test_note_simple(self):
"""Simple note syntax"""
p = WikiParser()
doc = pq(p.parse('{note}this is a note{/note}'))
eq_('this is a note', doc('div.note').text())
def test_warning_simple(self):
"""Simple warning syntax"""
p = WikiParser()
doc = pq(p.parse('{warning}this is a warning{/warning}'))
eq_('this is a warning', doc('div.warning').text())
def test_warning_multiline(self):
"""Multiline warning syntax"""
p = WikiParser()
doc = pq(p.parse('{warning}\nthis is a warning\n{/warning}'))
eq_('this is a warning', doc('div.warning').text())
def test_warning_multiline_breaks(self):
"""Multiline breaks warning syntax"""
p = WikiParser()
doc = pq(p.parse('\n\n{warning}\n\nthis is a warning\n\n'
'{/warning}\n\n'))
eq_('this is a warning', doc('div.warning').text())
def test_general_warning_note(self):
"""A bunch of wiki text with {warning} and {note}"""
p = WikiParser()
doc = pq(p.parse('\n\n{warning}\n\nthis is a warning\n\n{note}'
'this is a note{warning}!{/warning}{/note}'
"[[Installing Firefox]] '''internal''' ''link''"
'{/warning}\n\n'))
eq_('!', doc('div.warning div.warning').text())
eq_('this is a note !', doc('div.note').text())
eq_('Installing Firefox', doc('a').text())
eq_('internal', doc('strong').text())
eq_('link', doc('em').text())
def test_key_inline(self):
"""{key} stays inline"""
p = WikiParser()
doc = pq(p.parse('{key Cmd+Shift+Q}'))
eq_(1, len(doc('p')))
eq_(u'<span class="key">Cmd</span> + <span class="key">Shift</span>'
u' + <span class="key">Q</span>', doc.html().replace('\n', ''))
def test_template_inline(self):
"""Inline templates are not wrapped in <p>s"""
doc, p = doc_parse_markup('<span class="key">{{{1}}}</span>',
'[[T:test|Cmd]] + [[T:test|Shift]]')
eq_(1, len(doc('p')))
def test_template_multiline(self):
"""Multiline templates are wrapped in <p>s"""
doc, p = doc_parse_markup('<span class="key">\n{{{1}}}</span>',
'[[T:test|Cmd]]')
eq_(3, len(doc('p')))
def test_key_split_callback(self):
"""The _key_split regex callback does what it claims"""
key_p = PATTERNS[2][0]
# Multiple keys, with spaces
eq_('<span class="key">ctrl</span> + <span class="key">alt</span> + '
'<span class="key">del</span>',
key_p.sub(_key_split, '{key ctrl + alt + del}'))
# Single key with spaces in it
eq_('<span class="key">a key</span>',
key_p.sub(_key_split, '{key a key}'))
# Multiple keys with quotes and spaces
eq_('<span class="key">"Param-One" and</span> + <span class="key">'
'param</span> + <span class="key">two</span>',
key_p.sub(_key_split, '{key "Param-One" and + param+two}'))
eq_('<span class="key">multi\nline</span> + '
'<span class="key">me</span>',
key_p.sub(_key_split, '{key multi\nline\n+me}'))
def test_key_split_brace_callback(self):
"""Adding brace inside {key ...}"""
key_p = PATTERNS[2][0]
eq_('<span class="key">ctrl</span> + <span class="key">and</span> '
'Here is }',
key_p.sub(_key_split, '{key ctrl + and} Here is }'))
eq_('<span class="key">ctrl</span> + <span class="key">and</span> + '
'<span class="key">{</span>',
key_p.sub(_key_split, '{key ctrl + and + {}'))
def test_simple_inline_custom(self):
"""Simple custom inline syntax: menu, button, filepath, pref"""
p = WikiParser()
tags = ['menu', 'button', 'filepath', 'pref']
for tag in tags:
doc = pq(p.parse('{%s this is a %s}' % (tag, tag)))
eq_('this is a ' + tag, doc('span.' + tag).text())
def test_general_warning_note_inline_custom(self):
"""A mix of custom inline syntax with warnings and notes"""
p = WikiParser()
doc = pq(p.parse('\n\n{warning}\n\nthis is a {button warning}\n{note}'
'this is a {menu note}{warning}!{/warning}{/note}'
"'''{filepath internal}''' ''{menu hi!}''{/warning}"))
eq_('warning', doc('div.warning span.button').text())
eq_('this is a note !', doc('div.note').text())
eq_('note', doc('div.warning div.note span.menu').text())
eq_('internal', doc('strong span.filepath').text())
eq_('hi!', doc('em span.menu').text())
def test_comments(self):
"""Markup containing taggy comments shouldn't truncate afterward."""
p = WikiParser()
# This used to truncate after the comment when rendered:
eq_(p.parse('Start <!-- <foo --> End'),
'<p>Start End\n</p>')
# Just make sure these don't go awry either:
eq_(p.parse('Start <!-- <foo> --> End'),
'<p>Start End\n</p>')
eq_(p.parse('Start <!-- foo> --> End'),
'<p>Start End\n</p>')
def test_internal_links(self):
"""Make sure internal links work correctly when not to redirected
articles and when to redirected articles"""
p = WikiParser()
# Create a new article
rev = revision(is_approved=True, save=True)
doc = rev.document
doc.current_revision = rev
doc.title = 'Real article'
doc.save()
# Change the slug of the article to create a redirected article
old_slug = doc.slug
doc.slug = 'real-article'
doc.save()
redirect = Document.objects.get(slug=old_slug)
# Both internal links should link to the same article
eq_(p.parse('[[%s]]' % doc.title),
'<p><a href="/en-US/kb/%s">%s</a>\n</p>' % (doc.slug, doc.title))
eq_(p.parse('[[%s]]' % redirect.title),
'<p><a href="/en-US/kb/%s">%s</a>\n</p>' % (doc.slug, doc.title))
class TestWikiTemplate(TestCase):
def test_template(self):
"""Simple template markup."""
doc, _ = doc_parse_markup('Test content', '[[Template:test]]')
eq_('Test content', doc.text())
def test_template_does_not_exist(self):
"""Return a message if template does not exist"""
p = WikiParser()
doc = pq(p.parse('[[Template:test]]'))
eq_('The template "test" does not exist or has no approved revision.',
doc.text())
def test_template_locale(self):
"""Localized template is returned."""
py_doc, p = doc_parse_markup('English content', '[[Template:test]]')
parent = document()
d = document(parent=parent, title='Template:test', locale='fr')
d.save()
r = revision(content='French content', document=d, is_approved=True)
r.save()
eq_('English content', py_doc.text())
py_doc = pq(p.parse('[[T:test]]', locale='fr'))
eq_('French content', py_doc.text())
def test_template_not_exist(self):
"""If template does not exist in set locale or English."""
p = WikiParser()
doc = pq(p.parse('[[T:test]]', locale='fr'))
eq_('The template "test" does not exist or has no approved revision.',
doc.text())
def test_template_locale_fallback(self):
"""If localized template does not exist, fall back to English."""
_, p = doc_parse_markup('English content', '[[Template:test]]')
doc = pq(p.parse('[[T:test]]', locale='fr'))
eq_('English content', doc.text())
def test_template_anonymous_params(self):
"""Template markup with anonymous parameters."""
doc, p = doc_parse_markup('{{{1}}}:{{{2}}}',
'[[Template:test|one|two]]')
eq_('one:two', doc.text())
doc = pq(p.parse('[[T:test|two|one]]'))
eq_('two:one', doc.text())
def test_template_named_params(self):
"""Template markup with named parameters."""
doc, p = doc_parse_markup('{{{a}}}:{{{b}}}',
'[[Template:test|a=one|b=two]]')
eq_('one:two', doc.text())
doc = pq(p.parse('[[T:test|a=two|b=one]]'))
eq_('two:one', doc.text())
def test_template_numbered_params(self):
"""Template markup with numbered parameters."""
doc, p = doc_parse_markup('{{{1}}}:{{{2}}}',
'[[Template:test|2=one|1=two]]')
eq_('two:one', doc.text())
doc = pq(p.parse('[[T:test|2=two|1=one]]'))
eq_('one:two', doc.text())
def test_template_wiki_markup(self):
"""A template with wiki markup"""
doc, _ = doc_parse_markup("{{{1}}}:{{{2}}}\n''wiki''\n'''markup'''",
'[[Template:test|2=one|1=two]]')
eq_('two:one', doc('p')[1].text.replace('\n', ''))
eq_('wiki', doc('em')[0].text)
eq_('markup', doc('strong')[0].text)
def test_template_args_inline_wiki_markup(self):
"""Args that contain inline wiki markup are parsed"""
doc, _ = doc_parse_markup('{{{1}}}\n\n{{{2}}}',
"[[Template:test|'''one'''|''two'']]")
eq_("<p/><p><strong>one</strong></p><p><em>two</em></p><p/>",
doc.html().replace('\n', ''))
def test_template_args_block_wiki_markup(self):
"""Args that contain block level wiki markup aren't parsed"""
doc, _ = doc_parse_markup('{{{1}}}\n\n{{{2}}}',
"[[Template:test|* ordered|# list]]")
eq_("<p/><p>* ordered</p><p># list</p><p/>",
doc.html().replace('\n', ''))
def test_format_template_content_named(self):
"""_ftc handles named arguments"""
eq_('ab', _ftc('{{{some}}}{{{content}}}',
{'some': 'a', 'content': 'b'}))
def test_format_template_content_numbered(self):
"""_ftc handles numbered arguments"""
eq_('a:b', _ftc('{{{1}}}:{{{2}}}', {'1': 'a', '2': 'b'}))
def test_build_template_params_anonymous(self):
"""_btp handles anonymous arguments"""
eq_({'1': '<span>a</span>', '2': 'test'},
_btp(['<span>a</span>', 'test']))
def test_build_template_params_numbered(self):
"""_btp handles numbered arguments"""
eq_({'20': 'a', '10': 'test'}, _btp(['20=a', '10=test']))
def test_build_template_params_named(self):
"""_btp handles only named-arguments"""
eq_({'a': 'b', 'hi': 'test'}, _btp(['hi=test', 'a=b']))
def test_build_template_params_named_anonymous(self):
"""_btp handles mixed named and anonymous arguments"""
eq_({'1': 'a', 'hi': 'test'}, _btp(['hi=test', 'a']))
def test_build_template_params_named_numbered(self):
"""_btp handles mixed named and numbered arguments"""
eq_({'10': 'a', 'hi': 'test'}, _btp(['hi=test', '10=a']))
def test_build_template_params_named_anonymous_numbered(self):
"""_btp handles mixed named, anonymous and numbered arguments"""
eq_({'1': 'a', 'hi': 'test', '3': 'z'}, _btp(['hi=test', 'a', '3=z']))
def test_unapproved_template(self):
document(title='Template:new').save()
p = WikiParser()
doc = pq(p.parse('[[T:new]]'))
eq_('The template "new" does not exist or has no approved revision.',
doc.text())
def test_for_in_template(self):
"""Verify that {for}'s render correctly in template."""
d = document(title='Template:for')
d.save()
r = revision(document=d,
content='{for win}windows{/for}{for mac}mac{/for}')
r.is_approved = True
r.save()
p = WikiParser()
content = p.parse('[[Template:for]]')
eq_('<p><span class="for" data-for="win">windows</span>'
'<span class="for" data-for="mac">mac</span>\n\n</p>',
content)
def test_button_for_nesting(self):
"""You can nest {for}s inside {button}."""
text = '{button start {for mac}mac{/for}{for win}win{/for} rest}'
p = WikiParser()
content = p.parse(text)
eq_(u'<p><span class="button">start '
u'<span class="for" data-for="mac">mac</span>'
u'<span class="for" data-for="win">win</span> '
u'rest</span>\n</p>', content)
def test_button_image_for_nesting(self):
"""You can nest [[Image:]] inside {for} inside {button}."""
image(title='image-file.png')
text = '{button {for mac}[[Image:image-file.png]]{/for} text}'
p = WikiParser()
doc = pq(p.parse(text))
assert 'frameless' in doc('img').attr('class')
eq_(0, doc('div.caption').length)
eq_(0, doc('div.img').length)
def test_direct_recursion(self):
"""Make sure direct recursion is caught on the very first nesting."""
d = document(title='Template:Boo')
d.save()
# Twice so the second revision sees content identical to itself:
for i in range(2):
revision(document=d, content='Fine [[Template:Boo]] Fellows',
is_approved=True).save()
eq_('<p>Fine %s Fellows\n</p>' % (RECURSION_MESSAGE % 'Template:Boo'),
d.content_parsed)
def test_indirect_recursion(self):
"""Make sure indirect recursion is caught."""
boo = document(title='Template:Boo')
boo.save()
yah = document(title='Template:Yah')
yah.save()
revision(document=boo, content='Paper [[Template:Yah]] Cups',
is_approved=True).save()
revision(document=yah, content='Wooden [[Template:Boo]] Bats',
is_approved=True).save()
recursion_message = RECURSION_MESSAGE % 'Template:Boo'
eq_('<p>Paper Wooden %s Bats\n Cups\n</p>' % recursion_message,
boo.content_parsed)
class TestWikiInclude(TestCase):
def test_revision_include(self):
"""Simple include markup."""
_, _, p = doc_rev_parser('Test content', 'Test title')
# Existing title returns document's content
doc = pq(p.parse('[[I:Test title]]'))
eq_('Test content', doc.text())
# Nonexisting title returns 'Document not found'
doc = pq(p.parse('[[Include:Another title]]'))
eq_('The document "Another title" does not exist.', doc.text())
def test_revision_include_locale(self):
"""Include finds document in the correct locale."""
_, _, p = doc_rev_parser('English content', 'Test title')
# Parsing in English should find the French article
doc = pq(p.parse('[[Include:Test title]]', locale='en-US'))
eq_('English content', doc.text())
# The French article will include the English content as fallback.
doc = pq(p.parse('[[I:Test title]]', locale='fr'))
eq_('English content', doc.text())
# Create the French article, and test again
parent_rev = revision()
d = document(parent=parent_rev.document, title='Test title',
locale='fr')
d.save()
r = revision(document=d, content='French content', is_approved=True)
r.save()
# Parsing in French should find the French article
doc = pq(p.parse('[[Include:Test title]]', locale='fr'))
eq_('French content', doc.text())
def test_direct_recursion(self):
"""Make sure direct recursion is caught on the very first nesting."""
d = document(title='Boo')
d.save()
# Twice so the second revision sees content identical to itself:
for i in range(2):
revision(document=d, content='Fine [[Include:Boo]] Fellows',
is_approved=True).save()
eq_('<p>Fine %s Fellows\n</p>' % (RECURSION_MESSAGE % 'Boo'),
d.content_parsed)
def test_indirect_recursion(self):
"""Make sure indirect recursion is caught."""
boo = document(title='Boo')
boo.save()
yah = document(title='Yah')
yah.save()
revision(document=boo, content='Paper [[Include:Yah]] Cups',
is_approved=True).save()
revision(document=yah, content='Wooden [[Include:Boo]] Bats',
is_approved=True).save()
recursion_message = RECURSION_MESSAGE % 'Boo'
# boo.content_parsed is something like <p>Paper </p><p>Wooden
# [Recursive inclusion of "Boo"] Bats\n</p> Cups\n<p></p>.
eq_('Paper Wooden %s Bats Cups' % recursion_message,
re.sub(r'</?p>|\n', '', boo.content_parsed))
class TestWikiVideo(TestCase):
"""Video hook."""
def tearDown(self):
Video.objects.all().delete()
super(TestWikiVideo, self).tearDown()
def test_video_english(self):
"""Video is created and found in English."""
v = video()
d, _, p = doc_rev_parser('[[V:Some title]]')
doc = pq(d.html)
eq_('video', doc('div.video').attr('class'))
# This test and the code it tests hasn't changed in
# months. However, this test started failing for Mike and I
# early July 2013. We think we picked up libxml2 2.9.1 and
# that causes the output to be different. I contend that the
# output and expected output are both "wrong" in that they're
# invalid html5 and the output I'm getting isn't really any
# worse. Ergo, I have changed the test to accept either output
# because I got stuff to do. Having said that, this is kind of
# ridiculous and should be fixed. See bug #892610.
assert doc('video').html() in [
# This was the original expected test output.
(u'<source src="{0}" '
u'type="video/webm"><source src="{1}" type="video/ogg"/>'
u'</source>'.format(v.webm.url, v.ogv.url)),
# This is the version that Mike and I get.
(u'\n <source src="{0}" type="video/webm">'
u'\n <source src="{1}" type="video/ogg">'
u'\n </source></source>'.format(v.webm.url, v.ogv.url))]
eq_(1, len(doc('video')))
eq_(2, len(doc('source')))
data_fallback = doc('video').attr('data-fallback')
eq_(v.flv.url, data_fallback)
def test_video_fallback_french(self):
"""English video is found in French."""
p = WikiParser()
self.test_video_english()
doc = pq(p.parse('[[V:Some title]]', locale='fr'))
eq_('video', doc('div.video').attr('class'))
eq_(1, len(doc('video')))
eq_(2, len(doc('source')))
data_fallback = doc('video').attr('data-fallback')
eq_(Video.objects.all()[0].flv.url, data_fallback)
def test_video_not_exist(self):
"""Video does not exist."""
p = WikiParser()
doc = pq(p.parse('[[V:Some title]]', locale='fr'))
eq_('The video "Some title" does not exist.', doc.text())
def test_video_modal(self):
"""Video modal defaults for plcaeholder and text."""
v = video()
replacement = ('<img class="video-thumbnail" src="%s"/>' %
v.thumbnail_url_if_set())
d, _, p = doc_rev_parser(
'[[V:Some title|modal]]')
doc = pq(d.html)
eq_('Some title', doc('.video-modal')[0].attrib['title'])
eq_(1, doc('.video video').length)
eq_(replacement, doc('.video-placeholder').html().strip())
eq_('video modal-trigger', doc('div.video').attr('class'))
def test_video_modal_caption_text(self):
"""Video modal can change title and placeholder text."""
video()
d, _, p = doc_rev_parser(
'[[V:Some title|modal|placeholder=Place<b>holder</b>|title=WOOT]]')
doc = pq(d.html)
eq_('WOOT', doc('.video-modal')[0].attrib['title'])
eq_('Place<b>holder</b>', doc('.video-placeholder').html().strip())
def test_video_cdn(self):
"""Video URLs can link to the CDN if a CDN setting is set."""
video()
cdn_url = 'http://videos.mozilla.org/serv/sumo/'
self.old_settings = settings.GALLERY_VIDEO_URL
settings.GALLERY_VIDEO_URL = cdn_url
d, _, p = doc_rev_parser('[[V:Some title]]')
settings.GALLERY_VIDEO_URL = self.old_settings
doc = pq(d.html)
assert cdn_url in doc('video').attr('data-fallback')
assert cdn_url in doc('source').eq(0).attr('src')
assert cdn_url in doc('source').eq(1).attr('src')
def test_youtube_video(self):
"""Verify youtube embeds."""
urls = ['http://www.youtube.com/watch?v=oHg5SJYRHA0',
                'https://youtube.com/watch?v=oHg5SJYRHA0',
                'http://youtu.be/oHg5SJYRHA0',
                'https://youtu.be/oHg5SJYRHA0']
parser = WikiParser()
for url in urls:
doc = pq(parser.parse('[[V:%s]]' % url))
assert doc('iframe')[0].attrib['src'].startswith(
'//www.youtube.com/embed/oHg5SJYRHA0')
def parsed_eq(want, to_parse):
p = WikiParser()
eq_(want, p.parse(to_parse).strip().replace('\n', ''))
class ForWikiTests(TestCase):
"""Tests for the wiki implementation of the {for} directive, which
arranges for certain parts of the page to show only when viewed on certain
OSes or browser versions"""
def test_block(self):
"""A {for} set off by itself or wrapping a block-level element should
be a paragraph or other kind of block-level thing."""
parsed_eq('<p>Joe</p><p><span class="for">Red</span></p>'
'<p>Blow</p>',
'Joe\n\n{for}Red{/for}\n\nBlow')
parsed_eq('<p>Joe</p><div class="for"><ul><li> Red</li></ul></div>'
'<p>Blow</p>',
'Joe\n\n{for}\n* Red\n{/for}\n\nBlow')
def test_inline(self):
"""A for not meeting the conditions in test_block should be inline.
"""
parsed_eq('<p>Joe</p>'
'<p>Red <span class="for">riding</span> hood</p>'
'<p>Blow</p>',
'Joe\n\nRed {for}riding{/for} hood\n\nBlow')
def test_nested(self):
"""{for} tags should be nestable."""
parsed_eq('<div class="for" data-for="mac">'
'<p>Joe</p>'
'<p>Red <span class="for"><span class="for">riding'
'</span> hood</span></p>'
'<p>Blow</p>'
'</div>',
'{for mac}\n'
'Joe\n'
'\n'
'Red {for}{for}riding\n'
'{/for} hood{/for}\n'
'\n'
'Blow\n'
'{/for}')
def test_data_attrs(self):
"""Make sure the correct attributes are set on the for element."""
parsed_eq('<p><span class="for" data-for="mac,linux,3.6">'
'One</span></p>',
'{for mac,linux,3.6}One{/for}')
def test_early_close(self):
"""Make sure the parser closes the for tag at the right place when
its closer is early."""
parsed_eq('<div class="for"><p>One</p>'
'<ul><li>Fish</li></ul></div>',
'{for}\nOne\n\n*Fish{/for}')
def test_late_close(self):
"""If the closing for tag is not closed by the time the enclosing
element of the opening for tag is closed, close the for tag
just before the enclosing element."""
parsed_eq(
'<ul><li><span class="for">One</span></li>'
'<li>Fish</li></ul><p>Two</p>',
'*{for}One\n*Fish\n\nTwo\n{/for}')
def test_missing_close(self):
"""If the closing for tag is missing, close the for tag just
before the enclosing element."""
parsed_eq(
'<p><span class="for">One fish</span></p><p>Two fish</p>',
'{for}One fish\n\nTwo fish')
def test_unicode(self):
"""Make sure non-ASCII chars survive being wrapped in a for."""
french = u'Vous parl\u00e9 Fran\u00e7ais'
parsed_eq('<p><span class="for">' + french + '</span></p>',
'{for}' + french + '{/for}')
def test_boolean_attr(self):
"""Make sure empty attributes don't raise exceptions."""
parsed_eq('<p><video controls height="120">'
' <source src="/some/path/file.ogv" type="video/ogv">'
'</video></p>',
'<p><video controls="" height="120">'
' <source src="/some/path/file.ogv" type="video/ogv">'
'</video></p>')
def test_adjacent_blocks(self):
"""Make sure one block-level {for} doesn't absorb an adjacent one."""
p = WikiParser()
html = p.parse('{for fx4}\n'
'{for mac}Fx4{/for}\n'
'{/for}\n'
'{for fx3}\n'
'{for mac}Fx3{/for}\n'
'{/for}')
# The two div.fors should be siblings, not nested:
eq_([], pq(html)('div.for div.for'))
def test_leading_newlines(self):
"""Make sure leading newlines don't cause a block-level {for} to be
sucked into the leading blank paragraph, causing the actual text to
always be shown."""
doc = pq(WikiParser().parse('\n\n{for linux}\nunixify\n{/for}'))
eq_('unixify', doc('.for').text().strip())
def test_big_swath(self):
"""Enclose a big section containing many tags."""
parsed_eq('<div class="for"><h1 id="w_h1">H1</h1>'
'<h2 id="w_h2">H2</h2><p>Llamas are fun:</p>'
'<ul><li>Jumping</li><li>Rolling</li><li>Grazing</li></ul>'
'<p>They have high melting points.</p></div>',
'{for}\n'
'=H1=\n'
'==H2==\n'
'Llamas are fun:\n'
'\n'
'*Jumping\n'
'*Rolling\n'
'*Grazing\n'
'\n'
'They have high melting points.\n'
'{/for}')
def test_block_level_section(self):
"""Make sure we recognize <section> as a block element."""
p = WikiParser()
html = p.parse('{for}<section>hi</section>{/for}')
assert '<div' in html, "Didn't detect <section> tag as block level"
def balanced_eq(want, to_balance):
"""Run `to_balance` through the expander to get its tags balanced, and
assert the result is `want`."""
expander = ForParser(to_balance)
eq_(want, expander.to_unicode())
def expanded_eq(want, to_expand):
"""Balance and expand the fors in `to_expand`, and assert equality with
`want`."""
expander = ForParser(to_expand)
expander.expand_fors()
eq_(want, expander.to_unicode())
def strip_eq(want, text):
eq_(want, ForParser.strip_fors(text)[0])
class ForParserTests(TestCase):
"""Tests for the ForParser
These are unit tests for ForParser, and ForWikiTests are
(as a bonus) integration tests for it.
"""
def test_well_formed(self):
"""Make sure the expander works on well-formed fragments."""
html = '<ul><li type="1"><br><for>One</for></li></ul>'
balanced_eq(html, html)
def test_document_mode(self):
"""Make sure text chunks interspersed with tags are parsed right."""
html = '<p>Hello<br>there, <br>you.</p>'
balanced_eq(html, html)
def test_early_close(self):
"""Make sure the parser closes the for tag at the right place when
its closer is early."""
balanced_eq('<div><for><p>One</p></for></div>',
'<div><for><p>One</for></for></p></div>')
def test_late_close(self):
"""If the closing for tag is not closed by the time the enclosing
element of the opening for tag is closed, close the for tag
just before the enclosing element."""
balanced_eq(
'<ul><li><for><for>One</for></for></li></ul>',
'<ul><li><for><for>One</li></ul></for>')
def test_close_absent_at_end(self):
"""Make sure the parser closes for tags left open at the EOF.
This mattered more when we weren't building a parse tree.
"""
balanced_eq('<for><p>One</p></for>',
'<for><p>One</for></for></p>')
def test_unicode(self):
"""Make sure this all works with non-ASCII chars."""
html = u'<for>Vous parl\u00e9 Fran\u00e7ais</for>'
balanced_eq(html, html)
def test_div(self):
"""Make sure we use divs for fors containing block elements."""
expanded_eq('<div class="for"><p>One</p></div>',
'<for><p>One</p></for>')
def test_span(self):
"""Make sure we use spans for fors containing no block elements."""
expanded_eq('<span class="for"><em>One</em></span>',
'<for><em>One</em></for>')
def test_data_attrs(self):
"""Make sure the data- attributes look good."""
expanded_eq('<span class="for" data-for="mac,linux">One</span>',
'<for data-for="mac,linux">One</for>')
def test_on_own_line(self):
def on_own_line_eq(want, text):
"""Assert that on_own_line operates as expected on the first match
in `text`."""
match = ForParser._FOR_OR_CLOSER.search(text)
eq_(want, ForParser._on_own_line(match, match.groups(3)))
on_own_line_eq((True, True, True), '{for}')
on_own_line_eq((True, True, True), '{for} ')
on_own_line_eq((False, False, True), ' {for}')
on_own_line_eq((True, False, True), 'q\n{for}')
on_own_line_eq((False, True, False), '{for}q')
on_own_line_eq((True, False, False), '\n{for} \nq')
def test_strip(self):
strip_eq('\x070\x07inline\x07/sf\x07', '{for}inline{/for}')
strip_eq('\x070\x07\n\nblock\n\n\x07/sf\x07',
'{for}\nblock\n{/for}')
strip_eq('\x070\x07inline\n\n\x07/sf\x07',
'{for}inline\n{/for}')
strip_eq('\x070\x07\n\nblock\x07/sf\x07', '{for}\nblock{/for}')
def test_whitespace_lookbehind(self):
"""Assert strip_fors is aware of newlines preceding the current match.
This used to fail because both the postspace for the first closer and
the prespace for the 2nd got 1 \n added, resulting in 3, which is 1
too many. Now we use the preceding_whitespace function to look behind
and take preceding newlines into account.
"""
strip_eq('\x070\x07\n\n\x071\x07inline\x07/sf\x07\n\n\x07/sf\x07',
'{for}\n{for}inline{/for}\n{/for}')
def test_matches_see_replacements(self):
"""Make sure each whitespace lookbehind takes into account the effect
of previous replacements' whitespace additions.
When this bug existed, strip_fors would add a \n for postspace to the
2nd {/for}, but then the preceding_whitespace call for the next {for}
wouldn't see what was added, since it was still looking in the
original string, without the replacements applied.
"""
strip_eq('\x070\x07\n\n\x071\x07Fx4\x07/sf\x07\n\n\x07/sf\x07\n\n'
'\x072\x07\n\n\x073\x07Fx3\x07/sf\x07\n\n\x07/sf\x07',
'{for fx4}\n'
'{for mac}Fx4{/for}\n'
'{/for}\n'
'{for fx3}\n'
'{for mac}Fx3{/for}\n'
'{/for}')
def test_self_closers(self):
"""Make sure self-closing tags aren't balanced as paired ones."""
balanced_eq('<img src="smoo"><span>g</span>',
'<img src="smoo"><span>g</span>')
balanced_eq('<img src="smoo"><span>g</span>',
'<img src="smoo"/><span>g</span>')
def test_leading_text_nodes(self):
"""Make sure the parser handles a leading naked run of text.
Test inner runs of text while we're at it.
"""
html = 'A<i>hi</i>B<i>there</i>C'
p = ForParser(html)
eq_(html, p.to_unicode())
class WhatLinksHereTests(TestCase):
def test_links(self):
d1, _, _ = doc_rev_parser('', title='D1')
d2, _, _ = doc_rev_parser('[[D1]]', title='D2')
d3, _, _ = doc_rev_parser('[[D1]] [[D2]]', title='D3')
eq_(len(d1.links_to()), 2)
eq_(len(d1.links_from()), 0)
eq_(len(d2.links_to()), 1)
eq_(len(d2.links_from()), 1)
eq_(len(d3.links_to()), 0)
eq_(len(d3.links_from()), 2)
eq_([d.linked_from.title for d in d1.links_to()], ['D2', 'D3'])
eq_([d.kind for d in d1.links_to()], ['link', 'link'])
eq_([d.linked_from.title for d in d2.links_to()], ['D3'])
def test_templates(self):
d1, _, _ = doc_rev_parser('Oh hai', title='Template:D1')
d2, _, _ = doc_rev_parser('[[Template:D1]]', title='D2')
eq_(len(d1.links_to()), 1)
eq_(len(d1.links_from()), 0)
eq_(len(d2.links_to()), 0)
eq_(len(d2.links_from()), 1)
eq_(d1.links_to()[0].kind, 'template')
def test_includes(self):
d1, _, _ = doc_rev_parser('Oh hai', title='D1')
d2, _, _ = doc_rev_parser('[[Include:D1]]', title='D2')
eq_(len(d1.links_to()), 1)
eq_(len(d1.links_from()), 0)
eq_(len(d2.links_to()), 0)
eq_(len(d2.links_from()), 1)
eq_(d1.links_to()[0].kind, 'include')
def test_duplicates(self):
"""Document.links_to and Document.links_from should only count
documents that link, not every instance of a link on a page.
Make sure that things work that way."""
d1, _, _ = doc_rev_parser('', title='D1')
d2, _, _ = doc_rev_parser('[[D1]] [[D1]] [[D1]]', title='D2')
eq_(len(d1.links_to()), 1)
eq_(len(d1.links_from()), 0)
eq_(len(d2.links_to()), 0)
eq_(len(d2.links_from()), 1)
eq_(d1.links_to()[0].kind, 'link')
def test_locales_exists(self):
"""Links should use the correct locale."""
d1 = document(title='Foo', locale='en-US', save=True)
revision(document=d1, content='', is_approved=True, save=True)
d2 = document(title='Foo', locale='de', save=True)
revision(document=d2, content='', is_approved=True, save=True)
d3 = document(title='Bar', locale='de', save=True)
revision(document=d3, content='[[Foo]]', is_approved=True, save=True)
eq_(len(d1.links_to()), 0)
eq_(len(d1.links_from()), 0)
eq_(len(d2.links_to()), 1)
eq_(len(d2.links_from()), 0)
eq_(len(d3.links_to()), 0)
eq_(len(d3.links_from()), 1)
eq_(d2.links_to()[0].kind, 'link')
def test_locales_renames(self):
"""Links should use the correct locale, even if the title has
been translated."""
d1 = document(title='Foo', locale='en-US', save=True)
revision(document=d1, content='', is_approved=True, save=True)
d2 = document(title='German Foo', locale='de', parent=d1, save=True)
revision(document=d2, content='', is_approved=True, save=True)
d3 = document(title='German Bar', locale='de', save=True)
revision(document=d3, content='[[Foo]]', is_approved=True, save=True)
eq_(len(d1.links_to()), 0)
eq_(len(d1.links_from()), 0)
eq_(len(d2.links_to()), 1)
eq_(len(d2.links_from()), 0)
eq_(len(d3.links_to()), 0)
eq_(len(d3.links_from()), 1)
eq_(d2.links_to()[0].kind, 'link')
def test_unicode(self):
"""Unicode is hard. Test that."""
# \u03C0 is pi and \u2764 is a heart symbol.
d1 = document(title=u'\u03C0', slug='pi', save=True)
revision(document=d1, content=u'I \u2764 \u03C0', is_approved=True,
save=True)
d2 = document(title=u'\u2764', slug='heart', save=True)
revision(document=d2, content=u'What do you think about [[\u03C0]]?',
is_approved=True, save=True)
eq_(len(d1.links_to()), 1)
eq_(len(d1.links_from()), 0)
eq_(len(d2.links_to()), 0)
eq_(len(d2.links_from()), 1)
eq_(d1.links_to()[0].kind, 'link')
def test_old_revisions(self):
"""Bug 862436. Updating old revisions could cause bad WLH data."""
d1 = document(title='D1', save=True)
revision(document=d1, content='', is_approved=True, save=True)
d2 = document(title='D2', save=True)
revision(document=d2, content='', is_approved=True, save=True)
# Make D3, then make a revision that links to D1, then a
# revision that links to D2. Only the link to D2 should count.
d3 = document(title='D3', save=True)
r3_old = revision(document=d3, content='[[D1]]', is_approved=True,
save=True)
revision(document=d3, content='[[D2]]', is_approved=True, save=True)
# This could cause stale data
r3_old.content_parsed
# D1 is not linked to in any current revisions.
eq_(len(d1.links_to()), 0)
eq_(len(d1.links_from()), 0)
eq_(len(d2.links_to()), 1)
eq_(len(d2.links_from()), 0)
eq_(len(d3.links_to()), 0)
eq_(len(d3.links_from()), 1)
def test_images(self):
img = image(title='image-file.png')
d1, _, _ = doc_rev_parser('[[Image:image-file.png]]', title='D1')
eq_(len(d1.images), 1)
eq_(d1.images[0], img)
eq_(len(img.documents), 1)
eq_(img.documents[0], d1)
class TestLazyWikiImageTags(TestCase):
def setUp(self):
self.d, self.r, self.p = doc_rev_parser(
'Test content', 'Installing Firefox')
self.img = image(title='test.jpg')
def tearDown(self):
self.img.delete()
def test_simple(self):
"""Simple image tag markup."""
doc = pq(self.p.parse('[[Image:test.jpg]]',
locale=settings.WIKI_DEFAULT_LANGUAGE))
img = doc('img')
eq_('test.jpg', img.attr('alt'))
eq_(self.img.file.url, img.attr('data-original-src'))
assert 'placeholder.gif' in img.attr('src')
``` |
{
"source": "joshua-software-dev/FF14AnglerParser",
"score": 3
} |
#### File: ff14angler/database/alchemyMapping.py
```python
import uuid
from sqlalchemy import (
BigInteger,
Binary,
Boolean,
Column,
DECIMAL,
DateTime,
ForeignKey,
Index,
Integer,
SmallInteger,
String,
Text,
types
)
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
# noinspection PyAbstractClass
class UUID(types.TypeDecorator):
impl = Binary
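    # UUID values are stored as raw 16-byte binary data and returned as their canonical string form when read back.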
def __init__(self):
self.impl.length = 16
types.TypeDecorator.__init__(self, length=self.impl.length)
def process_bind_param(self, value, dialect=None):
if value and isinstance(value, uuid.UUID):
return value.bytes
elif value and not isinstance(value, uuid.UUID):
raise ValueError(f'value {value} is not a valid uuid.UUID')
else:
return None
def process_result_value(self, value, dialect=None):
if value:
return str(uuid.UUID(bytes=value))
else:
return None
@staticmethod
def is_mutable():
return False
# noinspection SpellCheckingInspection
class Bait(Base):
__tablename__ = 'bait'
bait_angler_bait_id = Column(BigInteger, primary_key=True)
bait_xivapi_item_id = Column(BigInteger, nullable=False, index=True)
bait_angler_name = Column(String(4096), nullable=False)
bait_item_name_de = Column(String(4096), nullable=False)
bait_item_name_en = Column(String(4096), nullable=False)
bait_item_name_fr = Column(String(4096), nullable=False)
bait_item_name_ja = Column(String(4096), nullable=False)
bait_icon_url = Column(String(2083), nullable=False)
bait_large_icon_url = Column(String(2083))
bait_lodestone_url = Column(String(2083))
bait_item_level = Column(SmallInteger, nullable=False)
bait_item_description_de = Column(Text, nullable=False)
bait_item_description_en = Column(Text, nullable=False)
bait_item_description_fr = Column(Text, nullable=False)
bait_item_description_ja = Column(Text, nullable=False)
bait_gil_cost = Column(Integer)
bait_gil_sell_price = Column(Integer)
bait_is_mooch_fish = Column(Boolean, nullable=False)
comment = relationship('Comment', secondary='bait_comment')
spot_angler_spots = relationship('Spot', secondary='spot_effective_bait')
# noinspection SpellCheckingInspection
class Fish(Base):
__tablename__ = 'fish'
fish_angler_fish_id = Column(BigInteger, primary_key=True)
fish_xivapi_item_id = Column(BigInteger, nullable=False, index=True)
fish_angler_name = Column(String(4096), nullable=False)
fish_item_name_de = Column(String(4096), nullable=False)
fish_item_name_en = Column(String(4096), nullable=False)
fish_item_name_fr = Column(String(4096), nullable=False)
fish_item_name_ja = Column(String(4096), nullable=False)
fish_icon_url = Column(String(2083), nullable=False)
fish_large_icon_url = Column(String(2083))
fish_lodestone_url = Column(String(2083))
fish_item_level = Column(SmallInteger, nullable=False)
fish_item_description_de = Column(Text, nullable=False)
fish_item_description_en = Column(Text, nullable=False)
fish_item_description_fr = Column(Text, nullable=False)
fish_item_description_ja = Column(Text, nullable=False)
fish_fishing_log_description_de = Column(Text)
fish_fishing_log_description_en = Column(Text)
fish_fishing_log_description_fr = Column(Text)
fish_fishing_log_description_ja = Column(Text)
fish_introduced_patch = Column(String(8), nullable=False)
fish_angler_territory = Column(String(128))
fish_angler_item_category = Column(String(128), nullable=False)
fish_angler_double_hooking_count = Column(String(8), nullable=False)
fish_angler_aquarium_size = Column(String(8))
fish_angler_canvas_size = Column(String(8))
spot_angler_spots = relationship('Spot', secondary='spot_available_fish')
# noinspection SpellCheckingInspection
class Spot(Base):
__tablename__ = 'spot'
__table_args__ = (
Index('spot_spot_gathering_type', 'spot_gathering_type', 'spot_gathering_type_unique_id'),
)
spot_angler_spot_id = Column(BigInteger, primary_key=True)
spot_gathering_type = Column(Text, nullable=False)
spot_gathering_type_unique_id = Column(BigInteger, nullable=False)
spot_angler_area_id = Column(BigInteger, nullable=False)
spot_angler_place_name = Column(String(4096), nullable=False)
spot_place_name_de = Column(String(4096), nullable=False)
spot_place_name_en = Column(String(4096), nullable=False)
spot_place_name_fr = Column(String(4096), nullable=False)
spot_place_name_ja = Column(String(4096), nullable=False)
spot_angler_zone_name = Column(String(4096), nullable=False)
spot_zone_name_de = Column(String(4096), nullable=False)
spot_zone_name_en = Column(String(4096), nullable=False)
spot_zone_name_fr = Column(String(4096), nullable=False)
spot_zone_name_ja = Column(String(4096), nullable=False)
spot_is_teeming_waters = Column(Boolean, nullable=False)
spot_gathering_level = Column(SmallInteger, nullable=False)
spot_angler_x_coord = Column(SmallInteger, nullable=False)
spot_angler_y_coord = Column(SmallInteger, nullable=False)
spot_angler_fishers_intuition_comment = Column(String(512))
# noinspection SpellCheckingInspection,SpellCheckingInspection
class Comment(Base):
__tablename__ = 'comment'
comment_uuid = Column(UUID(), primary_key=True)
comment_fetch_timestamp = Column(DateTime, nullable=False)
comment_timestamp = Column(DateTime, nullable=False)
comment_author = Column(String(256), nullable=False)
comment_html = Column(Text, nullable=False)
comment_text_original = Column(Text, nullable=False)
comment_text_translated = Column(Text, nullable=False)
fish_angler_fishs = relationship('Fish', secondary='fish_comment')
spot_angler_spots = relationship('Spot', secondary='spot_comment')
# noinspection SpellCheckingInspection
class BaitComment(Comment):
__tablename__ = 'bait_comment'
bait_angler_bait_id = Column(ForeignKey('bait.bait_angler_bait_id'), primary_key=True, nullable=False, index=True)
comment_uuid = Column(ForeignKey('comment.comment_uuid'), primary_key=True, nullable=False)
# noinspection SpellCheckingInspection
class FishComment(Comment):
__tablename__ = 'fish_comment'
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True, nullable=False, index=True)
comment_uuid = Column(ForeignKey('comment.comment_uuid'), primary_key=True, nullable=False)
# noinspection SpellCheckingInspection
class SpotComment(Comment):
__tablename__ = 'spot_comment'
spot_angler_spot_id = Column(ForeignKey('spot.spot_angler_spot_id'), primary_key=True, nullable=False, index=True)
comment_uuid = Column(ForeignKey('comment.comment_uuid'), primary_key=True, nullable=False)
# noinspection SpellCheckingInspection
class BaitAltCurrencyPrice(Base):
__tablename__ = 'bait_alt_currency_price'
bait_angler_bait_id = Column(ForeignKey('bait.bait_angler_bait_id'), primary_key=True, nullable=False, index=True)
bait_alt_currency_id = Column(BigInteger, primary_key=True, nullable=False)
bait_alt_currency_cost = Column(Integer, primary_key=True, nullable=False)
bait_alt_currency_name = Column(String(4096), nullable=False)
bait_angler_bait = relationship('Bait')
# noinspection SpellCheckingInspection
class FishBaitPreference(Base):
__tablename__ = 'fish_bait_preference'
fish_angler_fish_id = Column(BigInteger, primary_key=True, nullable=False, index=True)
bait_angler_bait_id = Column(BigInteger, primary_key=True, nullable=False, index=True)
bait_percentage = Column(String(8), nullable=False)
# noinspection SpellCheckingInspection
class FishCaughtCount(Base):
__tablename__ = 'fish_caught_count'
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True)
fish_caught_all_hours_count = Column(BigInteger, nullable=False)
fish_caught_all_weathers_count = Column(BigInteger)
# noinspection SpellCheckingInspection
class FishCaughtPerHour(Base):
__tablename__ = 'fish_caught_per_hour'
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True, nullable=False)
hour_num = Column(Integer, primary_key=True, nullable=False)
hour_fish_caught_count = Column(BigInteger, nullable=False)
fish_angler_fish = relationship('Fish')
# noinspection SpellCheckingInspection
class FishCaughtPerWeather(Base):
__tablename__ = 'fish_caught_per_weather'
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True, nullable=False)
weather_name = Column(String(128), primary_key=True, nullable=False)
weather_fish_caught_count = Column(BigInteger, nullable=False)
fish_angler_fish = relationship('Fish')
# noinspection SpellCheckingInspection
class FishDesynthesisItem(Base):
__tablename__ = 'fish_desynthesis_item'
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True, nullable=False)
desynthesis_item_id = Column(BigInteger, primary_key=True, nullable=False)
desynthesis_item_name_de = Column(String(4096), nullable=False)
desynthesis_item_name_en = Column(String(4096), nullable=False)
desynthesis_item_name_fr = Column(String(4096), nullable=False)
desynthesis_item_name_ja = Column(String(4096), nullable=False)
desynthesis_icon_url = Column(String(2083), nullable=False)
desynthesis_large_icon_url = Column(String(2083))
desynthesis_angler_item_name = Column(String(4096), nullable=False)
desynthesis_angler_lodestone_url = Column(String(2083))
desynthesis_angler_percentage = Column(String(8), nullable=False)
fish_angler_fish = relationship('Fish')
# noinspection SpellCheckingInspection
class FishInvolvedLeve(Base):
__tablename__ = 'fish_involved_leve'
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True, nullable=False, index=True)
leve_id = Column(BigInteger, primary_key=True, nullable=False)
leve_name_de = Column(String(4096), nullable=False)
leve_name_en = Column(String(4096), nullable=False)
leve_name_fr = Column(String(4096), nullable=False)
leve_name_ja = Column(String(4096), nullable=False)
leve_angler_name = Column(String(4096), nullable=False)
leve_angler_name_jp = Column(String(4096), nullable=False)
leve_level = Column(SmallInteger, nullable=False)
leve_angler_turn_in_count = Column(SmallInteger, nullable=False)
fish_angler_fish = relationship('Fish')
# noinspection SpellCheckingInspection
class FishInvolvedRecipe(Base):
__tablename__ = 'fish_involved_recipe'
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True, nullable=False, index=True)
recipe_item_id = Column(BigInteger, primary_key=True, nullable=False)
recipe_item_name_de = Column(String(4096), nullable=False)
recipe_item_name_en = Column(String(4096), nullable=False)
recipe_item_name_fr = Column(String(4096), nullable=False)
recipe_item_name_ja = Column(String(4096), nullable=False)
recipe_angler_name = Column(String(4096), nullable=False)
recipe_icon_url = Column(String(2083), nullable=False)
recipe_large_icon_url = Column(String(2083))
recipe_angler_lodestone_url = Column(String(2083))
recipe_angler_crafting_class = Column(Text, nullable=False)
fish_angler_fish = relationship('Fish')
# noinspection SpellCheckingInspection
class FishTugStrength(Base):
__tablename__ = 'fish_tug_strength'
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True, nullable=False, index=True)
tug_strength = Column(Integer, primary_key=True, nullable=False)
tug_strength_percent = Column(DECIMAL, nullable=False)
fish_angler_fish = relationship('Fish')
# noinspection SpellCheckingInspection
class SpotAvailableFish(Base):
__tablename__ = 'spot_available_fish'
spot_angler_spot_id = Column(ForeignKey('spot.spot_angler_spot_id'), primary_key=True, nullable=False, index=True)
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True, nullable=False, index=True)
# noinspection SpellCheckingInspection
class SpotBaitFishCatchInfo(Base):
__tablename__ = 'spot_bait_fish_catch_info'
spot_angler_spot_id = Column(ForeignKey('spot.spot_angler_spot_id'), primary_key=True, nullable=False, index=True)
bait_angler_bait_id = Column(ForeignKey('bait.bait_angler_bait_id'), primary_key=True, nullable=False, index=True)
fish_angler_fish_id = Column(ForeignKey('fish.fish_angler_fish_id'), primary_key=True, nullable=False, index=True)
spot_bait_fish_catch_count = Column(BigInteger, nullable=False)
spot_bait_fish_average_seconds_to_hook = Column(SmallInteger)
spot_bait_fish_catch_percentage = Column(String(8), nullable=False)
bait_angler_bait = relationship('Bait')
fish_angler_fish = relationship('Fish')
spot_angler_spot = relationship('Spot')
# noinspection SpellCheckingInspection
class SpotBaitTotalFishCaught(Base):
__tablename__ = 'spot_bait_total_fish_caught'
spot_angler_spot_id = Column(ForeignKey('spot.spot_angler_spot_id'), primary_key=True, nullable=False, index=True)
bait_angler_bait_id = Column(ForeignKey('bait.bait_angler_bait_id'), primary_key=True, nullable=False, index=True)
spot_bait_total_catch_count = Column(BigInteger, nullable=False)
bait_angler_bait = relationship('Bait')
spot_angler_spot = relationship('Spot')
# noinspection SpellCheckingInspection
class SpotEffectiveBait(Base):
__tablename__ = 'spot_effective_bait'
spot_angler_spot_id = Column(ForeignKey('spot.spot_angler_spot_id'), primary_key=True, nullable=False, index=True)
bait_angler_bait_id = Column(ForeignKey('bait.bait_angler_bait_id'), primary_key=True, nullable=False, index=True)
# noinspection SpellCheckingInspection
class LastUpdated(Base):
__tablename__ = 'last_updated'
last_updated_timestamp = Column(DateTime, primary_key=True, nullable=False)
bait_count = Column(BigInteger, nullable=False)
fish_count = Column(BigInteger, nullable=False)
spot_count = Column(BigInteger, nullable=False)
```
#### File: dataClasses/bait/baitId.py
```python
from dataclasses import dataclass
from typing import Optional
from dataclasses_json import DataClassJsonMixin
from ff14angler.constants.data_corrections import angler_bait_name_corrections, angler_bait_name_do_not_search
from ff14angler.network.xivapiWrapper import XivapiWrapper
@dataclass
class BaitId(DataClassJsonMixin):
bait_angler_bait_id: int
bait_xivapi_item_id: Optional[int]
@classmethod
async def get_bait_id_from_angler_bait(cls, bait_angler_id: int, bait_angler_name: str):
if bait_angler_name in angler_bait_name_do_not_search:
search_response = {'ID': None}
else:
search_response = await XivapiWrapper.xivapi_item_search(
angler_bait_name_corrections.get(bait_angler_name) or bait_angler_name
)
return cls(bait_angler_bait_id=bait_angler_id, bait_xivapi_item_id=search_response['ID'])
```
#### File: dataClasses/bait/baitProvider.py
```python
from typing import Dict, List, Optional
from bs4 import BeautifulSoup # type: ignore
from bs4.element import Tag # type: ignore
from ff14angler.constants.regex import mooch_name_regex, non_number_replacement_regex
from ff14angler.dataClasses.bait.bait import Bait
from ff14angler.dataClasses.bait.baitPercentage import BaitPercentage
class BaitProvider:
bait_holder: Dict[int, Bait] = dict()
@staticmethod
async def _parse_angler_bait_id(td2: Tag) -> int:
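        # The angler bait id is embedded in the row's link href when present, otherwise in the icon image's src attribute.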
a_tag = td2.find('a')
if a_tag:
return int(non_number_replacement_regex.sub(repl='', string=a_tag.attrs['href']))
return int(non_number_replacement_regex.sub(repl='', string=td2.find('img').attrs['src']))
@classmethod
async def get_bait_from_angler_bait(cls, bait_angler_id: int, bait_angler_name: str) -> Bait:
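        # Reuse the cached Bait when this angler bait id has already been resolved.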
if result := cls.bait_holder.get(bait_angler_id):
return result
cls.bait_holder[bait_angler_id] = await Bait.get_bait_from_angler_bait(
bait_angler_id=bait_angler_id,
bait_angler_name=bait_angler_name
)
return cls.bait_holder[bait_angler_id]
@classmethod
async def get_bait_percentage_list_from_fish_soup(cls, soup: BeautifulSoup) -> List[BaitPercentage]:
temp_bait_list: List[BaitPercentage] = []
bait_section: Optional[Tag] = soup.find('form', {'name': 'bait_delete'})
if not bait_section:
return temp_bait_list
for child in bait_section.find_all('td', {'class': 'width_max'}): # type: Tag
parent: Tag = child.parent
td1, td2 = parent.find_all('td')[:2] # type: Tag, Tag
angler_bait_id: int = await cls._parse_angler_bait_id(td2)
angler_bait_name: str = mooch_name_regex.sub('', td2.text).strip()
bait = await cls.get_bait_from_angler_bait(angler_bait_id, angler_bait_name)
if bait.bait_item_name_en is None:
await bait.update_bait_with_assume_is_mooch_fish()
temp_bait_list.append(
BaitPercentage(
bait_id=bait.bait_id,
bait_percentage=td1.text.strip()
)
)
return temp_bait_list
```
#### File: dataClasses/comment/commentSection.py
```python
import json
import urllib.parse
import lxml # type: ignore
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, List, Tuple
from bs4 import BeautifulSoup # type: ignore
from dataclasses_json import DataClassJsonMixin
from selenium.webdriver.chrome.webdriver import WebDriver # type: ignore
from ff14angler.constants.javascript import comment_metadata_javascript
from ff14angler.constants.regex import non_number_replacement_regex
from ff14angler.constants.values import ANGLER_BASE_URL
from ff14angler.dataClasses.comment.comment import Comment
@dataclass
class CommentSection(DataClassJsonMixin):
comments: List[Comment]
comment_fetch_timestamp: datetime = field(
default_factory=lambda: datetime.utcnow().replace(microsecond=0, tzinfo=timezone.utc),
metadata={
'dataclasses_json': {
'decoder': datetime.fromisoformat,
'encoder': datetime.isoformat
}
}
)
@classmethod
async def _get_request_metadata_from_web_driver(cls, driver: WebDriver) -> Tuple[int, int, int, int]:
response: Tuple[int, str, str, str] = driver.execute_script(comment_metadata_javascript)
request_id: int = response[0]
type_id: int = int(response[1])
item_id: int = int(response[2])
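        # The maximum comment count comes back as display text, so strip non-digit characters and default to 0 when empty.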
max_comments: int = int(non_number_replacement_regex.sub(repl='', string=response[3]) or '0')
return request_id, type_id, item_id, max_comments
@classmethod
async def parse_comment_section(cls, comment_list: List[Dict[str, Any]]) -> 'CommentSection':
parsed_comment_list: List[Comment] = []
for comment_json in comment_list:
parsed_comment_list.append(await Comment.get_comment_from_angler_comment_json(comment_json))
return cls(parsed_comment_list)
@classmethod
async def get_comment_section_from_web_driver(cls, driver: WebDriver) -> 'CommentSection':
offset: int = 0
comment_list: List[Dict[str, Any]] = []
request_id, type_id, item_id, max_comments = await cls._get_request_metadata_from_web_driver(driver)
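        # Comments are served in pages of up to 1000; keep requesting with an increasing offset until every comment is collected.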
while max_comments > 0:
request_url: str = urllib.parse.urljoin(
ANGLER_BASE_URL,
'/comment.php?rid={}{}&limit=1000&type={}&item={}'.format(
request_id,
f'&offset={offset}' if offset > 0 else '',
type_id,
item_id
)
)
print(f'Scraping comment URL: {request_url}')
driver.get(request_url)
if soup := BeautifulSoup(driver.page_source, lxml.__name__).find('pre'):
if response := json.loads(soup.text.strip()):
comment_list += response['comment']
offset += 1000
if len(comment_list) < max_comments:
continue
else:
raise ValueError('No JSON response from server for comments.')
break
return await cls.parse_comment_section(comment_list=comment_list)
```
#### File: dataClasses/spot/spotBaitMetadata.py
```python
from dataclasses import dataclass, field
from typing import List, Optional
from dataclasses_json import DataClassJsonMixin
from ff14angler.dataClasses.bait.baitId import BaitId
from ff14angler.dataClasses.fish.fishId import FishId
from ff14angler.dataClasses.spot.spotBaitFishCatchInfo import SpotBaitFishCatchInfo
@dataclass
class SpotBaitMetadata(DataClassJsonMixin):
spot_bait_id: BaitId
spot_angler_bait_fish_catch_info: List[SpotBaitFishCatchInfo] = field(default_factory=list)
spot_angler_bait_total_fish_caught: Optional[int] = None
def update_spot_bait_metadata_with_spot_bait_fish_caught(
self,
caught_count: int,
caught_percent: str,
caught_total: int,
fish_id: FishId
):
if self.spot_angler_bait_total_fish_caught is not None:
if self.spot_angler_bait_total_fish_caught != caught_total:
raise ValueError('Got inconsistent total fish caught value.')
self.spot_angler_bait_total_fish_caught = caught_total
self.spot_angler_bait_fish_catch_info.append(
SpotBaitFishCatchInfo(
spot_angler_fish_caught_count=caught_count,
spot_angler_fish_caught_percentage=caught_percent,
spot_fish_id=fish_id
)
)
```
#### File: ff14angler/network/aiohttpWrapper.py
```python
import aiohttp
from typing import Any, Dict
from asyncio_throttle import Throttler
from ff14angler.exceptions.networkException import NetworkException
class AiohttpWrapper:
def __init__(self, rate_limit: int, duration: int):
"""The value `rate_limit` is max requests per duration. Duration is integer value in seconds."""
self._throttler = Throttler(rate_limit=rate_limit, period=duration)
async def get_bytes_at_url(self, url: str) -> bytes:
async with self._throttler:
try:
async with aiohttp.ClientSession() as session:
print(f'Downloading URL: {url}')
async with session.get(url) as response:
return await response.read()
except aiohttp.ClientError as e:
raise NetworkException(e)
async def get_json_at_url(self, url: str) -> Dict[str, Any]:
async with self._throttler:
try:
async with aiohttp.ClientSession() as session:
print(f'Fetching URL: {url}')
async with session.get(url) as response:
return await response.json()
except aiohttp.ClientError as e:
raise NetworkException(e)
async def get_text_at_url(self, url: str) -> str:
async with self._throttler:
try:
async with aiohttp.ClientSession() as session:
print(f'Fetching URL: {url}')
async with session.get(url) as response:
return await response.text()
except aiohttp.ClientError as e:
raise NetworkException(e)
async def post_json_at_url(self, url: str, item_name: str, json_obj: Dict[str, Any]) -> Dict[str, Any]:
async with self._throttler:
try:
async with aiohttp.ClientSession() as session:
print(f'Fetching URL: {url} : {item_name}')
async with session.post(url, json=json_obj) as response:
return await response.json()
except aiohttp.ClientError as e:
raise NetworkException(e)
```
#### File: ff14angler/scraper/fishScraper.py
```python
import asyncio
import urllib.parse
import lxml # type: ignore
from bs4 import BeautifulSoup # type: ignore
from selenium.common.exceptions import TimeoutException # type: ignore
from selenium.webdriver.chrome.webdriver import WebDriver # type: ignore
from selenium.webdriver.common.by import By # type: ignore
from selenium.webdriver.support.ui import WebDriverWait # type: ignore
from selenium.webdriver.support import expected_conditions # type: ignore
from ff14angler.constants.values import (
ANGLER_BASE_URL,
ANGLER_PAGE_LOAD_WAIT_DURATION,
ANGLER_DELAY_BETWEEN_REQUESTS_DURATION
)
from ff14angler.dataClasses.comment.commentSection import CommentSection
from ff14angler.dataClasses.fish.fish import Fish
from ff14angler.dataClasses.fish.fishProvider import FishProvider
from ff14angler.exceptions.networkException import NetworkException
from ff14angler.scraper.lodestoneImageScraper import LodestoneImageScraper
from ff14angler.network.delayOnReleaseLock import DelayOnReleaseLock
class FishScraper:
@staticmethod
async def update_fish_with_large_icon_url(driver: WebDriver, fish: Fish):
if fish.fish_angler_lodestone_url:
if fish.fish_icon_url is None:
raise ValueError(f'Missing icon url from xivapi: {fish}')
fish.fish_large_icon_url = await LodestoneImageScraper.get_large_icon(
driver=driver,
short_icon_url=fish.fish_icon_url,
lodestone_url=fish.fish_angler_lodestone_url
)
@staticmethod
async def update_fish_desynthesis_items_with_large_icon_url(driver: WebDriver, fish: Fish):
for desynthesis_item in fish.fish_angler_desynthesis_items:
if desynthesis_item.desynthesis_angler_lodestone_url:
if desynthesis_item.desynthesis_icon_url is None:
raise ValueError(f'Missing icon url from xivapi: {desynthesis_item}')
desynthesis_item.desynthesis_large_icon_url = await LodestoneImageScraper.get_large_icon(
driver=driver,
short_icon_url=desynthesis_item.desynthesis_icon_url,
lodestone_url=desynthesis_item.desynthesis_angler_lodestone_url
)
@staticmethod
async def update_fish_involved_recipes_with_large_icon_url(driver: WebDriver, fish: Fish):
for recipe in fish.fish_angler_involved_recipes:
if recipe.recipe_angler_lodestone_url:
if recipe.recipe_icon_url is None:
raise ValueError(f'Missing icon url from xivapi: {recipe}')
recipe.recipe_large_icon_url = await LodestoneImageScraper.get_large_icon(
driver=driver,
short_icon_url=recipe.recipe_icon_url,
lodestone_url=recipe.recipe_angler_lodestone_url
)
@classmethod
async def collect_fish_data(cls, driver: WebDriver):
fish_url_template = urllib.parse.urljoin(ANGLER_BASE_URL, '/fish/')
lock = DelayOnReleaseLock(ANGLER_DELAY_BETWEEN_REQUESTS_DURATION)
for fish_id, fish in FishProvider.fish_holder.items():
angler_url: str = urllib.parse.urljoin(fish_url_template, str(fish_id))
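            # Each fish page is attempted up to three times before network, timeout, or parse errors are re-raised.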
for attempt in range(3):
driver.get('about:blank')
print(f'Scraping page: {angler_url}')
driver.get(angler_url)
try:
WebDriverWait(driver, ANGLER_PAGE_LOAD_WAIT_DURATION).until(
expected_conditions.presence_of_element_located(
(By.CSS_SELECTOR, 'form.comment_form')
)
)
async with lock:
await asyncio.sleep(2)
html: str = driver.page_source
await fish.update_fish_with_comment_section(
await CommentSection.get_comment_section_from_web_driver(driver)
)
break
except (NetworkException, TimeoutException, ValueError,):
if attempt == 2:
raise
await fish.update_fish_with_fish_soup(BeautifulSoup(html, lxml.__name__))
await cls.update_fish_with_large_icon_url(driver, fish)
await cls.update_fish_desynthesis_items_with_large_icon_url(driver, fish)
await cls.update_fish_involved_recipes_with_large_icon_url(driver, fish)
```
#### File: FF14AnglerParser/ff14angler/scraper_main.py
```python
import argparse
import asyncio
import os
from ff14angler.constants.values import config_settings
from ff14angler.dataClasses.cache.xivapiCache import XivapiCache
from ff14angler.scraper.scraper import Scraper
from ff14angler.network.chromeWrapper import ChromeWrapper
from ff14angler.network.xivapiWrapper import XivapiWrapper
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'-e',
'--export-directory',
action='store',
dest='export_directory',
default=None,
help='Directory to place API cache, and game icon images.'
)
config_settings['EXPORT_DIRECTORY'] = (
arg_parser.parse_args().export_directory or config_settings['EXPORT_DIRECTORY']
)
cache_path = os.path.join(config_settings['EXPORT_DIRECTORY'], 'xivapi_cache.json')
try:
with open(cache_path) as fh:
print('Reading API cache into memory...')
XivapiWrapper.cache = XivapiCache.from_json(fh.read())
except FileNotFoundError:
print('No API cache found.')
try:
print('Starting Chrome...')
with ChromeWrapper() as driver:
print('Beginning scraping...')
loop = asyncio.get_event_loop()
loop.run_until_complete(Scraper.main(driver))
finally:
print('Writing API cache to disk...')
with open(cache_path, 'w+') as fh:
fh.write(XivapiWrapper.cache.to_json())
``` |
{
"source": "joshuaspence/unavailable_entities",
"score": 2
} |
#### File: custom_components/unavailable_entities/sensor.py
```python
from typing import Any, Dict, Optional, Set
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_UNIQUE_ID
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType, StateType
import voluptuous as vol
ATTR_ENTITIES = "entities"
DEFAULT_NAME = "Unavailable Entities"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
# pylint: disable=unused-argument
discovery_info: Optional[DiscoveryInfoType] = None,
) -> bool:
name = config.get(CONF_NAME)
unique_id = config.get(CONF_UNIQUE_ID)
add_entities(
[UnavailableEntitiesSensor(hass, name, unique_id)],
update_before_add=True,
)
return True
class UnavailableEntitiesSensor(Entity):
def __init__(
self,
hass: HomeAssistant,
name: Optional[str] = None,
unique_id: Optional[str] = None,
) -> None:
self.hass = hass
self._name = name
self._unique_id = unique_id
self._state: Set[str] = set()
@property
def entity_id(self) -> str:
return "sensor.unavailable_entities"
@property
def extra_state_attributes(self) -> Dict[str, Any]:
return {ATTR_ENTITIES: self._state}
@property
def icon(self) -> str:
if self.state > 0:
return "mdi:alert-circle"
return "mdi:check-circle"
@property
def name(self) -> Optional[str]:
return self._name
@property
def should_poll(self) -> bool:
# TODO: Stop using polling.
return True
@property
def state(self) -> StateType:
return len(self._state)
@property
def unique_id(self) -> Optional[str]:
return self._unique_id
def update(self) -> None:
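        # Record every entity (other than this sensor itself) whose state is "unavailable" or "unknown".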
entities = set()
for state in self.hass.states.all():
if state.entity_id == self.entity_id:
continue
if state.state in ["unavailable", "unknown"]:
entities.add(state.entity_id)
self._state = entities
```
#### File: unavailable_entities/tests/test_sensor.py
```python
from typing import Any, Dict, Optional
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_UNIQUE_ID,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry
from homeassistant.setup import async_setup_component
from custom_components.unavailable_entities.sensor import ATTR_ENTITIES, DEFAULT_NAME
async def setup_test_entities(
    hass: HomeAssistant, config: Optional[Dict[str, Any]] = None
) -> None:
assert await async_setup_component(
hass,
DOMAIN,
{
"sensor": {
"platform": "unavailable_entities",
**(config or {}),
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
async def test_sensor_manual_update(hass: HomeAssistant) -> None:
await async_setup_component(hass, "homeassistant", {})
await setup_test_entities(hass)
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["sensor.unavailable_entities"]},
blocking=True,
)
state = hass.states.get("sensor.unavailable_entities")
assert int(state.state) == 0
assert state.attributes[ATTR_ENTITIES] == set()
sensors = {
"binary_sensor.test": "off",
"media_player.test": "off",
"sensor.test": "off",
}
for sensor_id, sensor_state in sensors.items():
hass.states.async_set(sensor_id, sensor_state)
await hass.async_block_till_done()
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["sensor.unavailable_entities"]},
blocking=True,
)
state = hass.states.get("sensor.unavailable_entities")
assert int(state.state) == 0
assert state.attributes[ATTR_ENTITIES] == set()
for sensor_id in sensors:
hass.states.async_set(sensor_id, "unavailable")
await hass.async_block_till_done()
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["sensor.unavailable_entities"]},
blocking=True,
)
state = hass.states.get("sensor.unavailable_entities")
assert int(state.state) == len(sensors)
assert state.attributes[ATTR_ENTITIES] == sensors.keys()
async def test_sensor_defaults(hass: HomeAssistant) -> None:
await setup_test_entities(hass)
state = hass.states.get("sensor.unavailable_entities")
assert state
assert state.entity_id == "sensor.unavailable_entities"
assert state.name == DEFAULT_NAME
assert state.attributes.get(ATTR_ICON) == "mdi:check-circle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
async def test_sensor_customizations(hass: HomeAssistant) -> None:
sensor_name = "<NAME>"
await setup_test_entities(hass, {CONF_NAME: sensor_name})
state = hass.states.get("sensor.unavailable_entities")
assert state
assert state.entity_id == "sensor.unavailable_entities"
assert state.name == sensor_name
async def test_sensor_unique_id(hass: HomeAssistant) -> None:
unique_id = "abc123"
await setup_test_entities(hass, {CONF_UNIQUE_ID: unique_id})
registry = entity_registry.async_get(hass)
assert registry.async_get_entity_id("sensor", "unavailable_entities", unique_id)
``` |
{
"source": "joshuaspence/vtctrans",
"score": 2
} |
#### File: vtctrans/vtctrans/__init__.py
```python
import sys
from vtctrans.varnishtest import VarnishTest
def main():
vtc = VarnishTest()
argv = sys.argv
    argc = len(argv)
    if argc == 1:
        # No test files were given on the command line, so there is nothing to run.
        exit(1)
results = vtc.execVarnishTest(argv[1:])
for result in results:
if (result['result'] != 'passed'):
exit(1)
exit(0)
``` |
{
"source": "joshua-s/punch",
"score": 2
} |
#### File: punch/punch/file_configuration.py
```python
from __future__ import print_function, absolute_import, division
import six
import jinja2
class FileConfiguration(object):
def __init__(self, filepath, local_variables, global_variables=None):
self.config = {}
if global_variables:
self.config.update(global_variables)
new_local_variables = {}
env = jinja2.Environment(undefined=jinja2.DebugUndefined)
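        # Each local value is itself a Jinja2 template that may reference GLOBALS;
        # DebugUndefined leaves unresolved placeholders such as {{ major }} intact in the rendered output.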
for key, value in local_variables.items():
if six.PY2:
value = value.decode('utf8')
template = env.from_string(value)
new_local_variables[key] = template.render(
GLOBALS=global_variables)
self.config.update(new_local_variables)
self.path = filepath
@classmethod
def from_dict(cls, dic, global_variables):
return cls(dic['path'], dic, global_variables)
```
#### File: punch/vcs_repositories/hg_repo.py
```python
from __future__ import print_function, absolute_import, division
import os
import re
import six
from punch.vcs_repositories import vcs_repo as vr
from punch.vcs_repositories import exceptions as e
class HgRepo(vr.VCSRepo):
DEFAULT_BRANCH = 'default'
def __init__(self, working_path, config_obj):
if six.PY2:
super(HgRepo, self).__init__(working_path, config_obj)
else:
super().__init__(working_path, config_obj)
self.branch = self.config_obj.options.get('branch', 'default')
self._recorded_branch = None
def get_current_branch(self):
stdout = self._run([self.command, "branch"])
branch = stdout.replace("\n", "")
return branch
def get_branches(self):
stdout = self._run([self.command, "branches"])
return {self._parse_branch_line(l) for l in stdout.splitlines()}
def get_tags(self):
tags_str = self._run([self.command, "tags"])
tags = map(
lambda l: l.rsplit(" ", 1)[0].strip(),
tags_str.splitlines()
)
return "\n".join(tags)
def get_summary(self):
output = self._run([self.command, "summary"])
keys = {"branch", "commit", "update"}
summary = {}
for l in output.splitlines():
try:
k, body = l.split(": ", 1)
if k in keys:
summary[k] = body
except ValueError:
pass
return summary
def pre_start_release(self):
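        # Releases must start from a clean working copy; remember the current branch so it can be restored, then switch to the configured release branch.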
if not self._is_clean():
raise e.RepositoryStatusError(
"Cannot update default while repository" +
" contains uncommitted changes")
self._recorded_branch = self.get_current_branch()
self._change_branch(self.branch)
branch = self.get_current_branch()
if branch != self.branch:
raise e.RepositoryStatusError(
"Current branch shall be {} but is {}".format(
self.branch, branch))
def start_release(self):
pass
def finish_release(self):
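        # Commit any outstanding changes and tag the release, always restoring the original branch afterwards.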
self.get_current_branch()
try:
if self._is_clean():
return
command_line = [self.command, "commit"]
command_line.extend(["-m", self.config_obj.commit_message])
self._run(command_line)
tag = self._configured_tag()
self.tag(tag)
finally:
self._recover_branch()
def tag(self, tag):
self._run([self.command, "tag", tag])
def _recover_branch(self):
if self._recorded_branch is not None:
self._change_branch(self._recorded_branch)
self._recorded_branch = None
def _change_branch(self, branch):
self._run([self.command, "update", branch])
def _check_config(self):
# Tag names cannot contain spaces
tag = self.config_obj.options.get('tag', '')
if ' ' in tag:
raise e.RepositoryConfigurationError(
"""You specified "'tag': {}".""".format(tag) +
" Tag names cannot contain spaces")
if re.match("^\d+$", tag):
raise e.RepositoryConfigurationError(
"""You specified "'tag': {}".""".format(tag) +
" Tag names cannot be just digits")
def _check_system(self):
if six.PY2:
super(HgRepo, self)._check_system()
else:
super()._check_system()
if not os.path.exists(os.path.join(self.working_path, '.hg')):
raise e.RepositorySystemError(
"The current directory {} is not a Hg repository".format(
self.working_path))
def _set_command(self):
self.commands = ['hg']
self.command = 'hg'
def _is_clean(self):
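        # `hg summary` reports "commit: ... (clean)" when the working directory has no uncommitted changes.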
return self.get_summary()["commit"].endswith("(clean)")
@classmethod
def _parse_branch_line(cls, line):
        return re.match(r"(?P<tag>.+)\s+\d+:.+$", line).group("tag").strip()
def _configured_tag(self):
try:
return self.config_obj.options['tag']
except KeyError:
return self.config_obj.options['new_version']
```
#### File: punch/tests/test_file_configuration.py
```python
import pytest
from punch import file_configuration as fc
@pytest.fixture
def global_variables():
return {
'serializer': '{{ major }}.{{ minor }}.{{ patch }}',
'mark': 'just a mark'
}
@pytest.fixture
def local_variables():
return {
'serializer': '{{ major }}.{{ minor }}'
}
@pytest.fixture
def file_configuration_dict():
return {
'path': 'pkg/__init__.py',
'serializer': '{{ major }}.{{ minor }}'
}
def test_file_configuration_from_string_local_variables_take_precedence(
local_variables, global_variables):
fconf = fc.FileConfiguration(
'pkg/__init__.py',
local_variables,
global_variables
)
assert fconf.path == 'pkg/__init__.py'
assert fconf.config['serializer'] == '{{ major }}.{{ minor }}'
assert fconf.config['mark'] == 'just a mark'
def test_file_configuration_from_string_can_include_global_variables(
global_variables):
local_variables = {
'serializer': '__version__ = {{GLOBALS.serializer}}'
}
fconf = fc.FileConfiguration(
'pkg/__init__.py',
local_variables,
global_variables
)
assert fconf.path == 'pkg/__init__.py'
assert fconf.config['serializer'] == \
'__version__ = {{ major }}.{{ minor }}.{{ patch }}'
assert fconf.config['mark'] == 'just a mark'
def test_file_conf_fr_str_path_cannot_be_overridden_by_global_variables(
local_variables, global_variables):
global_variables['path'] = 'a/new/path'
fconf = fc.FileConfiguration(
'pkg/__init__.py',
local_variables,
global_variables
)
assert fconf.path == 'pkg/__init__.py'
def test_file_conf_fr_str_path_cannot_be_overridden_by_local_variables(
local_variables, global_variables):
local_variables['path'] = 'a/new/path'
fconf = fc.FileConfiguration(
'pkg/__init__.py',
local_variables,
global_variables
)
assert fconf.path == 'pkg/__init__.py'
def test_file_configuration_from_dict_local_variables_take_precedence(
file_configuration_dict, global_variables):
fconf = fc.FileConfiguration.from_dict(
file_configuration_dict,
global_variables
)
assert fconf.path == 'pkg/__init__.py'
assert fconf.config['serializer'] == '{{ major }}.{{ minor }}'
assert fconf.config['mark'] == 'just a mark'
def test_file_conf_fr_dict_path_cannot_be_overridden_by_global_variables(
file_configuration_dict, global_variables):
global_variables['path'] = 'a/new/path'
fconf = fc.FileConfiguration.from_dict(
file_configuration_dict,
global_variables
)
assert fconf.path == 'pkg/__init__.py'
```
#### File: punch/tests/test_vcs_configuration.py
```python
import pytest
from punch import vcs_configuration as vc
@pytest.fixture
def global_variables():
return {
'serializer': '{{ major }}.{{ minor }}.{{ patch }}',
'mark': 'just a mark'
}
@pytest.fixture
def vcs_configuration_dict():
return {
'name': 'git',
'commit_message': "Version updated to {{ new_version }}",
'finish_release': True,
'options': {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
}
}
@pytest.fixture
def special_variables():
return {
'current_version': '1.2.3',
'new_version': '1.3.0'
}
def test_vcs_configuration_from_string(
vcs_configuration_dict, global_variables, special_variables):
vcsconf = vc.VCSConfiguration(vcs_configuration_dict['name'],
vcs_configuration_dict['options'],
global_variables,
special_variables,
vcs_configuration_dict['commit_message']
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated to 1.3.0"
assert vcsconf.finish_release is True
assert vcsconf.options == expected_options
def test_vcs_configuration_from_dict(
vcs_configuration_dict, global_variables, special_variables):
vcsconf = vc.VCSConfiguration.from_dict(
vcs_configuration_dict,
global_variables,
special_variables
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated to 1.3.0"
assert vcsconf.finish_release is True
assert vcsconf.options == expected_options
def test_vcs_configuration_from_dict_without_commit_message(
vcs_configuration_dict, global_variables, special_variables):
vcs_configuration_dict.pop('commit_message')
vcsconf = vc.VCSConfiguration.from_dict(
vcs_configuration_dict,
global_variables,
special_variables
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated 1.2.3 -> 1.3.0"
assert vcsconf.finish_release is True
assert vcsconf.options == expected_options
def test_vcs_configuration_from_dict_without_finish_release(
vcs_configuration_dict, global_variables, special_variables):
vcs_configuration_dict.pop('finish_release')
vcsconf = vc.VCSConfiguration.from_dict(
vcs_configuration_dict,
global_variables,
special_variables
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': '',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated to 1.3.0"
assert vcsconf.finish_release is True
assert vcsconf.options == expected_options
def test_vcs_configuration_from_dict_without_options(
vcs_configuration_dict, global_variables, special_variables):
vcs_configuration_dict.pop('options')
vcsconf = vc.VCSConfiguration.from_dict(
vcs_configuration_dict,
global_variables,
special_variables
)
assert vcsconf.name == 'git'
assert vcsconf.commit_message == "Version updated to 1.3.0"
assert vcsconf.finish_release is True
def test_vcs_configuration_from_dict_can_use_global_variables(
vcs_configuration_dict, global_variables, special_variables):
vcs_configuration_dict['commit_message'] = "Mark: {{ mark }}"
vcsconf = vc.VCSConfiguration.from_dict(
vcs_configuration_dict,
global_variables,
special_variables
)
assert vcsconf.commit_message == "Mark: just a mark"
def test_vcs_configuration_from_dict_special_variables_take_precedence(
vcs_configuration_dict, global_variables, special_variables):
vcs_configuration_dict['commit_message'] = "{{ current_version }}"
global_variables['current_version'] = "5.0.0"
vcsconf = vc.VCSConfiguration.from_dict(
vcs_configuration_dict,
global_variables,
special_variables
)
assert vcsconf.commit_message == "1.2.3"
def test_vcs_configuration_from_dict_options_templates_are_processed(
vcs_configuration_dict, global_variables, special_variables):
vcs_configuration_dict['options']['annotation_message'] = \
"Updated {{ current_version}} -> {{ new_version }}"
vcsconf = vc.VCSConfiguration.from_dict(
vcs_configuration_dict,
global_variables,
special_variables
)
expected_options = {
'make_release_branch': False,
'annotate_tags': False,
'annotation_message': 'Updated 1.2.3 -> 1.3.0',
'current_version': '1.2.3',
'new_version': '1.3.0'
}
assert vcsconf.options == expected_options
```
#### File: tests/vcs_repositories/test_hg_repo.py
```python
import subprocess
import os
import pytest
import sys
from six.moves import configparser
from punch import vcs_configuration as vc
from punch.vcs_repositories import hg_repo as hr, exceptions as re
from tests.conftest import safe_devnull
pytestmark = pytest.mark.slow
def hg_repo_add_file(temp_hg_dir, fname, content="", out=None):
if out is None:
out = safe_devnull()
with open(os.path.join(temp_hg_dir, fname), "w") as f:
f.write(content)
subprocess.check_call(["hg", "add", fname], cwd=temp_hg_dir, stdout=out)
def hg_repo_add_branch(temp_hg_dir, branch, message=None, out=None):
if out is None:
out = sys.stdout
if message is None:
message = "Starting new branch " + branch
subprocess.check_call(
["hg", "branch", "-f", branch],
cwd=temp_hg_dir,
stdout=out
)
subprocess.check_call(
["hg", "commit", "-m", message],
cwd=temp_hg_dir,
stdout=out
)
def hg_repo_change_branch(temp_hg_dir, branch, out=None):
if out is None:
out = sys.stdout
subprocess.check_call(
["hg", "update", branch],
cwd=temp_hg_dir,
stdout=out
)
def hg_log(temp_hg_dir):
p = subprocess.Popen(["hg", "log"], cwd=temp_hg_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout.decode('utf8')
@pytest.fixture
def temp_empty_hg_dir(temp_empty_dir):
subprocess.check_call(["hg", "init", temp_empty_dir])
hgrc = os.path.join(temp_empty_dir, ".hg", "hgrc")
cp = configparser.ConfigParser()
cp.read(hgrc)
cp.add_section("ui")
cp.set("ui", "username", "PyTest <<EMAIL>>")
with open(hgrc, "w+") as f:
cp.write(f)
return temp_empty_dir
@pytest.fixture
def temp_hg_dir(temp_empty_hg_dir, safe_devnull):
with open(os.path.join(temp_empty_hg_dir, "README.md"), "w") as f:
f.writelines(["# Test file", "This is just a test file for punch"])
subprocess.check_call(["hg", "add", "README.md"],
cwd=temp_empty_hg_dir, stdout=safe_devnull)
subprocess.check_call(
["hg", "commit", "-m", "Initial addition"],
cwd=temp_empty_hg_dir,
stdout=safe_devnull
)
return temp_empty_hg_dir
@pytest.fixture
def hg_other_branch(temp_hg_dir):
hg_repo_add_branch(temp_hg_dir, "other")
hg_repo_change_branch(temp_hg_dir, hr.HgRepo.DEFAULT_BRANCH)
return temp_hg_dir
@pytest.fixture
def empty_vcs_configuration():
return vc.VCSConfiguration(
'hg', {}, {}, {'current_version': 'a', 'new_version': 'b'}
)
@pytest.fixture
def other_branch_vcs_configuration():
return vc.VCSConfiguration(
'hg', {"branch": "other"}, {},
{'current_version': 'a', 'new_version': 'b'}
)
def make_ready_to_finish_repo(temp_hg_dir, **kwargs):
    # Plain helper so tests can pass extra VCS options (e.g. a custom tag) directly;
    # pytest fixtures are not meant to be called with keyword arguments.
    release_name = "1.0"
    commit_message = "A commit message"
    config = vc.VCSConfiguration(
        'git', kwargs, global_variables={},
        special_variables={'new_version': release_name},
        commit_message=commit_message
    )
    repo = hr.HgRepo(temp_hg_dir, config)
    repo.pre_start_release()
    repo.start_release()
    hg_repo_add_file(temp_hg_dir, "version.txt", release_name + "\n")
    return repo
@pytest.fixture
def ready_to_finish_repo(temp_hg_dir):
    return make_ready_to_finish_repo(temp_hg_dir)
def test_init(temp_empty_hg_dir, empty_vcs_configuration):
repo = hr.HgRepo(temp_empty_hg_dir, empty_vcs_configuration)
assert repo.working_path == temp_empty_hg_dir
def test_init_with_uninitialized_dir(temp_empty_dir, empty_vcs_configuration):
with pytest.raises(re.RepositorySystemError) as exc:
hr.HgRepo(temp_empty_dir, empty_vcs_configuration)
assert str(exc.value) == \
"The current directory {} is not a Hg repository".format(
temp_empty_dir)
def test_get_current_branch(temp_hg_dir, empty_vcs_configuration):
repo = hr.HgRepo(temp_hg_dir, empty_vcs_configuration)
assert repo.get_current_branch() == repo.DEFAULT_BRANCH
def test_get_tags(temp_hg_dir, empty_vcs_configuration):
repo = hr.HgRepo(temp_hg_dir, empty_vcs_configuration)
assert repo.get_tags() == 'tip'
def test_pre_start_release(temp_hg_dir, empty_vcs_configuration):
repo = hr.HgRepo(temp_hg_dir, empty_vcs_configuration)
repo.pre_start_release()
assert repo.get_current_branch() == repo.DEFAULT_BRANCH
def test_pre_start_release_start_from_different_branch(
temp_hg_dir, empty_vcs_configuration):
hg_dir = temp_hg_dir
repo = hr.HgRepo(hg_dir, empty_vcs_configuration)
hg_repo_add_branch(hg_dir, "other")
repo.pre_start_release()
assert repo.get_current_branch() == repo.DEFAULT_BRANCH
def test_pre_start_release_should_use_other_branch(
temp_hg_dir, other_branch_vcs_configuration):
hg_dir = temp_hg_dir
repo = hr.HgRepo(hg_dir, other_branch_vcs_configuration)
hg_repo_add_branch(hg_dir, "other")
repo.pre_start_release()
hg_repo_change_branch(hg_dir, repo.DEFAULT_BRANCH)
repo.pre_start_release()
assert repo.get_current_branch() == "other"
def test_pre_start_release_in_unclean_state(
temp_hg_dir, empty_vcs_configuration):
hg_dir = temp_hg_dir
with open(os.path.join(hg_dir, "README.md"), "w") as f:
f.writelines(["Uncommitted lines"])
repo = hr.HgRepo(hg_dir, empty_vcs_configuration)
with pytest.raises(re.RepositoryStatusError):
repo.pre_start_release()
assert repo.get_current_branch() == repo.DEFAULT_BRANCH
def test_pre_start_release_starting_from_different_branch_in_unclean_state(
temp_hg_dir, empty_vcs_configuration):
hg_dir = temp_hg_dir
hg_repo_add_branch(hg_dir, "other")
with open(os.path.join(hg_dir, "README.md"), "w") as f:
f.writelines(["Uncommitted lines"])
repo = hr.HgRepo(hg_dir, empty_vcs_configuration)
with pytest.raises(re.RepositoryStatusError):
repo.pre_start_release()
assert repo.get_current_branch() == "other"
def test_start_release_should_be_in_defined_branch(
hg_other_branch, other_branch_vcs_configuration):
repo = hr.HgRepo(hg_other_branch, other_branch_vcs_configuration)
repo.pre_start_release()
repo.start_release()
assert repo.get_current_branch() == "other"
def test_finish_release_without_changes(
hg_other_branch, other_branch_vcs_configuration):
repo = hr.HgRepo(hg_other_branch, other_branch_vcs_configuration)
repo.pre_start_release()
repo.start_release()
repo.finish_release()
assert repo.get_current_branch() == repo.DEFAULT_BRANCH
assert 'b' not in repo.get_tags()
def test_finish_should_recover_start_branch(
hg_other_branch, other_branch_vcs_configuration):
hg_repo_add_branch(hg_other_branch, "third")
repo = hr.HgRepo(hg_other_branch, other_branch_vcs_configuration)
repo.pre_start_release()
repo.start_release()
repo.finish_release()
assert repo.get_current_branch() == "third"
def test_finish_release_with_message(ready_to_finish_repo):
d = ready_to_finish_repo.working_path
config = ready_to_finish_repo.config_obj
commit_message = config.commit_message
ready_to_finish_repo.finish_release()
log = hg_log(d)
assert commit_message in log
def test_finish_release_without_release_branch(ready_to_finish_repo):
config = ready_to_finish_repo.config_obj
release_name = config.options['new_version']
ready_to_finish_repo.finish_release()
assert release_name not in ready_to_finish_repo.get_branches()
def test_finish_write_tag(ready_to_finish_repo):
config = ready_to_finish_repo.config_obj
release_name = config.options['new_version']
ready_to_finish_repo.finish_release()
assert release_name in ready_to_finish_repo.get_tags()
def test_finish_release_with_custom_tag(temp_hg_dir):
tag = "Version_{}".format("1.0")
    repo = make_ready_to_finish_repo(temp_hg_dir, tag=tag)
repo.finish_release()
assert tag in repo.get_tags()
def test_finish_release_custom_tag_cannot_contain_spaces(temp_hg_dir):
release_name = "1.0"
commit_message = "A commit message"
tag = "Version {}".format(release_name)
config = vc.VCSConfiguration('hg', {'tag': tag}, global_variables={},
special_variables={
'new_version': release_name},
commit_message=commit_message)
with pytest.raises(re.RepositoryConfigurationError):
hr.HgRepo(temp_hg_dir, config)
def test_finish_release_custom_tag_cannot_be_a_number(temp_hg_dir):
release_name = "1.0"
commit_message = "A commit message"
tag = "12234"
config = vc.VCSConfiguration('hg', {'tag': tag}, global_variables={},
special_variables={
'new_version': release_name},
commit_message=commit_message)
with pytest.raises(re.RepositoryConfigurationError):
hr.HgRepo(temp_hg_dir, config)
def test_start_summary(temp_hg_dir, empty_vcs_configuration):
repo = hr.HgRepo(temp_hg_dir, empty_vcs_configuration)
repo.pre_start_release()
assert repo.get_summary() == {"branch": "default", "commit": "(clean)",
"update": "(current)"}
def test_get_branches(hg_other_branch, empty_vcs_configuration):
repo = hr.HgRepo(hg_other_branch, empty_vcs_configuration)
assert {"default", "other"} == repo.get_branches()
def test_tag(temp_hg_dir, empty_vcs_configuration):
repo = hr.HgRepo(temp_hg_dir, empty_vcs_configuration)
repo.tag("just_a_tag")
assert "just_a_tag" in repo.get_tags()
```
#### File: tests/vcs_use_cases/test_tag_use_case.py
```python
import mock
from punch.vcs_use_cases import tag
def test_pre_tag():
repo = mock.Mock()
use_case = tag.VCSTagUseCase(repo)
use_case.tag("just_a_tag")
assert repo.tag.called_with("just_a_tag")
``` |
{
"source": "joshuass/PyHail",
"score": 3
} |
#### File: PyHail/pyhail/hacc.py
```python
import numpy as np
from pyhail import common
def main(radar, fz_level, pressure, z_fname, hsda_fname, mesh_fname, sp_reflectivity_threshold=55, heights_fieldname='gate_z'):
"""
    Hail accumulation as defined by Robinson et al. (2018) and Kalina et al. (2016).
    If the heights field already exists on the radar object, it is used directly, saving a small amount of computation time.
Parameters:
===========
radar : object
Py-ART radar object.
fz_level: int
wet bulb freezing level (m)
pressure: float (1,)
mean pressure between the surface and the height of the 0C wet-bulb temperature
z_fname: str
reflectivity field name
hsda_fname: str
        field name for the HSDA (hail size discrimination algorithm) output
mesh_fname: str
field name for MESH
sp_reflectivity_threshold: float
value used to threshold reflectivity for single pol analysis
Returns:
hAcc_meta: dict
pyart field dictionary containing hAcc dataset
"""
Z = radar.fields[z_fname]["data"]
if np.ma.is_masked(Z):
Z = Z.filled(0)
if hsda_fname is None:
#use a simple single pol HCA for hail (fixed threshold)
hail_hca = Z >= sp_reflectivity_threshold
else:
#use hsda to determine hail
hail_hca = radar.fields[hsda_fname]["data"]
if np.ma.is_masked(hail_hca):
hail_hca = hail_hca.filled(0)
#load mesh
mesh = radar.get_field(0, mesh_fname)
if np.ma.is_masked(mesh):
mesh = mesh.filled(0)
# calculate height
try:
heights = radar.fields[heights_fieldname]['data']
    except KeyError:  # heights field absent; derive gate heights from the radar geometry
rg, azg = np.meshgrid(radar.range["data"], radar.azimuth["data"])
rg, eleg = np.meshgrid(radar.range["data"], radar.elevation["data"])
_, _, heights = common.antenna_to_cartesian(rg / 1000, azg, eleg)
n = 0.64 # packing density of monodisperse spheres (Kalina et al. 2016)
ph = 900 # density of ice (kg m-3)
epsilon = 0.814
Ze = 10.0 ** (Z / 10.0) # convert Z to Ze
IWC = (
(4.4 * 10 ** -5) * Ze ** (0.71) / 1000
    )  # Ice Water Content (kg m-3) derived from Ze following Heymsfield and Miller 1998
# remove IWC values where hail_hca is not hail (less than 1)
IWC[hail_hca < 1] = 0
# remove IWC values where temperature is at or below 0
IWC[heights > fz_level] = 0
# get lowest valid IWC
# insert sweeps into 3D array (el, az, rg)
el_sort_idx = np.argsort(radar.fixed_angle["data"])
az = radar.get_azimuth(0)
rg = radar.range["data"]
IWC_3d = np.ma.zeros((len(el_sort_idx), len(az), len(rg)))
for i, el_idx in enumerate(el_sort_idx):
IWC_3d[i, :, :] = IWC[radar.get_slice(el_idx)]
# mask zero values
IWC_3d_masked = np.ma.masked_array(IWC_3d, IWC_3d == 0)
data_shape = IWC_3d_masked.shape
# find the lowest unmasked value by first finding edges
edges = np.ma.notmasked_edges(IWC_3d_masked, axis=0)
# use first edge on axis 0 (lowest in height)
IWC_lowest_valid = np.zeros_like(mesh)
IWC_lowest_valid[edges[0][1], edges[0][2]] = IWC_3d_masked[edges[0]]
    # pressure correction from Heymsfield and Wright (2014)
PC = (1000 / pressure) ** 0.545
    # diameter-fall speed relation from Heymsfield and Wright (2014), units of cm/s
Vt = 488 * (mesh / 10) ** 0.84 * PC
# calculate LASH (units of cm/s)
hAcc = (1 / epsilon) * (1 / (n * ph)) * IWC_lowest_valid * Vt
hAcc = hAcc * 60 # convert cm/s to cm/min
# hAcc is only valid at the surface, to represent it in pyart radar objects, insert it into the lowest sweep
hAcc_field = np.zeros_like(radar.fields[z_fname]["data"])
hAcc_field[radar.get_slice(el_sort_idx[0])] = hAcc
hAcc_meta = {
"data": hAcc_field,
"units": "cm/min",
"long_name": "hail accumulation",
"description": "Hail Accumulation Retrieval developed by Wallace et al. (2019) doi:10.1175/WAF-D-18-0053.1",
"comments": "only valid in the lowest sweep",
}
return hAcc_meta
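# --- Hedged usage sketch (not part of the original module) ---
# Assumes Py-ART is installed and that the radar file already contains the
# field names used below; the file path, field names, freezing level and
# pressure values are placeholders rather than values from this repository.
if __name__ == "__main__":
    import pyart
    radar = pyart.io.read("example_radar_volume.nc")
    hacc_meta = main(
        radar,
        fz_level=3500,        # assumed wet-bulb freezing level (m)
        pressure=850.0,       # assumed mean pressure (hPa) below the freezing level
        z_fname="reflectivity",
        hsda_fname=None,      # fall back to the single-pol reflectivity threshold
        mesh_fname="mesh",
    )
    radar.add_field("hail_accumulation", hacc_meta, replace_existing=True)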
``` |
{
"source": "joshua-sterner/stockwell_transform",
"score": 3
} |
#### File: stockwell_transform/stockwell/smt.py
```python
import numpy as npy
from numpy import arange, sin, pi
from st import st
from math import sqrt
def calcK(bw, N, srate):
"""Calculate K for a given bandwidth, length, and sampling rate.
bw = 2p * fr, where fr = srate / N (frequency resolution).
p specifies the half bandwidth in bins (fr units).
K = 2p - 1"""
K = int(bw * float(N) / srate + .5) - 1
if K < 1: K = 1
return K
def calcbw(K, N, srate):
"""Calculate the bandwidth given K."""
return float(K + 1) * srate / N
# Precompute the tapers.
def calc_tapers(K, N):
    return [sine_taper(k, N) for k in range(K)]
# Multitaper Stockwell transform.
def mtst(K, tapers, x, lo, hi):
N = len(x)
K2 = float(K * K)
s = 0.
n = 0.
for k in range(K):
X = st(tapers[k] * x, int(lo), int(hi))
mu = 1. - k * k / K2
s += mu * abs(X)**2
n += mu
s *= N / n
return s
# Riedel & Sidorenko sine tapers.
def sine_taper(k, N):
"Compute the kth sine taper of length N"
s = sqrt(2. / (N + 1))
d = arange(N, dtype = 'd')
return s * sin(pi * (k + 1) * (d + 1.) / (N + 1))
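# --- Hedged usage sketch (not part of the original module) ---
# Requires the compiled ``st`` extension imported above; the test signal,
# bandwidth and sampling rate below are arbitrary demonstration values.
if __name__ == "__main__":
    N = 256
    srate = 100.0
    t = arange(N) / srate
    x = sin(2. * pi * 10. * t) + sin(2. * pi * 25. * t)
    K = calcK(4.0, N, srate)      # number of sine tapers for a 4 Hz bandwidth
    tapers = calc_tapers(K, N)
    power = mtst(K, tapers, x, lo=0, hi=N // 2)
    print("bandwidth:", calcbw(K, N, srate), "power shape:", power.shape)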
``` |
{
"source": "joshua-sterner/torch_DCEC",
"score": 3
} |
#### File: joshua-sterner/torch_DCEC/ssim_matrix.py
```python
from pathlib import Path
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import mean_squared_error
import numpy as np
import argparse
from PIL import Image
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('--image_dir', type=str, required=True)
argparser.add_argument('--image_list_file', type=str, required=True)
argparser.add_argument('--ssim_matrix_file', type=str, required=True)
argparser.add_argument('--mse_matrix_file', type=str, required=True)
argparser.add_argument('--image_symlink_dir', type=str, required=True)
args = argparser.parse_args()
image_dir = Path(args.image_dir)
images = []
for image in image_dir.glob('**/*'):
if image.is_dir():
continue
images.append(image)
# Save image list so we can identify images later
images.sort()
image_list_file = open(args.image_list_file, 'w')
zfill_len = len(str(len(images)))
for i in range(len(images)):
image_list_file.write(f'{i}, {images[i]}\n')
symlink_dir = Path(args.image_symlink_dir) / Path(str(i).zfill(zfill_len))
symlink_dir.mkdir(exist_ok = True, parents=True)
symlink_target = Path('../'*len(symlink_dir.parts)) / images[i]
(symlink_dir / images[i].name).symlink_to(symlink_target)
image_list_file.close()
ssim_matrix_file = open(args.ssim_matrix_file, 'w')
mse_matrix_file = open(args.mse_matrix_file, 'w')
for i in range(len(images)):
i_img = np.array(Image.open(images[i]))
for j in range(i+1, len(images)):
j_img = np.array(Image.open(images[j]))
ssim_matrix_file.write(str(ssim(i_img, j_img, multichannel=True))+',')
mse_matrix_file.write(str(mean_squared_error(i_img, j_img))+',')
ssim_matrix_file.write('\n')
mse_matrix_file.write('\n')
ssim_matrix_file.close()
mse_matrix_file.close()
if __name__ == '__main__':
main()
``` |
{
"source": "joshua-stone/OpenRecipeBook-Ebook",
"score": 3
} |
#### File: lib/asciidocpresenters/recipe.py
```python
from helpers import generate_link, generate_temperature, ref_encode
import units
class RecipeAsciidocPresenter(object):
def __init__(self, recipe):
self._data = recipe.data
@property
def data(self):
return self._data
@property
def config_id(self):
rendered = f"[[{ref_encode(self.data['config_id'])}]]"
return rendered
@property
def name(self):
rendered = f"=== {self.data['name']}"
return rendered
@property
def summary(self):
if self.data['summary']:
rendered = f"\nSummary: {self.data['summary']}\n"
else:
rendered = ''
return rendered
@property
def servings(self):
rendered = f"Yield: {self.data['servings']}\n"
return rendered
@property
def preptime(self):
rendered = f"Prep Time: {self.data['preptime']}\n"
return rendered
@property
def cooktime(self):
rendered = f"Cook Time: {self.data['cooktime']}\n"
return rendered
@property
def equipment(self):
equipment = []
for item in self.data['equipment']:
link = generate_link(item)
equipment.append('* ' + link)
rendered = 'Equipment:\n\n' + '\n'.join(equipment) + '\n'
return rendered
@property
def ingredients(self):
ingredients = []
for item in self.data['ingredients']:
name = item['name']
quantity = units.parse_amount_with_unit(item['quantity'])
if quantity.units == units.NO_UNIT:
text = f"{quantity.magnitude:g} {name}"
else:
text = f"{quantity:~g} of {name}"
if 'link' in item:
link = item['link']
if link.startswith('equipment:') or link.startswith('ingredient:') or link.startswith('recipe:'):
line = f"* <<{ref_encode(link)}, {text}>>"
elif link.startswith('http://') or link.startswith('https://'):
line = f'* {link}[{text}]'
else:
line = f'* {text}'
else:
line = f'* {text}'
ingredients.append(line)
rendered = 'Ingredients:\n\n' + '\n'.join(ingredients) + '\n'
return rendered
@property
def directions(self):
steps = []
for direction in self.data['directions']:
step = '. ' + direction['step']
if 'note' in direction.keys():
step += ' +\n Note: ' + direction['note']
steps.append(generate_temperature(step))
rendered = 'Steps:\n\n' + '\n'.join(steps)
return rendered
@property
def notes(self):
notes = []
for note in self.data['notes']:
notes.append('* ' + note)
if notes:
rendered = '\nNotes:\n\n' + '\n'.join(notes) + '\n\n'
else:
rendered = ''
return rendered
def render(self):
recipe_parts = [
self.config_id,
self.name,
self.summary,
self.servings,
self.preptime,
self.cooktime,
self.equipment,
self.ingredients,
self.directions,
self.notes,
'<<<\n'
]
return '\n'.join(recipe_parts)
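# --- Hedged usage sketch (not part of the original module) ---
# Any object exposing a ``data`` dict with the keys read by the properties
# above can be rendered. The recipe below is a placeholder, the empty
# equipment/ingredient lists avoid the link and unit parsing, and the repo's
# ``helpers`` module (imported at the top of this file) is assumed importable.
if __name__ == "__main__":
    class _StubRecipe:
        data = {
            'config_id': 'recipe-pancakes',
            'name': 'Pancakes',
            'summary': 'Simple weekend pancakes.',
            'servings': 4,
            'preptime': '10 min',
            'cooktime': '15 min',
            'equipment': [],
            'ingredients': [],
            'directions': [{'step': 'Mix the batter and fry until golden.'}],
            'notes': [],
        }
    print(RecipeAsciidocPresenter(_StubRecipe()).render())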
```
#### File: OpenRecipeBook-Ebook/lib/recipe.py
```python
class Recipe(object):
def __init__(self, config_id='', name='', summary='', servings=1, preptime='1 min', cooktime='1 min', equipment=[],
ingredients=[], steps=[], directions=[], notes=[], tags=[]):
self._data = {
'config_id': config_id,
'name': name,
'summary': summary,
'servings': servings,
'preptime': preptime,
'cooktime': cooktime,
'equipment': equipment,
'ingredients': ingredients,
'steps': steps,
'directions': directions,
'notes': notes,
'tags': tags
}
@property
def data(self):
return self._data
def config_id(self, config_id):
recipe = join_params(self.data, {'config_id': config_id})
return self.__class__(**recipe)
def name(self, name):
recipe = join_params(self.data, {'name': name})
return self.__class__(**recipe)
    def summary(self, summary):
recipe = join_params(self.data, {'summary': summary})
return self.__class__(**recipe)
def servings(self, servings):
recipe = join_params(self.data, {'servings': servings})
return self.__class__(**recipe)
def preptime(self, preptime):
recipe = join_params(self.data, {'preptime': preptime})
        return self.__class__(**recipe)
def cooktime(self, cooktime):
recipe = join_params(self.data, {'cooktime': cooktime})
return self.__class__(**recipe)
def equipment(self, equipment):
recipe = join_params(self.data, {'equipment': equipment})
        return self.__class__(**recipe)
def ingredients(self, ingredients):
recipe = join_params(self.data, {'ingredients': ingredients})
        return self.__class__(**recipe)
def steps(self, steps):
        recipe = join_params(self.data, {'steps': steps})
        return self.__class__(**recipe)
def notes(self, notes):
recipe = join_params(self.data, {'notes': notes})
return self.__class__(**recipe)
def tags(self, tags):
recipe = join_params(self.data, {'tags': tags})
return self.__class__(**recipe)
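# --- Hedged usage sketch (not part of the original module) ---
# Note: the setter-style methods above rely on a ``join_params`` helper that
# is not imported in this snippet, so the example sticks to the constructor
# and the ``data`` property; the recipe values are placeholders.
if __name__ == "__main__":
    pancakes = Recipe(config_id='recipe-pancakes', name='Pancakes',
                      servings=4, preptime='10 min', cooktime='15 min')
    print(pancakes.data['name'], 'serves', pancakes.data['servings'])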
```
#### File: lib/scaffold_generators/ingredient.py
```python
from yaml import safe_dump
class StoreGenerator:
def __init__(self):
self.name = None
self.link = None
def prompt_name(self):
self.name = input('Enter store name (leave blank to skip): ')
return bool(self.name)
def prompt_link(self):
self.link = input('Enter store URL: ')
return True
def run_prompt_sequence(self):
return self.prompt_name() and self.prompt_link()
def to_renderable_dict(self):
return {
'name': self.name,
'link': self.link
}
class ProductGenerator:
def __init__(self):
self.name = None
self.stores = []
def prompt_name(self):
self.name = input('Enter product name (leave blank to skip): ')
return bool(self.name)
def prompt_stores(self):
while True:
store = StoreGenerator()
print(f"Add store for {self.name}?")
if not store.run_prompt_sequence():
print(f"Done adding stores to {self.name}.")
break
self.stores.append(store)
print(f"Finished store: {store.name}.\n")
return True
def run_prompt_sequence(self):
return self.prompt_name() and self.prompt_stores()
def to_renderable_dict(self):
return {
'name': self.name,
'stores': list(map(
lambda store: store.to_renderable_dict(),
self.stores
))
}
class IngredientGenerator:
def __init__(self):
self.id = None
self.name = None
self.products = []
def prompt_id(self):
while True:
self.id = input('Enter ingredient ID: ')
if self.id:
break
print("An ingredient ID is required!")
return True
def prompt_name(self):
while True:
self.name = input('Enter ingredient name: ')
if self.name:
break
print("An ingredient name is required!")
return True
def prompt_products(self):
while True:
product = ProductGenerator()
print(f"Add a product listing for {self.name}?")
if not product.run_prompt_sequence():
print(f"Done adding products to {self.name}.")
break
self.products.append(product)
print(f"Finished product {product.name}.\n")
return True
def run_prompt_sequence(self):
return self.prompt_id() and self.prompt_name() and self.prompt_products()
def to_renderable_dict(self):
return {
'id': self.id,
'name': self.name,
'products': list(map(
lambda product: product.to_renderable_dict(),
self.products
))
}
INGREDIENTS_ROOT = 'src/config/ingredients'
def run_ingredient_scaffold_generator(file_name):
output_path = f'{INGREDIENTS_ROOT}/{file_name}.yml'
ingredient = IngredientGenerator()
if not ingredient.run_prompt_sequence():
return False
output = safe_dump(ingredient.to_renderable_dict(), sort_keys = False)
with open(output_path, 'w') as output_file:
output_file.write(output)
return True
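# --- Hedged usage sketch (not part of the original module) ---
# The generator prompts on stdin and writes src/config/ingredients/<file_name>.yml
# relative to the working directory; the file name below is a placeholder.
if __name__ == "__main__":
    if run_ingredient_scaffold_generator('all-purpose-flour'):
        print('Ingredient scaffold written.')
    else:
        print('Scaffold generation skipped.')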
``` |
{
"source": "joshuasundance/restgdf",
"score": 3
} |
#### File: restgdf/restgdf/_getinfo.py
```python
import re
from typing import Union, Optional
from pandas import DataFrame, concat
from requests import Session, Response
FIELDDOESNOTEXIST: IndexError = IndexError("Field does not exist")
DEFAULTDICT: dict = {
"where": "1=1",
"outFields": "*",
"returnGeometry": True,
"returnCountOnly": False,
"f": "json",
}
DEFAULTDICTSTR: str = "\n".join([f"{k}: {v}" for k, v in DEFAULTDICT.items()])
def default_data(data: dict = None, default_dict: dict = None) -> dict:
f"""Return data dict after adding default values
Will not replace existing values
Defaults:
{DEFAULTDICTSTR}
"""
data = data or {}
default_dict = default_dict or DEFAULTDICT
new_data: dict = {k: v for k, v in data.items()}
for k, v in default_dict.items():
new_data[k] = new_data.get(k, v)
return new_data
def get_feature_count(url: str, session: Optional[Session] = None, **kwargs) -> int:
"""Return int number of features for a service
Keyword arguments are passed on to post request
"""
session = session or Session()
datadict: dict = {"where": "1=1", "returnCountOnly": True, "f": "json"}
if "data" in kwargs:
datadict["where"] = kwargs["data"].get("where", "1=1")
if "token" in kwargs["data"]:
datadict["token"] = kwargs["data"]["token"]
xkwargs: dict = {k: v for k, v in kwargs.items() if k != "data"}
response: Response = session.post(
f"{url}/query",
data=datadict,
**xkwargs,
# the line above provides keyword arguments other than data dict
# because data dict is manipulated for this function
# (this allows the use of token authentication, for example)
)
return response.json()["count"]
def get_jsondict(url: str, session: Optional[Session] = None, **kwargs) -> dict:
"""Return dict from a service's json
Keyword arguments are passed on to post request
"""
session = session or Session()
datadict: dict = default_data({}) if "data" not in kwargs else kwargs["data"]
xkwargs: dict = {k: v for k, v in kwargs.items() if k != "data"}
response: Response = session.post(url, data=datadict, **xkwargs)
return response.json()
def get_max_record_count(jsondict: dict) -> int:
"""Return int max record count for a service
Keyword arguments are passed on to post request
"""
# TODO: why is there inconsistency in maxRecordCount key
key_pattern: re.Pattern = re.compile(
r"max(imum)?(\s|_)?record(\s|_)?count$", flags=re.IGNORECASE
)
key_list: list = [key for key in jsondict if re.match(key_pattern, key)]
assert len(key_list) == 1
return jsondict[key_list[0]]
def get_offset_range(url: str, session: Optional[Session] = None, **kwargs) -> range:
"""Return offset range object using feature count and max record count
Keyword arguments are passed on to post request
"""
session = session or Session()
feature_count: int = get_feature_count(url, session, **kwargs)
jsondict: dict = get_jsondict(url, session, **kwargs)
max_record_count: int = get_max_record_count(jsondict)
return range(0, feature_count, max_record_count)
# TODO: convert post requests to session requests
# TODO: add docstrings
# TODO: implement class(url, s:Session=Session(), auth=None, **kwargs)
def get_name(jsondict: dict) -> str:
"""Return str name for a service"""
key_pattern: re.Pattern = re.compile("name", flags=re.IGNORECASE)
key_list: list = [key for key in jsondict if re.match(key_pattern, key)]
assert len(key_list) == 1
return jsondict[key_list[0]]
def getfields(jsondict: dict, types: bool = False):
"""Return list of field names or a name:type dict if types=True"""
if types:
return {
f["name"]: f["type"].replace("esriFieldType", "") for f in jsondict["fields"]
}
else:
return [f["name"] for f in jsondict["fields"]]
def getfields_df(jsondict: dict) -> DataFrame:
return DataFrame(
[
(f["name"], f["type"].replace("esriFieldType", ""))
for f in jsondict["fields"]
],
columns=["name", "type"],
)
def getuniquevalues(
url: str,
fields: Union[tuple, str],
sortby: str = None,
session: Optional[Session] = None,
**kwargs,
) -> Union[list, DataFrame]:
"""Return list of unique values if fields is str or list of len 1
Otherwise return pandas DataFrame of unique combinations, optionally sorted by field sortby
"""
session = session or Session()
datadict: dict = {
"where": "1=1",
"f": "json",
"returnGeometry": False,
"returnDistinctValues": True,
"outFields": fields if isinstance(fields, str) else ",".join(fields),
}
if "data" in kwargs:
datadict["where"] = kwargs["data"].get("where", "1=1")
if "token" in kwargs["data"]:
datadict["token"] = kwargs["data"]["token"]
xkwargs: dict = {k: v for k, v in kwargs.items() if k != "data"}
response: Response = session.post(f"{url}/query", data=datadict, **xkwargs)
jsondict: dict = response.json()
res_l: Union[list, None] = None
res_df: Union[DataFrame, None] = None
if isinstance(fields, str):
res_l = [x["attributes"][fields] for x in jsondict["features"]]
elif len(fields) == 1:
res_l = [x["attributes"][fields[0]] for x in jsondict["features"]]
else:
res_df = concat(
[DataFrame(x).T.reset_index(drop=True) for x in jsondict["features"]],
ignore_index=True,
)
if sortby:
res_df = res_df.sort_values(sortby).reset_index(drop=True)
return res_l or res_df
def getvaluecounts(
url: str, field: str, session: Optional[Session] = None, **kwargs
) -> DataFrame:
"""Return DataFrame containing value counts (or dict if retdict=True)"""
session = session or Session()
statstr: str = (
"[{"
f'"statisticType":"count","onStatisticField":"{field}",'
f'"outStatisticFieldName":"{field}_count"'
"}]"
)
datadict: dict = {
"where": "1=1",
"f": "json",
"returnGeometry": False,
"outFields": field,
"outStatistics": statstr,
"groupByFieldsForStatistics": field,
}
if "data" in kwargs:
datadict["where"] = kwargs["data"].get("where", "1=1")
if "token" in kwargs["data"]:
datadict["token"] = kwargs["data"]["token"]
xkwargs: dict = {k: v for k, v in kwargs.items() if k != "data"}
response: Response = session.post(f"{url}/query", data=datadict, **xkwargs)
jsondict: dict = response.json()
features: list = jsondict["features"]
cc: DataFrame = concat(
[DataFrame(x["attributes"], index=[0]) for x in features], ignore_index=True,
)
return cc.sort_values(f"{field}_count", ascending=False).reset_index(drop=True)
def nestedcount(
url: str, fields, session: Optional[Session] = None, **kwargs
) -> DataFrame:
"""Return DataFrame containing count values for 2-field combinations"""
session = session or Session()
statstr: str = "".join(
(
"[",
",".join(
f'{{"statisticType":"count","onStatisticField":"{f}","outStatisticFieldName":"{f}_count"}}'
for f in fields
),
"]",
)
)
datadict: dict = {
"where": "1=1",
"f": "json",
"returnGeometry": False,
"outFields": ",".join(fields),
"outStatistics": statstr,
"groupByFieldsForStatistics": ",".join(fields),
}
if "data" in kwargs:
datadict["where"] = kwargs["data"].get("where", "1=1")
if "token" in kwargs["data"]:
datadict["token"] = kwargs["data"]["token"]
xkwargs: dict = {k: v for k, v in kwargs.items() if k != "data"}
response: Response = session.post(f"{url}/query", data=datadict, **xkwargs)
jsondict: dict = response.json()
features: list = jsondict["features"]
cc: DataFrame = concat(
[DataFrame(x).T.reset_index(drop=True) for x in features], ignore_index=True,
)
dropcol: str = [c for c in cc.columns if c.startswith(f"{fields[0]}_count")][0]
rencol: str = [c for c in cc.columns if c.startswith(f"{fields[1]}_count")][0]
return (
cc.drop(columns=dropcol)
.rename(columns={rencol: "Count"})
.sort_values([fields[0], "Count"], ascending=[True, False])
.reset_index(drop=True)
)
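# --- Hedged usage sketch (not part of the original module) ---
# The URL below is a placeholder for an ArcGIS REST layer endpoint
# (".../FeatureServer/0" or ".../MapServer/0"); keyword arguments such as a
# ``data`` dict containing a token are forwarded to the POST requests.
if __name__ == "__main__":
    layer_url = "https://example.com/arcgis/rest/services/Parcels/FeatureServer/0"
    print("features:", get_feature_count(layer_url))
    info = get_jsondict(layer_url)
    print("layer:", get_name(info))
    print("fields:", getfields(info))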
```
#### File: joshuasundance/restgdf/setup.py
```python
from setuptools import setup
def get_long_description():
with open("README.md", encoding="utf-8") as f:
return f.read()
setup(
name="restgdf",
version="0.2",
description="improved esri rest io for geopandas",
long_description="provides getgdf function and Rest class for gdf from rest io",
url="https://github.com/joshuasundance/restgdf",
author="<NAME>",
author_email="<EMAIL>",
license="BSD",
packages=["restgdf"],
zip_safe=False,
install_requires=["requests", "pandas", "fiona", "geopandas"],
)
``` |
{
"source": "joshuataylor/dagster",
"score": 2
} |
#### File: dagster_graphql/schema/jobs.py
```python
import pendulum
import yaml
from dagster import check
from dagster.core.definitions.job import JobType, RunRequest
from dagster.core.host_representation import (
ExternalScheduleExecutionData,
ExternalScheduleExecutionErrorData,
)
from dagster.core.scheduler.job import (
JobState,
JobStatus,
JobTick,
JobTickStatus,
ScheduleJobData,
SensorJobData,
)
from dagster.core.storage.pipeline_run import PipelineRunsFilter
from dagster.core.storage.tags import TagType, get_tag_type
from dagster_graphql import dauphin
class DauphinJobTick(dauphin.ObjectType):
class Meta:
name = "JobTick"
id = dauphin.NonNull(dauphin.ID)
status = dauphin.NonNull("JobTickStatus")
timestamp = dauphin.NonNull(dauphin.Float)
runIds = dauphin.non_null_list(dauphin.String)
error = dauphin.Field("PythonError")
skipReason = dauphin.String()
runs = dauphin.non_null_list("PipelineRun")
def __init__(self, _, job_tick):
self._job_tick = check.inst_param(job_tick, "job_tick", JobTick)
super(DauphinJobTick, self).__init__(
status=job_tick.status,
timestamp=job_tick.timestamp,
runIds=job_tick.run_ids,
error=job_tick.error,
skipReason=job_tick.skip_reason,
)
def resolve_id(self, _):
return "%s:%s" % (self._job_tick.job_origin_id, self._job_tick.timestamp)
def resolve_runs(self, graphene_info):
instance = graphene_info.context.instance
return [
graphene_info.schema.type_named("PipelineRun")(instance.get_run_by_id(run_id))
for run_id in self._job_tick.run_ids
if instance.has_run(run_id)
]
class DauphinFutureJobTick(dauphin.ObjectType):
class Meta(object):
name = "FutureJobTick"
timestamp = dauphin.NonNull(dauphin.Float)
evaluationResult = dauphin.Field("TickEvaluation")
def __init__(self, job_state, timestamp):
self._job_state = check.inst_param(job_state, "job_state", JobState)
self._timestamp = timestamp
super(DauphinFutureJobTick, self).__init__(
timestamp=check.float_param(timestamp, "timestamp"),
)
def resolve_evaluationResult(self, graphene_info):
if self._job_state.status != JobStatus.RUNNING:
return None
if self._job_state.job_type != JobType.SCHEDULE:
return None
repository_origin = self._job_state.origin.external_repository_origin
if not graphene_info.context.has_repository_location(
repository_origin.repository_location_origin.location_name
):
return None
repository_location = graphene_info.context.get_repository_location(
repository_origin.repository_location_origin.location_name
)
if not repository_location.has_repository(repository_origin.repository_name):
return None
repository = repository_location.get_repository(repository_origin.repository_name)
external_schedule = repository.get_external_job(self._job_state.name)
timezone_str = external_schedule.execution_timezone
if not timezone_str:
timezone_str = pendulum.now().timezone.name
next_tick_datetime = next(external_schedule.execution_time_iterator(self._timestamp))
schedule_time = pendulum.instance(next_tick_datetime).in_tz(timezone_str)
schedule_data = repository_location.get_external_schedule_execution_data(
instance=graphene_info.context.instance,
repository_handle=repository.handle,
schedule_name=external_schedule.name,
scheduled_execution_time=schedule_time,
)
return graphene_info.schema.type_named("TickEvaluation")(schedule_data)
class DauphinTickEvaluation(dauphin.ObjectType):
class Meta(object):
name = "TickEvaluation"
runRequests = dauphin.List("RunRequest")
skipReason = dauphin.String()
error = dauphin.Field("PythonError")
def __init__(self, schedule_data):
check.inst_param(
schedule_data,
"schedule_data",
(ExternalScheduleExecutionData, ExternalScheduleExecutionErrorData),
)
error = (
schedule_data.error
if isinstance(schedule_data, ExternalScheduleExecutionErrorData)
else None
)
skip_reason = (
schedule_data.skip_message
if isinstance(schedule_data, ExternalScheduleExecutionData)
else None
)
self._run_requests = (
schedule_data.run_requests
if isinstance(schedule_data, ExternalScheduleExecutionData)
else None
)
super(DauphinTickEvaluation, self).__init__(skipReason=skip_reason, error=error)
def resolve_runRequests(self, graphene_info):
if not self._run_requests:
return self._run_requests
return [
graphene_info.schema.type_named("RunRequest")(run_request)
for run_request in self._run_requests
]
class DauphinRunRequest(dauphin.ObjectType):
class Meta(object):
name = "RunRequest"
runKey = dauphin.String()
tags = dauphin.non_null_list("PipelineTag")
runConfigYaml = dauphin.NonNull(dauphin.String)
def __init__(self, run_request):
super(DauphinRunRequest, self).__init__(runKey=run_request.run_key)
self._run_request = check.inst_param(run_request, "run_request", RunRequest)
def resolve_tags(self, graphene_info):
return [
graphene_info.schema.type_named("PipelineTag")(key=key, value=value)
for key, value in self._run_request.tags.items()
if get_tag_type(key) != TagType.HIDDEN
]
def resolve_runConfigYaml(self, _graphene_info):
return yaml.dump(self._run_request.run_config, default_flow_style=False, allow_unicode=True)
class DauphinFutureJobTicks(dauphin.ObjectType):
class Meta(object):
name = "FutureJobTicks"
results = dauphin.non_null_list("FutureJobTick")
cursor = dauphin.NonNull(dauphin.Float)
class DauphinJobState(dauphin.ObjectType):
class Meta:
name = "JobState"
id = dauphin.NonNull(dauphin.ID)
name = dauphin.NonNull(dauphin.String)
jobType = dauphin.NonNull("JobType")
status = dauphin.NonNull("JobStatus")
repositoryOrigin = dauphin.NonNull("RepositoryOrigin")
jobSpecificData = dauphin.Field("JobSpecificData")
runs = dauphin.Field(dauphin.non_null_list("PipelineRun"), limit=dauphin.Int())
runsCount = dauphin.NonNull(dauphin.Int)
ticks = dauphin.Field(dauphin.non_null_list("JobTick"), limit=dauphin.Int())
runningCount = dauphin.NonNull(dauphin.Int) # remove with cron scheduler
def __init__(self, job_state):
self._job_state = check.inst_param(job_state, "job_state", JobState)
super(DauphinJobState, self).__init__(
id=job_state.job_origin_id,
name=job_state.name,
jobType=job_state.job_type,
status=job_state.status,
)
def resolve_repositoryOrigin(self, graphene_info):
origin = self._job_state.origin.external_repository_origin
return graphene_info.schema.type_named("RepositoryOrigin")(origin)
def resolve_jobSpecificData(self, graphene_info):
if not self._job_state.job_specific_data:
return None
if self._job_state.job_type == JobType.SENSOR:
return graphene_info.schema.type_named("SensorJobData")(
self._job_state.job_specific_data
)
if self._job_state.job_type == JobType.SCHEDULE:
return graphene_info.schema.type_named("ScheduleJobData")(
self._job_state.job_specific_data
)
return None
def resolve_runs(self, graphene_info, **kwargs):
if self._job_state.job_type == JobType.SENSOR:
filters = PipelineRunsFilter.for_sensor(self._job_state)
else:
filters = PipelineRunsFilter.for_schedule(self._job_state)
return [
graphene_info.schema.type_named("PipelineRun")(r)
for r in graphene_info.context.instance.get_runs(
filters=filters, limit=kwargs.get("limit"),
)
]
def resolve_runsCount(self, graphene_info):
if self._job_state.job_type == JobType.SENSOR:
filters = PipelineRunsFilter.for_sensor(self._job_state)
else:
filters = PipelineRunsFilter.for_schedule(self._job_state)
return graphene_info.context.instance.get_runs_count(filters=filters)
def resolve_ticks(self, graphene_info, limit=None):
ticks = graphene_info.context.instance.get_job_ticks(self._job_state.job_origin_id)
if limit:
ticks = ticks[:limit]
return [graphene_info.schema.type_named("JobTick")(graphene_info, tick) for tick in ticks]
def resolve_runningCount(self, graphene_info):
if self._job_state.job_type == JobType.SENSOR:
return 1 if self._job_state.status == JobStatus.RUNNING else 0
else:
return graphene_info.context.instance.running_schedule_count(
self._job_state.job_origin_id
)
class DauphinJobSpecificData(dauphin.Union):
class Meta:
name = "JobSpecificData"
types = ("SensorJobData", "ScheduleJobData")
class DauphinSensorJobData(dauphin.ObjectType):
class Meta:
name = "SensorJobData"
lastTickTimestamp = dauphin.Float()
lastRunKey = dauphin.String()
def __init__(self, job_specific_data):
check.inst_param(job_specific_data, "job_specific_data", SensorJobData)
super(DauphinSensorJobData, self).__init__(
lastTickTimestamp=job_specific_data.last_tick_timestamp,
lastRunKey=job_specific_data.last_run_key,
)
class DauphinScheduleJobData(dauphin.ObjectType):
class Meta:
name = "ScheduleJobData"
cronSchedule = dauphin.NonNull(dauphin.String)
startTimestamp = dauphin.Float()
def __init__(self, job_specific_data):
check.inst_param(job_specific_data, "job_specific_data", ScheduleJobData)
super(DauphinScheduleJobData, self).__init__(
cronSchedule=job_specific_data.cron_schedule,
startTimestamp=job_specific_data.start_timestamp,
)
class DauphinJobStatesOrError(dauphin.Union):
class Meta:
name = "JobStatesOrError"
types = ("JobStates", "PythonError")
class DauphinJobStates(dauphin.ObjectType):
class Meta:
name = "JobStates"
results = dauphin.non_null_list("JobState")
DauphinJobType = dauphin.Enum.from_enum(JobType)
DauphinJobStatus = dauphin.Enum.from_enum(JobStatus)
DauphinJobTickStatus = dauphin.Enum.from_enum(JobTickStatus)
```
#### File: schema/schedules/schedules.py
```python
import pendulum
from dagster import check
from dagster.core.host_representation import ExternalSchedule
from dagster.seven import get_current_datetime_in_utc, get_timestamp_from_utc_datetime
from dagster_graphql import dauphin
from dagster_graphql.schema.errors import (
DauphinPythonError,
DauphinRepositoryNotFoundError,
DauphinScheduleNotFoundError,
)
class DauphinScheduleOrError(dauphin.Union):
class Meta:
name = "ScheduleOrError"
types = ("Schedule", DauphinScheduleNotFoundError, DauphinPythonError)
class DauphinSchedules(dauphin.ObjectType):
class Meta:
name = "Schedules"
results = dauphin.non_null_list("Schedule")
class DauphinSchedulesOrError(dauphin.Union):
class Meta:
name = "SchedulesOrError"
types = (DauphinSchedules, DauphinRepositoryNotFoundError, DauphinPythonError)
class DauphinSchedule(dauphin.ObjectType):
class Meta:
name = "Schedule"
id = dauphin.NonNull(dauphin.ID)
name = dauphin.NonNull(dauphin.String)
cron_schedule = dauphin.NonNull(dauphin.String)
pipeline_name = dauphin.NonNull(dauphin.String)
solid_selection = dauphin.List(dauphin.String)
mode = dauphin.NonNull(dauphin.String)
execution_timezone = dauphin.Field(dauphin.String)
scheduleState = dauphin.NonNull("JobState")
partition_set = dauphin.Field("PartitionSet")
futureTicks = dauphin.NonNull("FutureJobTicks", cursor=dauphin.Float(), limit=dauphin.Int())
futureTick = dauphin.NonNull("FutureJobTick", tick_timestamp=dauphin.NonNull(dauphin.Int))
def resolve_id(self, _):
return "%s:%s" % (self.name, self.pipeline_name)
def resolve_partition_set(self, graphene_info):
if self._external_schedule.partition_set_name is None:
return None
repository = graphene_info.context.get_repository_location(
self._external_schedule.handle.location_name
).get_repository(self._external_schedule.handle.repository_name)
external_partition_set = repository.get_external_partition_set(
self._external_schedule.partition_set_name
)
return graphene_info.schema.type_named("PartitionSet")(
external_repository_handle=repository.handle,
external_partition_set=external_partition_set,
)
def resolve_futureTicks(self, graphene_info, **kwargs):
cursor = kwargs.get(
"cursor", get_timestamp_from_utc_datetime(get_current_datetime_in_utc())
)
limit = kwargs.get("limit", 10)
tick_times = []
time_iter = self._external_schedule.execution_time_iterator(cursor)
for _ in range(limit):
tick_times.append(next(time_iter).timestamp())
future_ticks = [
graphene_info.schema.type_named("FutureJobTick")(self._schedule_state, tick_time)
for tick_time in tick_times
]
return graphene_info.schema.type_named("FutureJobTicks")(
results=future_ticks, cursor=tick_times[-1] + 1
)
def resolve_futureTick(self, graphene_info, tick_timestamp):
return graphene_info.schema.type_named("FutureJobTick")(
self._schedule_state, float(tick_timestamp)
)
def __init__(self, graphene_info, external_schedule):
self._external_schedule = check.inst_param(
external_schedule, "external_schedule", ExternalSchedule
)
self._schedule_state = graphene_info.context.instance.get_job_state(
self._external_schedule.get_external_origin_id()
)
if not self._schedule_state:
# Also include a ScheduleState for a stopped schedule that may not
# have a stored database row yet
self._schedule_state = self._external_schedule.get_default_job_state(
graphene_info.context.instance
)
super(DauphinSchedule, self).__init__(
name=external_schedule.name,
cron_schedule=external_schedule.cron_schedule,
pipeline_name=external_schedule.pipeline_name,
solid_selection=external_schedule.solid_selection,
mode=external_schedule.mode,
scheduleState=graphene_info.schema.type_named("JobState")(self._schedule_state),
execution_timezone=(
self._external_schedule.execution_timezone
if self._external_schedule.execution_timezone
else pendulum.now().timezone.name
),
)
``` |
{
"source": "joshua-taylor/dataIntegrator",
"score": 2
} |
#### File: uploads/core/models.py
```python
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
#for validation of uploaded files:
def validate_file_extension(value):
import os
from django.core.exceptions import ValidationError
ext = os.path.splitext(value.name)[1]
valid_extensions = ['.csv','.xlsx','.xls']
if not ext.lower() in valid_extensions:
raise ValidationError(u'Unsupported file extension, please ensure you are uploading a spreadsheet with a file type ending in "csv", "xlsx", or "xls"')
class UserManager(BaseUserManager):
"""Define a model manager for User model with no username field."""
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
"""Create and save a User with the given email and password."""
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
"""Create and save a regular User with the given email and password."""
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
"""Create and save a SuperUser with the given email and password."""
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields)
class CustomUser(AbstractBaseUser, PermissionsMixin):
username = None
email = models.EmailField(_('email'), unique=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return self.email
class Document(models.Model):
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
description = models.CharField(max_length=255, blank=True)
document = models.FileField(upload_to='documents/', validators=[validate_file_extension])
uploaded_at = models.DateTimeField(auto_now_add=True)
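# --- Hedged usage sketch (not part of the original module) ---
# Inside a configured Django project (e.g. ``python manage.py shell``) the
# custom manager creates users keyed by email; the values are placeholders.
#
#   user = CustomUser.objects.create_user(email="demo@example.com", password="change-me")
#   doc = Document.objects.create(owner=user, description="Monthly sales",
#                                 document="documents/sales.xlsx")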
``` |
{
"source": "joshuataylor/great_expectations",
"score": 3
} |
#### File: great_expectations/cli/python_subprocess.py
```python
import os
import sys
import time
import traceback
from subprocess import PIPE, CalledProcessError, CompletedProcess, Popen, run
from typing import Union
import click
from great_expectations.core import logger
def execute_shell_command(command: str) -> int:
"""
    Execute a shell (bash in the present case) command from inside a Python program.
    While developed independently, this function is very similar to the one offered in this StackOverflow article:
https://stackoverflow.com/questions/30993411/environment-variables-using-subprocess-check-output-python
:param command: bash command -- as if typed in a shell/Terminal window
:return: status code -- 0 if successful; all other values (1 is the most common) indicate an error
"""
cwd: str = os.getcwd()
path_env_var: str = os.pathsep.join([os.environ.get("PATH", os.defpath), cwd])
env: dict = dict(os.environ, PATH=path_env_var)
status_code: int = 0
try:
res: CompletedProcess = run(
args=["bash", "-c", command],
stdin=None,
input=None,
stdout=None,
stderr=None,
capture_output=True,
shell=False,
cwd=cwd,
timeout=None,
check=True,
encoding=None,
errors=None,
text=None,
env=env,
universal_newlines=True,
)
sh_out: str = res.stdout.strip()
logger.info(sh_out)
except CalledProcessError as cpe:
status_code = cpe.returncode
sys.stderr.write(cpe.output)
sys.stderr.flush()
exception_message: str = "A Sub-Process call Exception occurred.\n"
exception_traceback: str = traceback.format_exc()
exception_message += (
f'{type(cpe).__name__}: "{str(cpe)}". Traceback: "{exception_traceback}".'
)
logger.error(exception_message)
return status_code
def execute_shell_command_with_progress_polling(command: str) -> int:
"""
    Execute a shell (bash in the present case) command from inside a Python program, with polling (to enable a progress bar).
:param command: bash command -- as if typed in a shell/Terminal window
:return: status code -- 0 if successful; all other values (1 is the most common) indicate an error
"""
cwd: str = os.getcwd()
path_env_var: str = os.pathsep.join([os.environ.get("PATH", os.defpath), cwd])
env: dict = dict(os.environ, PATH=path_env_var)
status_code: int
bar_length_100_percent: int = 100
max_work_amount: int = bar_length_100_percent
poll_period_seconds: int = 1
gathered: int = 0
progress: float
with click.progressbar(length=bar_length_100_percent, label=command) as bar:
try:
with Popen(
args=["bash", "-c", command],
bufsize=-1,
executable=None,
stdin=None,
stdout=PIPE,
stderr=PIPE,
preexec_fn=None,
close_fds=True,
shell=False,
cwd=cwd,
env=env,
universal_newlines=True,
startupinfo=None,
creationflags=0,
restore_signals=True,
start_new_session=False,
pass_fds=(),
encoding=None,
errors=None,
) as proc:
poll_status_code: Union[int, None] = proc.poll()
poll_stdout: str = proc.stdout.readline()
while poll_status_code is None:
gathered += max([len(poll_stdout), poll_period_seconds])
progress = float(gathered) / max_work_amount
excess: float = progress - 1.0
if excess > 0:
if 0.0 < excess <= 1.0:
max_work_amount += 2.0 * excess * max_work_amount
elif 1.0 < excess <= 2.0:
max_work_amount += 5.0 * excess * max_work_amount
elif 2.0 < excess <= 1.0e1:
max_work_amount += 1.0e1 * excess * max_work_amount
else:
max_work_amount += 1.0e2 * excess * max_work_amount
progress = float(gathered) / max_work_amount
bar.pos = int(progress * (bar_length_100_percent - 1)) + 1
bar.update(0)
time.sleep(poll_period_seconds)
poll_status_code = proc.poll()
poll_stdout = proc.stdout.readline()
status_code = proc.returncode
if status_code != poll_status_code:
status_code = 1
else:
bar.pos = bar_length_100_percent
bar.update(0)
except CalledProcessError as cpe:
status_code = cpe.returncode
sys.stderr.write(cpe.output)
sys.stderr.flush()
exception_message: str = "A Sub-Process call Exception occurred.\n"
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(cpe).__name__}: "{str(cpe)}". Traceback: "{exception_traceback}".'
logger.error(exception_message)
return status_code
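# --- Hedged usage sketch (not part of the original module) ---
# Both helpers run a bash command and return its exit status; the commands
# below are placeholders, and the polling variant mainly adds a progress bar
# for long-running commands.
if __name__ == "__main__":
    code = execute_shell_command("echo 'hello from a subprocess'")
    print(f"plain call exited with {code}")
    code = execute_shell_command_with_progress_polling("sleep 2 && echo done")
    print(f"polled call exited with {code}")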
```
#### File: great_expectations/core/data_context_key.py
```python
from abc import ABCMeta, abstractmethod
class DataContextKey(object, metaclass=ABCMeta):
"""DataContextKey objects are used to uniquely identify resources used by the DataContext.
A DataContextKey is designed to support clear naming with multiple representations including a hashable
version making it suitable for use as the key in a dictionary.
"""
@abstractmethod
def to_tuple(self):
pass
@classmethod
def from_tuple(cls, tuple_):
return cls(*tuple_)
def to_fixed_length_tuple(self):
raise NotImplementedError
@classmethod
def from_fixed_length_tuple(cls, tuple_):
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
return self.to_tuple() == other.to_tuple()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.to_tuple())
def __repr__(self):
return self.__class__.__name__ + "::" + "/".join(self.to_tuple())
class StringKey(DataContextKey):
"""A simple DataContextKey with just a single string value"""
def __init__(self, key):
self._key = key
def to_tuple(self):
return (self._key,)
def to_fixed_length_tuple(self):
return self.to_tuple()
@classmethod
def from_fixed_length_tuple(cls, tuple_):
return cls.from_tuple(tuple_)
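# --- Hedged usage sketch (not part of the original module) ---
# Keys hash and compare by their tuple form, so equal keys can be used
# interchangeably as dictionary keys.
if __name__ == "__main__":
    key = StringKey("expectations/my_suite")
    store = {key: "some stored value"}
    assert store[StringKey("expectations/my_suite")] == "some stored value"
    print(repr(key), key.to_tuple())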
```
#### File: usage_statistics/anonymizers/anonymizer.py
```python
import logging
from hashlib import md5
from great_expectations.util import load_class
logger = logging.getLogger(__name__)
class Anonymizer(object):
"""Anonymize string names in an optionally-consistent way."""
def __init__(self, salt=None):
if salt is not None and not isinstance(salt, str):
logger.error("invalid salt: must provide a string. Setting a random salt.")
salt = None
if salt is None:
import secrets
self._salt = secrets.token_hex(8)
else:
self._salt = salt
@property
def salt(self):
return self._salt
def anonymize(self, string_):
salted = self._salt + string_
return md5(salted.encode("utf-8")).hexdigest()
def anonymize_object_info(
self,
anonymized_info_dict,
ge_classes,
object_=None,
object_class=None,
object_config=None,
):
assert (
object_ or object_class or object_config
), "Must pass either object_ or object_class or object_config."
try:
if object_class is None and object_ is not None:
object_class = object_.__class__
elif object_class is None and object_config is not None:
object_class_name = object_config.get("class_name")
object_module_name = object_config.get("module_name")
object_class = load_class(object_class_name, object_module_name)
object_class_name = object_class.__name__
for ge_class in ge_classes:
if issubclass(object_class, ge_class):
anonymized_info_dict["parent_class"] = ge_class.__name__
if not object_class == ge_class:
anonymized_info_dict["anonymized_class"] = self.anonymize(
object_class_name
)
break
if not anonymized_info_dict.get("parent_class"):
anonymized_info_dict["parent_class"] = "__not_recognized__"
anonymized_info_dict["anonymized_class"] = self.anonymize(
object_class_name
)
except AttributeError:
anonymized_info_dict["parent_class"] = "__not_recognized__"
anonymized_info_dict["anonymized_class"] = self.anonymize(object_class_name)
return anonymized_info_dict
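# --- Hedged usage sketch (not part of the original module) ---
# With a fixed salt the hashing is deterministic across calls, which is what
# keeps anonymized usage statistics consistent; the salt is a placeholder.
if __name__ == "__main__":
    anonymizer = Anonymizer(salt="a-fixed-salt")
    first = anonymizer.anonymize("my_datasource")
    assert first == anonymizer.anonymize("my_datasource")
    print(first)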
```
#### File: datasource/batch_kwargs_generator/s3_batch_kwargs_generator.py
```python
import datetime
import logging
import re
import warnings
from great_expectations.datasource.batch_kwargs_generator.batch_kwargs_generator import (
BatchKwargsGenerator,
)
from great_expectations.datasource.types import S3BatchKwargs
from great_expectations.exceptions import BatchKwargsError, GreatExpectationsError
try:
import boto3
except ImportError:
boto3 = None
logger = logging.getLogger(__name__)
class S3GlobReaderBatchKwargsGenerator(BatchKwargsGenerator):
"""
S3 BatchKwargGenerator provides support for generating batches of data from an S3 bucket. For the S3 batch kwargs generator, assets must
be individually defined using a prefix and glob, although several additional configuration parameters are available
for assets (see below).
Example configuration::
datasources:
my_datasource:
...
batch_kwargs_generator:
my_s3_generator:
class_name: S3GlobReaderBatchKwargsGenerator
bucket: my_bucket.my_organization.priv
reader_method: parquet # This will be automatically inferred from suffix where possible, but can be explicitly specified as well
reader_options: # Note that reader options can be specified globally or per-asset
sep: ","
delimiter: "/" # Note that this is the delimiter for the BUCKET KEYS. By default it is "/"
boto3_options:
endpoint_url: $S3_ENDPOINT # Use the S3_ENDPOINT environment variable to determine which endpoint to use
max_keys: 100 # The maximum number of keys to fetch in a single list_objects request to s3. When accessing batch_kwargs through an iterator, the iterator will silently refetch if more keys were available
assets:
my_first_asset:
prefix: my_first_asset/
regex_filter: .* # The regex filter will filter the results returned by S3 for the key and prefix to only those matching the regex
directory_assets: True
access_logs:
prefix: access_logs
regex_filter: access_logs/2019.*\.csv.gz
sep: "~"
max_keys: 100
"""
recognized_batch_parameters = {
"data_asset_name",
"partition_id",
"reader_method",
"reader_options",
"limit",
}
# FIXME add tests for new partitioner functionality
def __init__(
self,
name="default",
datasource=None,
bucket=None,
reader_options=None,
assets=None,
delimiter="/",
reader_method=None,
boto3_options=None,
max_keys=1000,
):
"""Initialize a new S3GlobReaderBatchKwargsGenerator
Args:
name: the name of the batch kwargs generator
datasource: the datasource to which it is attached
bucket: the name of the s3 bucket from which it generates batch_kwargs
reader_options: options passed to the datasource reader method
assets: asset configuration (see class docstring for more information)
delimiter: the BUCKET KEY delimiter
reader_method: the reader_method to include in generated batch_kwargs
boto3_options: dictionary of key-value pairs to use when creating boto3 client or resource objects
max_keys: the maximum number of keys to fetch in a single list_objects request to s3
"""
super().__init__(name, datasource=datasource)
if reader_options is None:
reader_options = {}
if assets is None:
assets = {"default": {"prefix": "", "regex_filter": ".*"}}
self._bucket = bucket
self._reader_method = reader_method
self._reader_options = reader_options
self._assets = assets
self._delimiter = delimiter
if boto3_options is None:
boto3_options = {}
self._max_keys = max_keys
self._iterators = {}
try:
self._s3 = boto3.client("s3", **boto3_options)
except TypeError:
raise (
ImportError(
"Unable to load boto3, which is required for S3 batch kwargs generator"
)
)
@property
def reader_options(self):
return self._reader_options
@property
def assets(self):
return self._assets
@property
def bucket(self):
return self._bucket
def get_available_data_asset_names(self):
return {"names": [(key, "file") for key in self._assets.keys()]}
def _get_iterator(
self, data_asset_name, reader_method=None, reader_options=None, limit=None
):
logger.debug(
"Beginning S3GlobReaderBatchKwargsGenerator _get_iterator for data_asset_name: %s"
% data_asset_name
)
if data_asset_name not in self._assets:
batch_kwargs = {
"data_asset_name": data_asset_name,
"reader_method": reader_method,
"reader_options": reader_options,
"limit": limit,
}
raise BatchKwargsError(
"Unknown asset_name %s" % data_asset_name, batch_kwargs
)
if data_asset_name not in self._iterators:
self._iterators[data_asset_name] = {}
asset_config = self._assets[data_asset_name]
return self._build_asset_iterator(
asset_config=asset_config,
iterator_dict=self._iterators[data_asset_name],
reader_method=reader_method,
reader_options=reader_options,
limit=limit,
)
def _build_batch_kwargs_path_iter(self, path_list, reader_options=None, limit=None):
for path in path_list:
yield self._build_batch_kwargs(
path, reader_options=reader_options, limit=limit
)
def _build_batch_kwargs(self, batch_parameters):
try:
data_asset_name = batch_parameters.pop("data_asset_name")
except KeyError:
raise BatchKwargsError(
"Unable to build BatchKwargs: no name provided in batch_parameters.",
batch_kwargs=batch_parameters,
)
partition_id = batch_parameters.pop("partition_id", None)
batch_kwargs = self._datasource.process_batch_parameters(batch_parameters)
if partition_id:
try:
asset_config = self._assets[data_asset_name]
except KeyError:
raise GreatExpectationsError(
"No asset config found for asset %s" % data_asset_name
)
if data_asset_name not in self._iterators:
self._iterators[data_asset_name] = {}
iterator_dict = self._iterators[data_asset_name]
for key in self._get_asset_options(asset_config, iterator_dict):
if (
self._partitioner(key=key, asset_config=asset_config)
== partition_id
):
batch_kwargs = self._build_batch_kwargs_from_key(
key=key,
asset_config=asset_config,
reader_options=batch_parameters.get(
"reader_options"
), # handled in generator
limit=batch_kwargs.get(
"limit"
), # may have been processed from datasource
)
if batch_kwargs is None:
raise BatchKwargsError(
"Unable to identify partition %s for asset %s"
% (partition_id, data_asset_name),
{data_asset_name: data_asset_name, partition_id: partition_id},
)
return batch_kwargs
else:
return self.yield_batch_kwargs(
data_asset_name=data_asset_name, **batch_parameters, **batch_kwargs
)
def _build_batch_kwargs_from_key(
self,
key,
asset_config=None,
reader_method=None,
reader_options=None,
limit=None,
):
batch_kwargs = {
"s3": "s3a://" + self.bucket + "/" + key,
"reader_options": self.reader_options,
}
if asset_config.get("reader_options"):
batch_kwargs["reader_options"].update(asset_config.get("reader_options"))
if reader_options is not None:
batch_kwargs["reader_options"].update(reader_options)
if self._reader_method is not None:
batch_kwargs["reader_method"] = self._reader_method
if asset_config.get("reader_method"):
batch_kwargs["reader_method"] = asset_config.get("reader_method")
if reader_method is not None:
batch_kwargs["reader_method"] = reader_method
if limit:
batch_kwargs["limit"] = limit
return S3BatchKwargs(batch_kwargs)
def _get_asset_options(self, asset_config, iterator_dict):
query_options = {
"Bucket": self.bucket,
"Delimiter": asset_config.get("delimiter", self._delimiter),
"Prefix": asset_config.get("prefix", None),
"MaxKeys": asset_config.get("max_keys", self._max_keys),
}
directory_assets = asset_config.get("directory_assets", False)
if "continuation_token" in iterator_dict:
query_options.update(
{"ContinuationToken": iterator_dict["continuation_token"]}
)
logger.debug(
"Fetching objects from S3 with query options: %s" % str(query_options)
)
asset_options = self._s3.list_objects_v2(**query_options)
if directory_assets:
if "CommonPrefixes" not in asset_options:
raise BatchKwargsError(
"Unable to build batch_kwargs. The asset may not be configured correctly. If directory assets "
"are requested, then common prefixes must be returned.",
{
"asset_configuration": asset_config,
"contents": asset_options["Contents"]
if "Contents" in asset_options
else None,
},
)
keys = [item["Prefix"] for item in asset_options["CommonPrefixes"]]
else:
if "Contents" not in asset_options:
raise BatchKwargsError(
"Unable to build batch_kwargs. The asset may not be configured correctly. If s3 returned common "
"prefixes it may not have been able to identify desired keys, and they are included in the "
"incomplete batch_kwargs object returned with this error.",
{
"asset_configuration": asset_config,
"common_prefixes": asset_options["CommonPrefixes"]
if "CommonPrefixes" in asset_options
else None,
},
)
keys = [
item["Key"] for item in asset_options["Contents"] if item["Size"] > 0
]
keys = [
key
for key in filter(
lambda x: re.match(asset_config.get("regex_filter", ".*"), x)
is not None,
keys,
)
]
for key in keys:
yield key
if asset_options["IsTruncated"]:
iterator_dict["continuation_token"] = asset_options["NextContinuationToken"]
# Recursively fetch more
for key in self._get_asset_options(asset_config, iterator_dict):
yield key
elif "continuation_token" in iterator_dict:
# Make sure we clear the token once we've gotten fully through
del iterator_dict["continuation_token"]
def _build_asset_iterator(
self,
asset_config,
iterator_dict,
reader_method=None,
reader_options=None,
limit=None,
):
for key in self._get_asset_options(asset_config, iterator_dict):
yield self._build_batch_kwargs_from_key(
key,
asset_config,
                reader_method=reader_method,
reader_options=reader_options,
limit=limit,
)
# TODO: deprecate generator_asset argument
def get_available_partition_ids(self, generator_asset=None, data_asset_name=None):
assert (generator_asset and not data_asset_name) or (
not generator_asset and data_asset_name
), "Please provide either generator_asset or data_asset_name."
if generator_asset:
warnings.warn(
"The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. "
"Please update code accordingly.",
DeprecationWarning,
)
data_asset_name = generator_asset
if data_asset_name not in self._iterators:
self._iterators[data_asset_name] = {}
iterator_dict = self._iterators[data_asset_name]
asset_config = self._assets[data_asset_name]
available_ids = [
self._partitioner(key=key, asset_config=asset_config)
            for key in self._get_asset_options(asset_config, iterator_dict)
]
return available_ids
def _partitioner(self, key, asset_config):
if "partition_regex" in asset_config:
match_group_id = asset_config.get("match_group_id", 1)
matches = re.match(asset_config["partition_regex"], key)
# In the case that there is a defined regex, the user *wanted* a partition. But it didn't match.
# So, we'll add a *sortable* id
if matches is None:
logger.warning("No match found for key: %s" % key)
return (
datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
+ "__unmatched"
)
else:
try:
return matches.group(match_group_id)
except IndexError:
logger.warning(
"No match group %d in key %s" % (match_group_id, key)
)
return (
datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
+ "__no_match_group"
)
# If there is no partitioner defined, fall back on using the path as a partition_id
else:
prefix = asset_config.get("prefix", "")
if key.startswith(prefix):
key = key[len(prefix) :]
return key
```
#### File: great_expectations/datasource/sparkdf_datasource.py
```python
import datetime
import logging
import uuid
from great_expectations.datasource.types import BatchMarkers
from great_expectations.types import ClassConfig
from ..core.batch import Batch
from ..dataset import SparkDFDataset
from ..exceptions import BatchKwargsError
from ..types.configurations import classConfigSchema
from .datasource import Datasource
logger = logging.getLogger(__name__)
try:
from pyspark.sql import SparkSession, DataFrame
except ImportError:
SparkSession = None
# TODO: review logging more detail here
logger.debug(
"Unable to load pyspark; install optional spark dependency for support."
)
class SparkDFDatasource(Datasource):
"""The SparkDFDatasource produces SparkDFDatasets and supports generators capable of interacting with local
filesystem (the default subdir_reader batch kwargs generator) and databricks notebooks.
Accepted Batch Kwargs:
- PathBatchKwargs ("path" or "s3" keys)
- InMemoryBatchKwargs ("dataset" key)
- QueryBatchKwargs ("query" key)
"""
recognized_batch_parameters = {
"reader_method",
"reader_options",
"limit",
"dataset_options",
}
@classmethod
def build_configuration(
cls,
data_asset_type=None,
batch_kwargs_generators=None,
spark_config=None,
**kwargs
):
"""
Build a full configuration object for a datasource, potentially including generators with defaults.
Args:
data_asset_type: A ClassConfig dictionary
batch_kwargs_generators: Generator configuration dictionary
spark_config: dictionary of key-value pairs to pass to the spark builder
**kwargs: Additional kwargs to be part of the datasource constructor's initialization
Returns:
A complete datasource configuration.
"""
if data_asset_type is None:
data_asset_type = {
"class_name": "SparkDFDataset",
"module_name": "great_expectations.dataset",
}
else:
data_asset_type = classConfigSchema.dump(ClassConfig(**data_asset_type))
if spark_config is None:
spark_config = {}
configuration = kwargs
configuration.update(
{"data_asset_type": data_asset_type, "spark_config": spark_config}
)
if batch_kwargs_generators:
configuration["batch_kwargs_generators"] = batch_kwargs_generators
return configuration
def __init__(
self,
name="default",
data_context=None,
data_asset_type=None,
batch_kwargs_generators=None,
spark_config=None,
**kwargs
):
"""Build a new SparkDFDatasource instance.
Args:
name: the name of this datasource
data_context: the DataContext to which this datasource is connected
data_asset_type: ClassConfig describing the data_asset type to be constructed by this datasource
batch_kwargs_generators: generator configuration
spark_config: dictionary of key-value pairs to be set on the spark session builder
**kwargs: Additional
"""
configuration_with_defaults = SparkDFDatasource.build_configuration(
data_asset_type, batch_kwargs_generators, spark_config, **kwargs
)
data_asset_type = configuration_with_defaults.pop("data_asset_type")
batch_kwargs_generators = configuration_with_defaults.pop(
"batch_kwargs_generators", None
)
super().__init__(
name,
data_context=data_context,
data_asset_type=data_asset_type,
batch_kwargs_generators=batch_kwargs_generators,
**configuration_with_defaults
)
try:
builder = SparkSession.builder
for k, v in configuration_with_defaults["spark_config"].items():
builder.config(k, v)
self.spark = builder.getOrCreate()
except AttributeError:
logger.error(
"Unable to load spark context; install optional spark dependency for support."
)
self.spark = None
self._build_generators()
def process_batch_parameters(
self, reader_method=None, reader_options=None, limit=None, dataset_options=None
):
batch_kwargs = super().process_batch_parameters(
limit=limit, dataset_options=dataset_options,
)
# Apply globally-configured reader options first
if reader_options:
# Then update with any locally-specified reader options
if not batch_kwargs.get("reader_options"):
batch_kwargs["reader_options"] = dict()
batch_kwargs["reader_options"].update(reader_options)
if reader_method is not None:
batch_kwargs["reader_method"] = reader_method
return batch_kwargs
def get_batch(self, batch_kwargs, batch_parameters=None):
"""class-private implementation of get_data_asset"""
if self.spark is None:
logger.error("No spark session available")
return None
reader_options = batch_kwargs.get("reader_options", {})
# We need to build batch_markers to be used with the DataFrame
batch_markers = BatchMarkers(
{
"ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
}
)
if "path" in batch_kwargs or "s3" in batch_kwargs:
# If both are present, let s3 override
path = batch_kwargs.get("path")
path = batch_kwargs.get("s3", path)
reader_method = batch_kwargs.get("reader_method")
reader = self.spark.read
for option in reader_options.items():
reader = reader.option(*option)
reader_fn = self._get_reader_fn(reader, reader_method, path)
df = reader_fn(path)
elif "query" in batch_kwargs:
df = self.spark.sql(batch_kwargs["query"])
elif "dataset" in batch_kwargs and isinstance(
batch_kwargs["dataset"], (DataFrame, SparkDFDataset)
):
df = batch_kwargs.get("dataset")
# We don't want to store the actual dataframe in kwargs; copy the remaining batch_kwargs
batch_kwargs = {k: batch_kwargs[k] for k in batch_kwargs if k != "dataset"}
if isinstance(df, SparkDFDataset):
# Grab just the spark_df reference, since we want to override everything else
df = df.spark_df
# Record this in the kwargs *and* the id
batch_kwargs["SparkDFRef"] = True
batch_kwargs["ge_batch_id"] = str(uuid.uuid1())
else:
raise BatchKwargsError(
"Unrecognized batch_kwargs for spark_source", batch_kwargs
)
if "limit" in batch_kwargs:
df = df.limit(batch_kwargs["limit"])
return Batch(
datasource_name=self.name,
batch_kwargs=batch_kwargs,
data=df,
batch_parameters=batch_parameters,
batch_markers=batch_markers,
data_context=self._data_context,
)
@staticmethod
def guess_reader_method_from_path(path):
if path.endswith(".csv") or path.endswith(".tsv"):
return {"reader_method": "csv"}
elif path.endswith(".parquet"):
return {"reader_method": "parquet"}
raise BatchKwargsError(
"Unable to determine reader method from path: %s" % path, {"path": path}
)
def _get_reader_fn(self, reader, reader_method=None, path=None):
"""Static helper for providing reader_fn
Args:
reader: the base spark reader to use; this should have had reader_options applied already
reader_method: the name of the reader_method to use, if specified
path (str): the path to use to guess reader_method if it was not specified
Returns:
ReaderMethod to use for the filepath
"""
if reader_method is None and path is None:
raise BatchKwargsError(
"Unable to determine spark reader function without reader_method or path.",
{"reader_method": reader_method},
)
if reader_method is None:
reader_method = self.guess_reader_method_from_path(path=path)[
"reader_method"
]
try:
if reader_method.lower() == "delta":
return reader.format("delta").load
return getattr(reader, reader_method)
except AttributeError:
raise BatchKwargsError(
"Unable to find reader_method %s in spark." % reader_method,
{"reader_method": reader_method},
)
```
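As the class docstring notes, `get_batch` dispatches on the keys present in `batch_kwargs` ("path"/"s3", "query", or "dataset"). The sketch below shows one hypothetical way a caller might exercise the "path" branch; the project layout, datasource configuration, suite name, and CSV path are all assumptions, not part of this module.
```python
# Hypothetical usage; assumes an existing great_expectations project with a
# configured SparkDFDatasource and an expectation suite named "my_suite".
import great_expectations as ge

context = ge.data_context.DataContext()
batch_kwargs = {
    "path": "/data/titanic.csv",           # exercises the "path" branch of get_batch
    "reader_method": "csv",                # otherwise guessed from the file extension
    "reader_options": {"header": "true"},  # forwarded to the Spark reader via .option()
    "limit": 1000,                         # applied as df.limit(1000)
}
batch = context.get_batch(batch_kwargs, expectation_suite_name="my_suite")
```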
#### File: great_expectations/profile/multi_batch_validation_meta_analysis.py
```python
# import logging
# from collections import defaultdict
# import collections
#
# import warnings
# from great_expectations.datasource.types import BatchKwargs
# from great_expectations.profile.metrics_store import MetricsStore
# from great_expectations.profile.metrics_utils import (
# set_nested_value_in_dict,
# get_nested_value_from_dict
# )
#
# logger = logging.getLogger(__name__)
#
#
# class MultiBatchValidationMetaAnalysis(object):
# """MultiBatchValidationMetaAnalysis takes a list of validation results
# (same expectation suite evaluated against multiple batches)
# and returns multi-batch metrics from these results.
#
# """
#
# # (expectation type, result key) -> (expectation kwargs that should become metric kwargs)
# # result key is a string or a tuple if the key is nested. same for the expectation kwargs
#
# # NOTE: Eugene: 2019-09-04: Add more entries
# EXPECTATION_DEFINED_METRICS_LOOKUP_TABLE = {
# ('expect_column_values_to_not_be_null', ('unexpected_percent',)): ('column',), # note: "," is important - it makes it a tuple!
# ('expect_column_quantile_values_to_be_between', ('observed_value', 'values')): (
# 'column', ('quantile_ranges', 'quantiles')),
#
# }
#
# @classmethod
# def add_expectation_defined_metric_for_result_key(cls, d, result, data_asset_name, batch_kwargs, metrics_store, t=()):
# for key, value in d.items():
# if isinstance(value, collections.Mapping):
# cls.add_expectation_defined_metric_for_result_key(value, result, data_asset_name, batch_kwargs, metrics_store, t + (key,))
# else:
# # result_key_lookup_key = key if t==() else (t + (key,))
# result_key_lookup_key = (t + (key,))
# full_lookup_key = (result.expectation_config.expectation_type, result_key_lookup_key)
# metric_kwargs_names = cls.EXPECTATION_DEFINED_METRICS_LOOKUP_TABLE.get(full_lookup_key)
# if metric_kwargs_names:
# metric_kwargs = {}
# for metric_kwarg_name in metric_kwargs_names:
# if isinstance(metric_kwarg_name, tuple):
# set_nested_value_in_dict(metric_kwargs, metric_kwarg_name, get_nested_value_from_dict(result.expectation_config['kwargs'], metric_kwarg_name))
# else:
# metric_kwargs[metric_kwarg_name] = result.expectation_config['kwargs'][metric_kwarg_name]
#
# metrics_store.add_single_batch_expectation_defined_metric(
# data_asset_name,
# batch_kwargs.batch_fingerprint,
# result.expectation_config.expectation_type,
# result_key_lookup_key,
# metric_kwargs,
# value)
#
# @classmethod
# def add_metrics_from_single_expectation_validation_result(cls, result, data_asset_name, batch_kwargs, metrics_store):
# """
# Extract metrics from a validation result of one expectation and store them.
# Depending on the type of the expectation, this method chooses the key
# in the result dictionary that should be returned as a metric
# (e.g., "observed_value" or "unexpected_percent").
#
# :param result: a validation result dictionary of one expectation
# :param data_asset_name:
# :param batch_kwargs: BatchKwargs of the batch that was validated
# :param metrics_store
# """
# # NOTE: Eugene: 2019-09-04: Add more entries
# expectation_metrics = {
# # 'expect_column_distinct_values_to_be_in_set'
# # 'expect_column_kl_divergence_to_be_less_than',
# 'expect_column_max_to_be_between': {
# 'observed_value': 'column_max'
# },
# 'expect_column_mean_to_be_between': {
# 'observed_value': 'column_mean'
# },
# 'expect_column_median_to_be_between': {
# 'observed_value': 'column_median'
# },
# 'expect_column_min_to_be_between': {
# 'observed_value': 'column_min'
# },
# 'expect_column_proportion_of_unique_values_to_be_between': {
# 'observed_value': 'column_proportion_of_unique_values'
# },
# # 'expect_column_quantile_values_to_be_between',
# 'expect_column_stdev_to_be_between': {
# 'observed_value': 'column_stdev'
# },
# 'expect_column_unique_value_count_to_be_between': {
# 'observed_value': 'column_unique_count'
# },
# # 'expect_column_values_to_be_between',
# # 'expect_column_values_to_be_in_set',
# # 'expect_column_values_to_be_in_type_list',
# 'expect_column_values_to_be_unique': {
#
# },
# # 'expect_table_columns_to_match_ordered_list',
# 'expect_table_row_count_to_be_between': {
# 'observed_value': 'row_count'
# }
#
# }
#
# metrics = []
# if result.get('result'):
# entry = expectation_metrics.get(result.expectation_config.expectation_type)
# if entry:
# for key in result['result'].keys():
# metric_name = entry.get(key)
# if metric_name:
# metric_kwargs = {"column": result.expectation_config['kwargs']['column']} if result.expectation_config[
# 'kwargs'].get('column') else {}
#
# metrics_store.add_single_batch_metric(
# data_asset_name,
# batch_kwargs.batch_fingerprint,
# metric_name,
# metric_kwargs,
# result['result'][key])
#
# else:
# cls.add_expectation_defined_metric_for_result_key(result['result'], result,
# data_asset_name, batch_kwargs, metrics_store)
#
# @classmethod
# def get_metrics(cls, validation_results_list, data_context):
# """
# Get multi-batch metrics from a list of validation results
#
# :param validation_results_list: a list validation results where each item is a
# result of validating a batch against the same expectation suite
# :return: a dict: {multi-batch metric urn -> multi-batch metric}
# """
#
# # NOTE: Eugene: 2019-09-04: For now we are creating an instance of metrics store here
# # but it probably should be some singleton obtained from a factory/manager.
# metrics_store = MetricsStore()
#
# batch_kwargs_list = []
# for j, one_batch_validation_results in enumerate(validation_results_list):
# # print(json.dumps(one_batch_validation_results['meta'], indent=2))
# batch_kwargs = BatchKwargs(one_batch_validation_results['meta']['batch_kwargs'])
# batch_kwargs_list.append(batch_kwargs)
#
# # NOTE: Eugene 2019-08-25: when validation results be a typed object,
# # that object will have data_asset_name property method that will
# # return a NormalizedDataAssetName. Until then we are constructing
# # a NormalizedDataAssetName from the string that we fetch from the dictionary
# normalized_data_asset_name = data_context.normalize_data_asset_name(
# one_batch_validation_results['meta']['data_asset_name'])
# for i, result in enumerate(one_batch_validation_results['results']):
# cls.add_metrics_from_single_expectation_validation_result(result,
# normalized_data_asset_name,
# batch_kwargs,
# metrics_store)
#
# mb_metrics = metrics_store.get_multi_batch_metrics(batch_kwargs_list)
#
# return mb_metrics
```
#### File: render/renderer/slack_renderer.py
```python
import datetime
from ...core.id_dict import BatchKwargs
from .renderer import Renderer
class SlackRenderer(Renderer):
def __init__(self):
super().__init__()
def render(self, validation_result=None):
# Defaults
timestamp = datetime.datetime.strftime(
datetime.datetime.now(datetime.timezone.utc), "%x %X %Z"
)
default_text = (
"No validation occurred. Please ensure you passed a validation_result."
)
status = "Failed :x:"
title_block = {
"type": "section",
"text": {"type": "mrkdwn", "text": default_text,},
}
query = {
"blocks": [title_block],
# this abbreviated root level "text" will show up in the notification and not the message
"text": default_text,
}
# TODO improve this nested logic
if validation_result:
expectation_suite_name = validation_result.meta.get(
"expectation_suite_name", "__no_expectation_suite_name__"
)
n_checks_succeeded = validation_result.statistics["successful_expectations"]
n_checks = validation_result.statistics["evaluated_expectations"]
run_id = validation_result.meta.get("run_id", "__no_run_id__")
batch_id = BatchKwargs(
validation_result.meta.get("batch_kwargs", {})
).to_id()
check_details_text = "*{}* of *{}* expectations were met".format(
n_checks_succeeded, n_checks
)
if validation_result.success:
status = "Success :tada:"
summary_text = """*Batch Validation Status*: {}
*Expectation suite name*: `{}`
*Run ID*: `{}`
*Batch ID*: `{}`
*Timestamp*: `{}`
*Summary*: {}""".format(
status,
expectation_suite_name,
run_id,
batch_id,
timestamp,
check_details_text,
)
query["blocks"][0]["text"]["text"] = summary_text
# this abbreviated root level "text" will show up in the notification and not the message
query["text"] = "{}: {}".format(expectation_suite_name, status)
if "result_reference" in validation_result.meta:
report_element = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "- *Validation Report*: {}".format(
validation_result.meta["result_reference"]
),
},
}
query["blocks"].append(report_element)
if "dataset_reference" in validation_result.meta:
dataset_element = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "- *Validation data asset*: {}".format(
validation_result.meta["dataset_reference"]
),
},
}
query["blocks"].append(dataset_element)
custom_blocks = self._custom_blocks(evr=validation_result)
if custom_blocks:
query["blocks"].append(custom_blocks)
documentation_url = "https://docs.greatexpectations.io/en/latest/tutorials/getting_started/set_up_data_docs.html#_getting_started__set_up_data_docs"
footer_section = {
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": "Learn how to review validation results in Data Docs: {}".format(
documentation_url
),
}
],
}
divider_block = {"type": "divider"}
query["blocks"].append(divider_block)
query["blocks"].append(footer_section)
return query
def _custom_blocks(self, evr):
return None
```
#### File: great_expectations/validation_operators/util.py
```python
import logging
import requests
logger = logging.getLogger(__name__)
def send_slack_notification(query, slack_webhook):
session = requests.Session()
try:
response = session.post(url=slack_webhook, json=query)
except requests.ConnectionError:
logger.warning(
"Failed to connect to Slack webhook at {url} "
"after {max_retries} retries.".format(url=slack_webhook, max_retries=10)
)
except Exception as e:
logger.error(str(e))
else:
if response.status_code != 200:
logger.warning(
"Request to Slack webhook at {url} "
"returned error {status_code}: {text}".format(
url=slack_webhook,
status_code=response.status_code,
text=response.text,
)
)
else:
return "Slack notification succeeded."
```
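`SlackRenderer.render` builds a Slack Block Kit payload and `send_slack_notification` posts it to an incoming webhook, so the two are typically used together. A minimal sketch of that wiring is below; the webhook URL is a placeholder, and passing `validation_result=None` simply produces the renderer's default "No validation occurred" message.
```python
from great_expectations.render.renderer.slack_renderer import SlackRenderer
from great_expectations.validation_operators.util import send_slack_notification

# Without a validation_result, render() falls back to its default placeholder text.
query = SlackRenderer().render(validation_result=None)

# Placeholder webhook URL; substitute a real Slack incoming-webhook URL.
status = send_slack_notification(query, "https://hooks.slack.com/services/T000/B000/XXXX")
print(status)  # "Slack notification succeeded." on HTTP 200, otherwise None (failure is logged)
```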
#### File: tests/cli/utils.py
```python
import traceback
from _pytest.logging import LogCaptureFixture
from click.testing import Result
def assert_dict_key_and_val_in_stdout(dict_, stdout):
"""Use when stdout contains color info and command chars"""
for key, val in dict_.items():
if isinstance(val, dict):
assert key in stdout
assert_dict_key_and_val_in_stdout(val, stdout)
else:
assert key in stdout
assert str(val) in stdout
def assert_no_logging_messages_or_tracebacks(my_caplog, click_result):
"""
Use this assertion in all CLI tests unless you have a very good reason.
Without this assertion, it is easy to let errors and tracebacks bubble up
to users without being detected, unless you are manually inspecting the
console output (stderr and stdout), as well as logging output from every
test.
Usage:
```
def test_my_stuff(caplog):
...
result = runner.invoke(...)
...
assert_no_logging_messages_or_tracebacks(caplog, result)
```
    :param my_caplog: the caplog pytest fixture
:param click_result: the Result object returned from click runner.invoke()
"""
assert_no_logging_messages(my_caplog)
assert_no_tracebacks(click_result)
def assert_no_logging_messages(my_caplog):
"""
Assert no logging output messages.
    :param my_caplog: the caplog pytest fixture
"""
assert isinstance(
my_caplog, LogCaptureFixture
), "Please pass in the caplog object from your test."
messages = my_caplog.messages
assert isinstance(messages, list)
if messages:
print("Found logging messages:\n")
print("\n".join([m for m in messages]))
assert not messages
def assert_no_tracebacks(click_result):
"""
Assert no tracebacks.
:param click_result: the Result object returned from click runner.invoke()
"""
assert isinstance(
click_result, Result
), "Please pass in the click runner invoke result object from your test."
if click_result.exc_info:
        # introspect the call stack to make sure no exceptions found their way through
# https://docs.python.org/2/library/sys.html#sys.exc_info
_type, value, _traceback = click_result.exc_info
if not isinstance(value, SystemExit):
# SystemExit is a known "good" exit type
print("".join(traceback.format_tb(_traceback)))
assert False, "Found exception of type {} with message {}".format(
_type, value
)
if not isinstance(click_result.exception, SystemExit):
        # Ignore a SystemExit, because some commands intentionally exit in an error state
assert not click_result.exception, "Found exception {}".format(
click_result.exception
)
assert (
"traceback" not in click_result.output.lower()
), "Found a traceback in the console output: {}".format(click_result.output)
assert (
"traceback" not in click_result.stdout.lower()
), "Found a traceback in the console output: {}".format(click_result.stdout)
try:
assert (
"traceback" not in click_result.stderr.lower()
), "Found a traceback in the console output: {}".format(click_result.stderr)
except ValueError as ve:
# sometimes stderr is not captured separately
pass
```
#### File: tests/core/test_urn.py
```python
from urllib.parse import parse_qs
import pytest
from pyparsing import ParseException
from great_expectations.core.urn import ge_urn
def test_ge_validations_urn():
# We should be able to parse validations urns
urn = (
"urn:great_expectations:validations:my_suite:expect_something.observed_value:query=s%20tring&query="
"string3&query2=string2"
)
res = ge_urn.parseString(urn)
assert res["urn_type"] == "validations"
assert res["expectation_suite_name"] == "my_suite"
assert res["metric_name"] == "expect_something.observed_value"
kwargs_dict = parse_qs(res["metric_kwargs"])
assert kwargs_dict == {"query": ["s tring", "string3"], "query2": ["string2"]}
# no kwargs is ok
urn = "urn:great_expectations:validations:my_suite:expect_something.observed_value"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "validations"
assert res["expectation_suite_name"] == "my_suite"
assert res["metric_name"] == "expect_something.observed_value"
assert "metric_kwargs" not in res
def test_ge_metrics_urn():
urn = "urn:great_expectations:metrics:20200403T1234.324Z:my_suite:expect_something.observed_value:column=mycol"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "metrics"
assert res["run_id"] == "20200403T1234.324Z"
assert res["expectation_suite_name"] == "my_suite"
assert res["metric_name"] == "expect_something.observed_value"
kwargs_dict = parse_qs(res["metric_kwargs"])
assert kwargs_dict == {"column": ["mycol"]}
# No kwargs is ok
urn = "urn:great_expectations:metrics:20200403T1234.324Z:my_suite:expect_something.observed_value"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "metrics"
assert res["run_id"] == "20200403T1234.324Z"
assert res["expectation_suite_name"] == "my_suite"
assert res["metric_name"] == "expect_something.observed_value"
assert "kwargs_dict" not in res
def test_ge_stores_urn():
urn = "urn:great_expectations:stores:my_store:mymetric:kw=param"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "stores"
assert res["store_name"] == "my_store"
assert res["metric_name"] == "mymetric"
kwargs_dict = parse_qs(res["metric_kwargs"])
assert kwargs_dict == {
"kw": ["param"],
}
# No kwargs is ok
urn = "urn:great_expectations:stores:my_store:mymetric"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "stores"
assert res["store_name"] == "my_store"
assert res["metric_name"] == "mymetric"
assert "metric_kwargs" not in res
def test_invalid_urn():
# Must start with "urn:great_expectations"
with pytest.raises(ParseException) as e:
ge_urn.parseString("not_a_ge_urn")
assert "not_a_ge_urn" in e.value.line
# Must have one of the recognized types
with pytest.raises(ParseException) as e:
ge_urn.parseString("urn:great_expectations:foo:bar:baz:bin:barg")
assert "urn:great_expectations:foo:bar:baz:bin:barg" in e.value.line
# Cannot have too many parts
with pytest.raises(ParseException) as e:
ge_urn.parseString(
"urn:great_expectations:validations:foo:bar:baz:bin:barg:boo"
)
assert "urn:great_expectations:validations:foo:bar:baz:bin:barg:boo" in e.value.line
```
#### File: tests/render/test_data_documentation_site_builder.py
```python
import os
import shutil
import pytest
from freezegun import freeze_time
from great_expectations import DataContext
from great_expectations.core import RunIdentifier
from great_expectations.data_context.store import ExpectationsStore, ValidationsStore
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
ValidationResultIdentifier,
)
from great_expectations.data_context.util import (
file_relative_path,
instantiate_class_from_config,
)
from great_expectations.render.renderer.site_builder import SiteBuilder
def assert_how_to_buttons(
context,
index_page_locator_info: str,
index_links_dict: dict,
show_how_to_buttons=True,
):
"""Helper function to assert presence or non-presence of how-to buttons and related content in various
Data Docs pages.
"""
# these are simple checks for presence of certain page elements
show_walkthrough_button = "Show Walkthrough"
walkthrough_modal = "Great Expectations Walkthrough"
cta_footer = (
"To continue exploring Great Expectations check out one of these tutorials..."
)
how_to_edit_suite_button = "How to Edit This Suite"
how_to_edit_suite_modal = "How to Edit This Expectation Suite"
action_card = "Actions"
how_to_page_elements_dict = {
"index_pages": [show_walkthrough_button, walkthrough_modal, cta_footer],
"expectation_suites": [
how_to_edit_suite_button,
how_to_edit_suite_modal,
show_walkthrough_button,
walkthrough_modal,
],
"validation_results": [
how_to_edit_suite_button,
how_to_edit_suite_modal,
show_walkthrough_button,
walkthrough_modal,
],
"profiling_results": [action_card, show_walkthrough_button, walkthrough_modal],
}
data_docs_site_dir = os.path.join(
context._context_root_directory,
context._project_config.data_docs_sites["local_site"]["store_backend"][
"base_directory"
],
)
page_paths_dict = {
"index_pages": [index_page_locator_info[7:]],
"expectation_suites": [
os.path.join(data_docs_site_dir, link_dict["filepath"])
for link_dict in index_links_dict.get("expectations_links", [])
],
"validation_results": [
os.path.join(data_docs_site_dir, link_dict["filepath"])
for link_dict in index_links_dict.get("validations_links", [])
],
"profiling_results": [
os.path.join(data_docs_site_dir, link_dict["filepath"])
for link_dict in index_links_dict.get("profiling_links", [])
],
}
for page_type, page_paths in page_paths_dict.items():
for page_path in page_paths:
with open(page_path, "r") as f:
page = f.read()
for how_to_element in how_to_page_elements_dict[page_type]:
if show_how_to_buttons:
assert how_to_element in page
else:
assert how_to_element not in page
@freeze_time("09/26/2019 13:42:41")
@pytest.mark.rendered_output
def test_configuration_driven_site_builder(
site_builder_data_context_with_html_store_titanic_random,
):
context = site_builder_data_context_with_html_store_titanic_random
context.add_validation_operator(
"validate_and_store",
{
"class_name": "ActionListValidationOperator",
"action_list": [
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
"target_store_name": "validations_store",
},
},
{
"name": "extract_and_store_eval_parameters",
"action": {
"class_name": "StoreEvaluationParametersAction",
"target_store_name": "evaluation_parameter_store",
},
},
],
},
)
# profiling the Titanic datasource will generate one expectation suite and one validation
# that is a profiling result
datasource_name = "titanic"
data_asset_name = "Titanic"
profiler_name = "BasicDatasetProfiler"
generator_name = "subdir_reader"
context.profile_datasource(datasource_name)
    # Creating another validation result using the profiler's suite (no need for a new
    # expectation suite in this test). Having two validation results - one with run id
    # "profiling" - lets us test the run_name_filter logic that decides which validation
    # results are included in the profiling and validation sections.
batch_kwargs = context.build_batch_kwargs(
datasource=datasource_name,
batch_kwargs_generator=generator_name,
data_asset_name=data_asset_name,
)
expectation_suite_name = "{}.{}.{}.{}".format(
datasource_name, generator_name, data_asset_name, profiler_name
)
batch = context.get_batch(
batch_kwargs=batch_kwargs, expectation_suite_name=expectation_suite_name,
)
run_id = RunIdentifier(run_name="test_run_id_12345")
context.run_validation_operator(
assets_to_validate=[batch],
run_id=run_id,
validation_operator_name="validate_and_store",
)
data_docs_config = context._project_config.data_docs_sites
local_site_config = data_docs_config["local_site"]
validations_set = set(context.stores["validations_store"].list_keys())
assert len(validations_set) == 6
assert (
ValidationResultIdentifier(
expectation_suite_identifier=ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
),
run_id="test_run_id_12345",
batch_identifier=batch.batch_id,
)
in validations_set
)
assert (
ValidationResultIdentifier(
expectation_suite_identifier=ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
),
run_id="profiling",
batch_identifier=batch.batch_id,
)
in validations_set
)
site_builder = SiteBuilder(
data_context=context,
runtime_environment={"root_directory": context.root_directory},
**local_site_config
)
res = site_builder.build()
index_page_locator_info = res[0]
index_links_dict = res[1]
# assert that how-to buttons and related elements are rendered (default behavior)
assert_how_to_buttons(context, index_page_locator_info, index_links_dict)
# print(json.dumps(index_page_locator_info, indent=2))
assert (
index_page_locator_info
== "file://"
+ context.root_directory
+ "/uncommitted/data_docs/local_site/index.html"
)
# print(json.dumps(index_links_dict, indent=2))
assert "site_name" in index_links_dict
assert "expectations_links" in index_links_dict
assert len(index_links_dict["expectations_links"]) == 5
assert "validations_links" in index_links_dict
assert (
len(index_links_dict["validations_links"]) == 1
), """
The only rendered validation should be the one not generated by the profiler
"""
assert "profiling_links" in index_links_dict
assert len(index_links_dict["profiling_links"]) == 5
# save documentation locally
os.makedirs("./tests/render/output", exist_ok=True)
os.makedirs("./tests/render/output/documentation", exist_ok=True)
if os.path.isdir("./tests/render/output/documentation"):
shutil.rmtree("./tests/render/output/documentation")
shutil.copytree(
os.path.join(
site_builder_data_context_with_html_store_titanic_random.root_directory,
"uncommitted/data_docs/",
),
"./tests/render/output/documentation",
)
# let's create another validation result and run the site builder to add it
# to the data docs
# the operator does not have an StoreValidationResultAction action configured, so the site
# will not be updated without our call to site builder
expectation_suite_path_component = expectation_suite_name.replace(".", "/")
validation_result_page_path = os.path.join(
site_builder.site_index_builder.target_store.store_backends[
ValidationResultIdentifier
].full_base_directory,
"validations",
expectation_suite_path_component,
run_id.run_name,
run_id.run_time.strftime("%Y%m%dT%H%M%S.%fZ"),
batch.batch_id + ".html",
)
ts_last_mod_0 = os.path.getmtime(validation_result_page_path)
run_id = RunIdentifier(run_name="test_run_id_12346")
operator_result = context.run_validation_operator(
assets_to_validate=[batch],
run_id=run_id,
validation_operator_name="validate_and_store",
)
validation_result_id = operator_result.list_validation_result_identifiers()[0]
res = site_builder.build(resource_identifiers=[validation_result_id])
index_links_dict = res[1]
# verify that an additional validation result HTML file was generated
assert len(index_links_dict["validations_links"]) == 2
site_builder.site_index_builder.target_store.store_backends[
ValidationResultIdentifier
].full_base_directory
# verify that the validation result HTML file rendered in the previous run was NOT updated
ts_last_mod_1 = os.path.getmtime(validation_result_page_path)
assert ts_last_mod_0 == ts_last_mod_1
    # verify the site builder method that returns the URL of the HTML file that
    # renders a given resource
new_validation_result_page_path = os.path.join(
site_builder.site_index_builder.target_store.store_backends[
ValidationResultIdentifier
].full_base_directory,
"validations",
expectation_suite_path_component,
run_id.run_name,
run_id.run_time.strftime("%Y%m%dT%H%M%S.%fZ"),
batch.batch_id + ".html",
)
html_url = site_builder.get_resource_url(resource_identifier=validation_result_id)
assert "file://" + new_validation_result_page_path == html_url
html_url = site_builder.get_resource_url()
assert (
"file://"
+ os.path.join(
site_builder.site_index_builder.target_store.store_backends[
ValidationResultIdentifier
].full_base_directory,
"index.html",
)
== html_url
)
team_site_config = data_docs_config["team_site"]
team_site_builder = SiteBuilder(
data_context=context,
runtime_environment={"root_directory": context.root_directory},
**team_site_config
)
team_site_builder.clean_site()
obs = [
url_dict
for url_dict in context.get_docs_sites_urls(site_name="team_site")
if url_dict.get("site_url")
]
assert len(obs) == 0
# exercise clean_site
site_builder.clean_site()
obs = [
url_dict
for url_dict in context.get_docs_sites_urls()
if url_dict.get("site_url")
]
assert len(obs) == 0
# restore site
context = site_builder_data_context_with_html_store_titanic_random
site_builder = SiteBuilder(
data_context=context,
runtime_environment={"root_directory": context.root_directory},
**local_site_config
)
res = site_builder.build()
@pytest.mark.rendered_output
def test_configuration_driven_site_builder_without_how_to_buttons(
site_builder_data_context_with_html_store_titanic_random,
):
context = site_builder_data_context_with_html_store_titanic_random
context.add_validation_operator(
"validate_and_store",
{
"class_name": "ActionListValidationOperator",
"action_list": [
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
"target_store_name": "validations_store",
},
},
{
"name": "extract_and_store_eval_parameters",
"action": {
"class_name": "StoreEvaluationParametersAction",
"target_store_name": "evaluation_parameter_store",
},
},
],
},
)
# profiling the Titanic datasource will generate one expectation suite and one validation
# that is a profiling result
datasource_name = "titanic"
data_asset_name = "Titanic"
profiler_name = "BasicDatasetProfiler"
generator_name = "subdir_reader"
context.profile_datasource(datasource_name)
    # Creating another validation result using the profiler's suite (no need for a new
    # expectation suite in this test). Having two validation results - one with run id
    # "profiling" - lets us test the run_name_filter logic that decides which validation
    # results are included in the profiling and validation sections.
batch_kwargs = context.build_batch_kwargs(
datasource=datasource_name,
batch_kwargs_generator=generator_name,
name=data_asset_name,
)
expectation_suite_name = "{}.{}.{}.{}".format(
datasource_name, generator_name, data_asset_name, profiler_name
)
batch = context.get_batch(
batch_kwargs=batch_kwargs, expectation_suite_name=expectation_suite_name,
)
run_id = "test_run_id_12345"
context.run_validation_operator(
assets_to_validate=[batch],
run_id=run_id,
validation_operator_name="validate_and_store",
)
data_docs_config = context._project_config.data_docs_sites
local_site_config = data_docs_config["local_site"]
# set this flag to false in config to hide how-to buttons and related elements
local_site_config["show_how_to_buttons"] = False
site_builder = SiteBuilder(
data_context=context,
runtime_environment={"root_directory": context.root_directory},
**local_site_config
)
res = site_builder.build()
index_page_locator_info = res[0]
index_links_dict = res[1]
assert_how_to_buttons(
context, index_page_locator_info, index_links_dict, show_how_to_buttons=False
)
def test_site_builder_with_custom_site_section_builders_config(tmp_path_factory):
"""Test that site builder can handle partially specified custom site_section_builders config"""
base_dir = str(tmp_path_factory.mktemp("project_dir"))
project_dir = os.path.join(base_dir, "project_path")
os.mkdir(project_dir)
# fixture config swaps site section builder source stores and specifies custom run_name_filters
shutil.copy(
file_relative_path(
__file__, "../test_fixtures/great_expectations_custom_local_site_config.yml"
),
str(os.path.join(project_dir, "great_expectations.yml")),
)
context = DataContext(context_root_dir=project_dir)
local_site_config = context._project_config.data_docs_sites.get("local_site")
module_name = "great_expectations.render.renderer.site_builder"
site_builder = instantiate_class_from_config(
config=local_site_config,
runtime_environment={
"data_context": context,
"root_directory": context.root_directory,
"site_name": "local_site",
},
config_defaults={"module_name": module_name},
)
site_section_builders = site_builder.site_section_builders
expectations_site_section_builder = site_section_builders["expectations"]
assert isinstance(expectations_site_section_builder.source_store, ValidationsStore)
validations_site_section_builder = site_section_builders["validations"]
assert isinstance(validations_site_section_builder.source_store, ExpectationsStore)
assert validations_site_section_builder.run_name_filter == {
"ne": "custom_validations_filter"
}
profiling_site_section_builder = site_section_builders["profiling"]
assert isinstance(validations_site_section_builder.source_store, ExpectationsStore)
assert profiling_site_section_builder.run_name_filter == {
"eq": "custom_profiling_filter"
}
@freeze_time("09/24/2019 23:18:36")
def test_site_builder_usage_statistics_enabled(
site_builder_data_context_with_html_store_titanic_random,
):
context = site_builder_data_context_with_html_store_titanic_random
sites = (
site_builder_data_context_with_html_store_titanic_random._project_config_with_variables_substituted.data_docs_sites
)
local_site_config = sites["local_site"]
site_builder = instantiate_class_from_config(
config=local_site_config,
runtime_environment={
"data_context": context,
"root_directory": context.root_directory,
"site_name": "local_site",
},
config_defaults={
"module_name": "great_expectations.render.renderer.site_builder"
},
)
site_builder_return_obj = site_builder.build()
index_page_path = site_builder_return_obj[0]
links_dict = site_builder_return_obj[1]
expectation_suite_pages = [
file_relative_path(index_page_path, expectation_suite_link_dict["filepath"])
for expectation_suite_link_dict in links_dict["expectations_links"]
]
profiling_results_pages = [
file_relative_path(index_page_path, profiling_link_dict["filepath"])
for profiling_link_dict in links_dict["profiling_links"]
]
page_paths_to_check = (
[index_page_path] + expectation_suite_pages + profiling_results_pages
)
expected_logo_url = "https://great-expectations-web-assets.s3.us-east-2.amazonaws.com/logo-long.png?d=20190924T231836.000000Z&dataContextId=f43d4897-385f-4366-82b0-1a8eda2bf79c"
for page_path in page_paths_to_check:
with open(page_path[7:]) as f:
page_contents = f.read()
assert expected_logo_url in page_contents
@freeze_time("09/24/2019 23:18:36")
def test_site_builder_usage_statistics_disabled(
site_builder_data_context_with_html_store_titanic_random,
):
context = site_builder_data_context_with_html_store_titanic_random
context._project_config.anonymous_usage_statistics = {
"enabled": False,
"data_context_id": "f43d4897-385f-4366-82b0-1a8eda2bf79c",
}
data_context_id = context.anonymous_usage_statistics["data_context_id"]
sites = (
site_builder_data_context_with_html_store_titanic_random._project_config_with_variables_substituted.data_docs_sites
)
local_site_config = sites["local_site"]
site_builder = instantiate_class_from_config(
config=local_site_config,
runtime_environment={
"data_context": context,
"root_directory": context.root_directory,
"site_name": "local_site",
},
config_defaults={
"module_name": "great_expectations.render.renderer.site_builder"
},
)
site_builder_return_obj = site_builder.build()
index_page_path = site_builder_return_obj[0]
links_dict = site_builder_return_obj[1]
expectation_suite_pages = [
file_relative_path(index_page_path, expectation_suite_link_dict["filepath"])
for expectation_suite_link_dict in links_dict["expectations_links"]
]
profiling_results_pages = [
file_relative_path(index_page_path, profiling_link_dict["filepath"])
for profiling_link_dict in links_dict["profiling_links"]
]
page_paths_to_check = (
[index_page_path] + expectation_suite_pages + profiling_results_pages
)
expected_logo_url = "https://great-expectations-web-assets.s3.us-east-2.amazonaws.com/logo-long.png?d=20190924T231836.000000Z"
for page_path in page_paths_to_check:
with open(page_path[7:]) as f:
page_contents = f.read()
assert expected_logo_url in page_contents
assert data_context_id not in page_contents
``` |
{
"source": "joshuatee/Posh-Stem",
"score": 3
} |
#### File: examples/client_usage/Using_SocksiPy.py
```python
import socks # SocksiPy module
import socket
import urllib
# Set socks proxy and wrap the urllib module
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', SOCKS_PORT)
socket.socket = socks.socksocket
# Perform DNS resolution through the socket
def getaddrinfo(*args):
return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
socket.getaddrinfo = getaddrinfo
def query(url):
"""
Uses urllib to fetch a site using SocksiPy for Tor over the SOCKS_PORT.
"""
try:
return urllib.urlopen(url).read()
except:
return "Unable to reach %s" % url
``` |
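A usage sketch, meant to be appended to the script above: it assumes Tor (or another SOCKS5 proxy) is listening on `127.0.0.1:SOCKS_PORT` and fetches a test page through it. Like the script itself, this is Python 2 style.
```python
# Appended to the script above; assumes a SOCKS5 proxy (e.g. Tor) on 127.0.0.1:SOCKS_PORT.
if __name__ == '__main__':
    print query('https://check.torproject.org/')[:200]
```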
{
"source": "joshuathayer/spot",
"score": 2
} |
#### File: joshuathayer/spot/example.py
```python
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, \
QVBoxLayout, QListWidget, QListWidgetItem
import spot.system
import logging
import time
import requests
import uuid
import random
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__name__)
class Timer:
def act(self, msg, tell, create):
target = msg['target']
target_msg = msg['target_msg']
time.sleep(msg['sleep'])
tell(target, target_msg)
class Counter:
def __init__(self):
self.count = 0
def act(self, msg, tell, create):
self.count += 1
tell('fetcher', self.count)
class Fetcher:
def act(self, msg, tell, create):
req_id = str(uuid.uuid4())
tell('receiver', {'req_id': req_id,
'action': 'newreq'})
timed_actor = create(Timer())
tell(timed_actor.name, {'sleep': 2,
'target': 'receiver',
'target_msg': {'req_id': req_id,
'action': 'timeout'}})
# simulate a slow connection
time.sleep(random.randint(0, 3))
response = requests.get("https://jsonplaceholder.typicode.com/todos/{}".format(msg))
resp = response.json()
tell('receiver', {'req_id': req_id,
'action': 'resp',
'resp': resp})
class Receiver:
def __init__(self):
self.current_reqs = {}
def act(self, msg, tell, create):
req_id = msg['req_id']
action = msg['action']
if action == 'newreq':
self.current_reqs[req_id] = True
elif action == 'timeout' and req_id in self.current_reqs:
logger.info("Request timed out!")
del self.current_reqs[req_id]
elif action == 'resp' and req_id in self.current_reqs:
del self.current_reqs[req_id]
resp = msg['resp']
tell('db', {'type': 'new-todo',
'body': resp})
tell('item-list', {'type': 'new-todo',
'body': resp})
else:
            logger.warning("Not sure what to do with {} {}".format(action, req_id))
# single point of DB mutation
class DB:
def __init__(self):
self.state = {}
def act(self, msg, tell, create):
msg_type = msg['type']
if msg_type == 'new-todo':
title = msg['body']['title']
msg_id = msg['body']['id']
self.state[msg_id] = title
class ItemList:
def __init__(self, item_list):
self.item_list = item_list
def act(self, msg, tell, create):
msg_type = msg['type']
if msg_type == 'new-todo':
title = msg['body']['title']
QListWidgetItem(title, self.item_list)
app = QApplication([])
window = QWidget()
layout = QVBoxLayout()
get = QPushButton('Fetch Item')
item_list = QListWidget()
system = spot.system.ActorSystem(app)
system.create_actor(Counter(), 'counter')
system.create_actor(DB(), 'db')
system.create_actor(Fetcher(), 'fetcher')
system.create_actor(Receiver(), 'receiver')
system.create_actor(ItemList(item_list), 'item-list')
get.clicked.connect(lambda: system.tell('counter', "click!"))
layout.addWidget(get)
layout.addWidget(item_list)
window.setLayout(layout)
window.show()
app.exec_()
```
#### File: spot/spot/system.py
```python
import uuid
import logging
import random
import collections
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, \
QEvent, QRunnable, QThreadPool, QMutex
logger = logging.getLogger(__name__)
class Actor(QRunnable):
done = pyqtSignal()
def __init__(self, runnable, tell, create, done, name=None):
super().__init__()
if name is None:
name = str(uuid.uuid4())
self.setAutoDelete(False)
self.runnable = runnable
self.tell = tell
self.create = create
self.name = name
self.done = done
self.inbox = collections.deque()
@pyqtSlot()
def run(self):
# blocks...
if len(list(self.inbox)) > 0:
self.runnable.act(self.inbox.pop(), self.tell, self.create)
else:
logger.warning("Unexpectedly ran an actor ({}) with an empty inbox.".format(self.name))
# unblocked...
self.done(self.name)
class ActorSystem(QObject):
actor_event = pyqtSignal(object)
def __init__(self, app):
super().__init__()
self.app = app
self.actors = {}
self.threads = QThreadPool()
self.tickmutex = QMutex()
self.running = set()
self.actor_event.connect(self.event)
def event(self, e):
self.tick()
return False
def create_actor(self, actor_class, actor_name=None):
actor = Actor(actor_class,
lambda t, m: self.tell(t, m),
lambda a: self.create_actor(a),
self.actor_done,
actor_name)
name = actor.name
if name in self.actors:
logger.info("Replacing existing actor at {}".format(name))
self.actors[name] = actor
self.actor_event.emit(None)
return actor
def tell(self, target, message):
# this can run in an arbitrary thread
# we'll assume `appendLeft` is thread safe
if target in self.actors:
self.actors[target].inbox.appendleft(message)
else:
logger.info("Was asked to add to an actor which does not exist")
self.actor_event.emit(None)
def actor_done(self, name):
self.running.remove(name)
self.actor_event.emit(None)
def tick(self):
if self.tickmutex.tryLock():
readys = list(filter(lambda v: True
if len(v.inbox) > 0
and v.name not in self.running
else False, self.actors.values()))
random.shuffle(readys)
for actor in readys:
self.running.add(actor.name)
self.threads.start(actor)
self.tickmutex.unlock()
else:
logger.info("Failed to get tick mutex")
``` |
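A minimal usage sketch for `ActorSystem` outside of the GUI example above: an actor is any object with an `act(msg, tell, create)` method, registered via `create_actor` and messaged via `tell`. A `QApplication` is still required because dispatch is driven by Qt signals; the timer here just closes the event loop after the message has been handled. Treat this as an illustration, not part of the library.
```python
import logging
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication
import spot.system

logging.basicConfig(level=logging.INFO)

class Echo:
    def act(self, msg, tell, create):
        logging.info("Echo received: %r", msg)

app = QApplication([])
system = spot.system.ActorSystem(app)
system.create_actor(Echo(), 'echo')
system.tell('echo', 'hello actors')   # queued and dispatched on a worker thread
QTimer.singleShot(500, app.quit)      # quit shortly after the message is processed
app.exec_()
```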
{
"source": "joshuathompsonlindley/OpenWordle",
"score": 2
} |
#### File: support/preprocessors/transform_env.py
```python
import argparse
import os
import re
import subprocess
from typing import Dict, List
CAPTURE = re.compile(r'{env: (.*?)}')
parser = argparse.ArgumentParser(
description='Transforms environment variables to their values in code.')
parser.add_argument('-f', '--files', nargs='+',
help='List of files to process. (Required)', required=True)
def get_sed_commands(path: str) -> List[List[str]]:
commitable_changes: List[List[str]] = []
file: str = ''
with open(path, 'r', encoding="UTF-8") as opened:
file = ''.join(opened.readlines())
matches = re.findall(CAPTURE, file)
for match in matches:
commitable_changes.append([
'sed', '-i', '-e',
's/{env: ' + match + '}/' + os.environ[match] + '/g',
path
])
return commitable_changes
def processor(file_list: List[str]) -> Dict[str, List[List[str]]]:
change_list: Dict[str, List[List[str]]] = {}
for file in file_list:
commands = get_sed_commands(file)
if commands:
change_list[file] = commands
return change_list
def commit_changes(change_list: Dict[str, List[List[str]]]) -> None:
for file, commands in change_list.items():
print(f'+ Transforming environment variables for file {file}')
for command in commands:
try:
subprocess.run(command, check=True)
except subprocess.CalledProcessError:
print('+ Error running that command.')
if __name__ == '__main__':
args = parser.parse_args()
files: List[str] = parser.parse_args()._get_kwargs()[0][1]
changes: Dict[str, List[List[str]]] = processor(files)
commit_changes(changes)
```
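A hypothetical end-to-end illustration of what `get_sed_commands` produces: a temporary file containing an `{env: ...}` placeholder plus the environment variable it names. It assumes the script above is importable as a `transform_env` module; the variable name and file content are made up.
```python
# Hypothetical demonstration; assumes the script above is importable as `transform_env`.
import os
import tempfile

from transform_env import get_sed_commands

os.environ["BUILD_ID"] = "20240101"

with tempfile.NamedTemporaryFile("w", suffix=".ts", delete=False) as tmp:
    tmp.write("export const buildId = '{env: BUILD_ID}';\n")
    path = tmp.name

print(get_sed_commands(path))
# -> [['sed', '-i', '-e', 's/{env: BUILD_ID}/20240101/g', '/tmp/...']]
```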
#### File: OpenWordle/openwordle_server/model.py
```python
from datetime import datetime
from typing import Optional
from openwordle_server import database
class Words(database.Model):
__tablename__ = 'words'
date = database.Column(database.Text, primary_key=True,
nullable=False, unique=True)
word = database.Column(database.Text, nullable=False, unique=True)
def get_word_for_date() -> str:
date: datetime = datetime.today()
date_string: str = date.strftime('%Y%m%d')
result: Words = Words.query.filter(Words.date == date_string).first()
word: str = result.word
return word
def get_word_by_word(word: str) -> Optional[Words]:
return Words.query.filter(Words.word == word).first()
```
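A small usage sketch for the helpers above. `Words.query` needs an active Flask application context, so this assumes the package exposes the configured app object (the exact name or factory may differ in this project); the guessed word is also just an example.
```python
# Illustrative only; the `app` import is an assumption about the package layout.
from openwordle_server import app
from openwordle_server.model import get_word_for_date, get_word_by_word

with app.app_context():
    print(get_word_for_date())                    # today's word, keyed by YYYYMMDD
    print(get_word_by_word("crane") is not None)  # True if "crane" is in the words table
```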
#### File: test/integration/test_is_valid_word_api.py
```python
from typing import List
from openwordle_server.config import Config
from openwordle_server.test.context import OpenWordleApiTestCase, ApiResponse
class IsValidWordApiTest(OpenWordleApiTestCase):
def setUp(self) -> None:
OpenWordleApiTestCase.setUpClass()
self.correct_words: List[str] = []
self.wrong_words: List[str] = [
('a' * Config.GAMEBOARD_ROW_LENGTH),
('b' * Config.GAMEBOARD_ROW_LENGTH),
('c' * Config.GAMEBOARD_ROW_LENGTH),
('d' * Config.GAMEBOARD_ROW_LENGTH),
('e' * Config.GAMEBOARD_ROW_LENGTH)
]
with open(Config.TXT_FILE, 'r', encoding='UTF-8') as file:
for _ in range(5):
line: str = file.readline().lower().replace('\n', '')
self.correct_words.append(line)
def get(self, word: str) -> ApiResponse:
request = self.api.get(
f'/is_valid_word/{word}',
follow_redirects=True
)
return ApiResponse(
request.status_code, request.json.get('is_valid_word')
)
def test_is_valid_word_correct_1(self):
result: ApiResponse = self.get(self.correct_words[0])
self.assertEqual(result.response_code, 200)
self.assertTrue(result.response_txt)
def test_is_valid_word_correct_2(self):
result: ApiResponse = self.get(self.correct_words[1])
self.assertEqual(result.response_code, 200)
self.assertTrue(result.response_txt)
def test_is_valid_word_correct_3(self):
result: ApiResponse = self.get(self.correct_words[2])
self.assertEqual(result.response_code, 200)
self.assertTrue(result.response_txt)
def test_is_valid_word_correct_4(self):
result: ApiResponse = self.get(self.correct_words[3])
self.assertEqual(result.response_code, 200)
self.assertTrue(result.response_txt)
def test_is_valid_word_correct_5(self):
result: ApiResponse = self.get(self.correct_words[4])
self.assertEqual(result.response_code, 200)
self.assertTrue(result.response_txt)
def test_is_valid_word_wrong_1(self):
result: ApiResponse = self.get(self.wrong_words[0])
self.assertEqual(result.response_code, 200)
self.assertFalse(result.response_txt)
def test_is_valid_word_wrong_2(self):
result: ApiResponse = self.get(self.wrong_words[1])
self.assertEqual(result.response_code, 200)
self.assertFalse(result.response_txt)
def test_is_valid_word_wrong_3(self):
result: ApiResponse = self.get(self.wrong_words[2])
self.assertEqual(result.response_code, 200)
self.assertFalse(result.response_txt)
def test_is_valid_word_wrong_4(self):
result: ApiResponse = self.get(self.wrong_words[3])
self.assertEqual(result.response_code, 200)
self.assertFalse(result.response_txt)
def test_is_valid_word_wrong_5(self):
result: ApiResponse = self.get(self.wrong_words[4])
self.assertEqual(result.response_code, 200)
self.assertFalse(result.response_txt)
```
#### File: test/unit/test_highlight_api.py
```python
from unittest import TestCase
from openwordle_server.highlight import highlight_word
class HighlightAPITestCase(TestCase):
def test_wrong_word(self) -> None:
correct_word: str = 'tests'
given_word: str = 'qwryu'
result: str = highlight_word(given_word, correct_word)
expected: str = '-----'
self.assertEqual(result, expected)
def test_correct_word(self) -> None:
correct_word: str = 'tests'
given_word: str = 'tests'
result: str = highlight_word(given_word, correct_word)
expected: str = '+++++'
self.assertEqual(result, expected)
def test_simple_most_correct_wrong_letter(self) -> None:
correct_word: str = 'tests'
given_word: str = 'testa'
result: str = highlight_word(given_word, correct_word)
expected: str = '++++-'
self.assertEqual(result, expected)
def test_complex_most_correct_wrong_letter(self) -> None:
correct_word: str = 'tests'
given_word: str = 'tasts'
result: str = highlight_word(given_word, correct_word)
expected: str = '+-+++'
self.assertEqual(result, expected)
def test_simple_jumbled_word(self) -> None:
correct_word: str = 'tests'
given_word: str = 'stest'
result: str = highlight_word(given_word, correct_word)
expected: str = '?????'
self.assertEqual(result, expected)
def test_complex_jumbled_word(self) -> None:
correct_word: str = 'tests'
given_word: str = 'tasst'
result: str = highlight_word(given_word, correct_word)
expected: str = '+-+??'
self.assertEqual(result, expected)
def test_lookahead_jumbled_word_in_right_place(self) -> None:
correct_word: str = 'tests'
given_word: str = 'sests'
result: str = highlight_word(given_word, correct_word)
expected: str = '-++++'
self.assertEqual(result, expected)
def test_lookahead_jumbled_word_in_wrong_place(self) -> None:
correct_word: str = 'tests'
given_word: str = 'sestt'
result: str = highlight_word(given_word, correct_word)
expected: str = '?+++?'
self.assertEqual(result, expected)
``` |
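The tests above pin down the expected semantics of `highlight_word`: '+' for a letter in the right position, '?' for a letter present elsewhere (counted against the letters not already matched exactly), and '-' otherwise. Below is a minimal two-pass sketch consistent with those cases; it illustrates the algorithm the tests describe and is not necessarily the project's actual implementation.
```python
from collections import Counter

def highlight_sketch(given: str, correct: str) -> str:
    # First pass: mark exact-position matches and count the remaining correct letters.
    result = ['-'] * len(given)
    remaining = Counter()
    for i, (g, c) in enumerate(zip(given, correct)):
        if g == c:
            result[i] = '+'
        else:
            remaining[c] += 1
    # Second pass: letters present elsewhere (and still unaccounted for) get '?'.
    for i, g in enumerate(given):
        if result[i] == '-' and remaining[g] > 0:
            result[i] = '?'
            remaining[g] -= 1
    return ''.join(result)

assert highlight_sketch('sestt', 'tests') == '?+++?'
assert highlight_sketch('tasst', 'tests') == '+-+??'
```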
{
"source": "JoshuaTianYangLiu/tcgen",
"score": 4
} |
#### File: tcgen/tcgen/generator.py
```python
from abc import ABC, abstractmethod
class Generator(ABC):
@abstractmethod
def generate(self, case_num):
pass
def p(self, *args):
self.output += ' '.join(map(str, args))
self.output += '\n'
print = p
def get_test_cases(self, N):
ret = []
for case_num in range(N):
self.output = ''
self.generate(case_num)
ret.append(self.output)
return ret
def get_test_case(self):
self.output = ''
self.generate(1)
return self.output
```
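A short usage sketch for the `Generator` base class: subclass it, implement `generate`, and emit lines with `self.print` (the alias for `p` defined above). The import paths below are assumed from the repository layout (`tcgen/generator.py`, `tcgen/utils`).
```python
# Usage sketch; import paths are assumed from the repository layout shown here.
from tcgen.generator import Generator
from tcgen.utils import random

class ArrayCase(Generator):
    def generate(self, case_num):
        # One test case: a length N followed by N random values.
        n = random.randint(1, 10)
        self.print(n)
        self.print(*[random.randint(1, 100) for _ in range(n)])

random.seed(0)
gen = ArrayCase()
print(gen.get_test_case())    # a single formatted test case
print(gen.get_test_cases(3))  # a list of three independently generated cases
```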
#### File: tcgen/tests/test_random.py
```python
from tcgen.utils.constants import LOWERCASE
from tcgen.utils import random, InvalidRangeException
import pytest
class TestRandom:
def setup_method(self):
random.seed(0)
def test_randint(self):
with pytest.raises(TypeError):
random.randint()
with pytest.raises(TypeError):
random.randint(1, 2, 3, 4)
with pytest.raises(InvalidRangeException):
random.randint(10, 1)
with pytest.raises(InvalidRangeException):
random.randint(1, 2, False)
assert random.randint(1, 100000) == 50495
assert random.randint(L=12345, U=123456) == 111691
assert random.randint(1, 3, False) == 2
def test_wrandint(self):
with pytest.raises(TypeError):
random.wrandint()
with pytest.raises(InvalidRangeException):
random.wrandint(10, 1)
with pytest.raises(InvalidRangeException):
random.wrandint(1, 2, inclusive=False)
assert random.wrandint(1, 100000) == 99347
assert random.wrandint(1, 3, inclusive=False) == 2
assert random.wrandint(1, 100000, -10) == 12430
def test_noise(self):
with pytest.raises(InvalidRangeException):
random.noise(10, 1, [1, 1, 2, 3, 4])
assert random.noise(1, 1000, [0, 0, 0, 0, 9999, 0, 0]) == [865, 395, 777, 912, 10430, 42, 266]
assert random.noise(1, 100, []) == []
def test_randfloat(self):
with pytest.raises(InvalidRangeException):
random.randfloat(1.15, 1.13, 2)
with pytest.raises(InvalidRangeException):
random.randfloat(1.00, 1.02, 1, inclusive=False)
with pytest.raises(InvalidRangeException):
random.randfloat(1.0, 1.1, 1, inclusive=False)
assert random.randfloat(1.15, 2.35, 2) == 2.22
assert random.randfloat(1.15, 2.35, 3) == 1.938
assert random.randfloat(100, 200, 1, inclusive=False) == 177.7
assert random.randfloat(1.00, 1.02, 2, inclusive=False) == 1.01
assert random.randfloat(1.0, 1.2, 1, inclusive=False) == 1.1
def test_wrandfloat(self):
with pytest.raises(InvalidRangeException):
random.wrandfloat(1.15, 1.13, 2)
with pytest.raises(InvalidRangeException):
random.wrandfloat(1.00, 1.02, 1, inclusive=False)
with pytest.raises(InvalidRangeException):
random.wrandfloat(1.0, 1.1, 1, inclusive=False)
assert random.wrandfloat(1.15, 2.35, 2) == 2.27
assert random.wrandfloat(1.15, 2.35, 3, wcnt=-10) == 1.435
assert random.wrandfloat(100, 200, 1, inclusive=False) == 181.9
assert random.wrandfloat(100, 200, 1, wcnt=-10, inclusive=False) == 110.2
def test_choice(self):
with pytest.raises(TypeError):
random.choice('')
assert random.choice('.#') == '#'
assert random.choice('.#') == '#'
assert random.choice('.#') == '.'
assert random.choice(LOWERCASE) == 'i'
def test_wchoice(self):
assert random.wchoice(LOWERCASE, list(range(1, len(LOWERCASE) + 1)), wcnt=10) == 'a'
assert random.wchoice(LOWERCASE, list(range(1, len(LOWERCASE) + 1)), wcnt=-50) == 'z'
with pytest.raises(TypeError):
random.wchoice('', [], wcnt=-50)
with pytest.raises(TypeError):
random.wchoice('aaa', [], wcnt=-50)
def test_randprime(self):
with pytest.raises(TypeError):
random.randprime()
with pytest.raises(TypeError):
random.randprime(1, 2, 3, 4)
with pytest.raises(InvalidRangeException):
random.randprime(10, 1)
with pytest.raises(InvalidRangeException):
random.randprime(1, 2, False)
assert random.randprime(1, 100000) == 50497
assert random.randprime(L=12345, U=123456) == 111697
assert random.randprime(1, 3, False) == 2
def test_wrandprime(self):
with pytest.raises(TypeError):
random.wrandprime()
with pytest.raises(InvalidRangeException):
random.wrandprime(10, 1)
with pytest.raises(InvalidRangeException):
random.wrandprime(1, 2, inclusive=False)
with pytest.raises(ValueError):
random.wrandprime(8, 10)
assert random.wrandprime(1, 100000) == 99347
assert random.wrandprime(1, 3, inclusive=False) == 2
assert random.wrandprime(1, 100000, -10) == 12433
``` |
{
"source": "JoshuaTPritchett/30DaysCoding",
"score": 2
} |
#### File: python_programming/basics/exceptions.py
```python
class MyError(Exception):
pass
class MylistError(MyError):
def __init__(self, expression, message):
self.expression_error = expression
self.message = message
```
#### File: python_programming/basics/module.py
```python
def print_test():
print 'yo you made your first module'
test_string = 'YO THIS IS A TEST STRING'
```
#### File: python_programming/basics/mylist.py
```python
import random
class MyList(object):
def __init__(self):
self.mylist = []
def fill_list(self):
for i in xrange(60):
self.mylist.append(random.randint(1,1000))
def empty_list(self):
#empties memory location
# self.mylist = [] will repoint the pointer
# to a new list in memory
self.mylist[:] = []
def append(self, value):
self.mylist.append(value)
def append_position(self, position, value):
self.mylist.insert(position, value)
```
#### File: python_programming/basics/new_basic.py
```python
try:
    from mylist import MyList
except ImportError as lerr:
print lerr
    MyList = None
try:
from exceptions import MyListError
except ImportError as merr:
print merr
MyListError = None
from sys import exit
from mylist import MyList
def end(reason):
print reason, "Exiting.."
exit(0)
def start():
try:
mlist = MyList()
mlist.fill_list()
for value in mlist.mylist:
print value
mlist.mylist.append(60)
if 60 in mlist.mylist:
end("This is over!")
except MyListError as lerr:
print lerr
#:HMM
start()
```
#### File: python_programming/test_basics/number.py
```python
import math
import unittest
"""
Bruh here is a fun test of basic python skills
"""
def convert_binary(num):
test = []
a = num
while a > 0:
        test.insert(0, str(a % 2))  # prepend each new bit so the string reads most-significant bit first
        a = a // 2  # floor division behaves the same on Python 2 and 3
ret_value = ''.join(e for e in test) #create a string
return ret_value
def convert_binary_decimal(str_num):
a = 0
twos_power = 0
#convert back to big endian
for s in str_num[::-1]:
if s == '1':
a = a + 2 ** twos_power
twos_power = twos_power + 1
return a
def convert_binary_octal(str_num):
octal_string = ''
twos_power = 0
tracking_num = 0
for s in str_num[::-1]:
if s == '1':
tracking_num = tracking_num + (2 ** twos_power)
twos_power = twos_power + 1
if twos_power % 3 == 0:
octal_string = str(tracking_num) + octal_string
twos_power = 0
tracking_num = 0
if tracking_num != 0:
octal_string = str(tracking_num) + octal_string
return octal_string
def convert_binary_hex():
return
def built_in_binary(x):
return bin(x)[2:]
print convert_binary(8)
print convert_binary_octal('100011')
print convert_binary_octal('0111')
print convert_binary_octal('1000')
print convert_binary_decimal('1000')
# Let me practice the slicing syntax....
"""
s = '<NAME>'
print s[4:8] # from 4-8
print s[4:] # to end of string
print s[:4] # 1-4
print s[:] #why? well because...
print s[:6] + s[6:] #this is trill but liek ....
print s[-3:-1]
print s[4:8:1] #from 4-8 skip by 1
print s[4:8:2] #from 4-8 skip by 2
print s[8:4:-1] # 8-4 going by 1
print s[4::-1] #from 4-1 going by -1 going backwards
print s[:4:-1] #end of string to 4 -1 going backwards
"""
#LETS PRACTICE HERP
#print s[len(s): -3:1]
#print s[0:1]
#print s[6:9]
#print s[len(s):-3:1]
#str_num = '01000'
#list_A = [str_num[i:i-3] for i in range(len(str_num) - 1, -1, -3)]
"""for i in range(len(str_num), 0, -n):
if i - n > 0:
print str_num[i-n:i]
else:
print str_num[i - (i % n): i]
"""
#GOT IT
``` |
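A quick sanity check of the two string-based converters above against Python's built-ins. This is a sketch, not part of the original file, and it assumes `convert_binary_decimal` and `convert_binary_octal` are in scope (e.g. pasted into the same session); unlike the rest of the file, which uses Python 2 `print` statements, these two functions run unchanged on Python 3.

```python
for bits in ["100011", "0111", "1000"]:
    assert convert_binary_decimal(bits) == int(bits, 2)
    assert convert_binary_octal(bits) == format(int(bits, 2), "o")
```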
{
"source": "joshuauk1026/smt",
"score": 3
} |
#### File: applications/tests/test_mixed_integer.py
```python
import unittest
import numpy as np
import matplotlib
matplotlib.use("Agg")
from smt.applications.mixed_integer import (
MixedIntegerContext,
MixedIntegerSamplingMethod,
FLOAT,
ENUM,
INT,
check_xspec_consistency,
unfold_xlimits_with_continuous_limits,
fold_with_enum_index,
unfold_with_enum_mask,
compute_unfolded_dimension,
cast_to_enum_value,
cast_to_mixed_integer,
)
from smt.problems import Sphere
from smt.sampling_methods import LHS
from smt.surrogate_models import KRG
class TestMixedInteger(unittest.TestCase):
def test_check_xspec_consistency(self):
xtypes = [FLOAT, (ENUM, 3), INT]
xlimits = [[-10, 10], ["blue", "red", "green"]] # Bad dimension
with self.assertRaises(ValueError):
check_xspec_consistency(xtypes, xlimits)
xtypes = [FLOAT, (ENUM, 3), INT]
xlimits = [[-10, 10], ["blue", "red"], [-10, 10]] # Bad enum
with self.assertRaises(ValueError):
check_xspec_consistency(xtypes, xlimits)
def test_krg_mixed_3D(self):
xtypes = [FLOAT, (ENUM, 3), INT]
xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]]
mixint = MixedIntegerContext(xtypes, xlimits)
sm = mixint.build_surrogate_model(KRG(print_prediction=False))
sampling = mixint.build_sampling_method(LHS, criterion="m")
fun = Sphere(ndim=3)
xt = sampling(20)
yt = fun(xt)
sm.set_training_values(xt, yt)
sm.train()
eq_check = True
for i in range(xt.shape[0]):
if abs(float(xt[i, :][2]) - int(float(xt[i, :][2]))) > 10e-8:
eq_check = False
if not (xt[i, :][1] == 0 or xt[i, :][1] == 1 or xt[i, :][1] == 2):
eq_check = False
self.assertTrue(eq_check)
def test_compute_unfolded_dimension(self):
xtypes = [FLOAT, (ENUM, 2)]
self.assertEqual(3, compute_unfolded_dimension(xtypes))
def test_unfold_with_enum_mask(self):
xtypes = [FLOAT, (ENUM, 2)]
x = np.array([[1.5, 1], [1.5, 0], [1.5, 1]])
expected = [[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]]
self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist())
def test_unfold_with_enum_mask_with_enum_first(self):
xtypes = [(ENUM, 2), FLOAT]
x = np.array([[1, 1.5], [0, 1.5], [1, 1.5]])
expected = [[0, 1, 1.5], [1, 0, 1.5], [0, 1, 1.5]]
self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist())
def test_fold_with_enum_index(self):
xtypes = [FLOAT, (ENUM, 2)]
x = np.array([[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]])
expected = [[1.5, 1], [1.5, 0], [1.5, 1]]
self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist())
def test_fold_with_enum_index_with_list(self):
xtypes = [FLOAT, (ENUM, 2)]
expected = [[1.5, 1]]
x = np.array([1.5, 0, 1])
self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist())
x = [1.5, 0, 1]
self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist())
def test_cast_to_enum_value(self):
xlimits = [[0.0, 4.0], ["blue", "red"]]
x_col = 1
enum_indexes = [1, 1, 0, 1, 0]
expected = ["red", "red", "blue", "red", "blue"]
self.assertListEqual(expected, cast_to_enum_value(xlimits, x_col, enum_indexes))
def test_unfolded_xlimits_type(self):
xtypes = [FLOAT, (ENUM, 2), (ENUM, 2), INT]
xlimits = np.array([[-5, 5], ["2", "3"], ["4", "5"], [0, 2]])
sampling = MixedIntegerSamplingMethod(xtypes, xlimits, LHS, criterion="ese")
doe = sampling(10)
self.assertEqual((10, 4), doe.shape)
def test_cast_to_mixed_integer(self):
xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), INT]
xlimits = np.array(
[[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]],
dtype="object",
)
x = np.array([1.5, 0, 2, 1])
self.assertEqual(
[1.5, "blue", "long", 1], cast_to_mixed_integer(xtypes, xlimits, x)
)
def run_mixed_integer_lhs_example(self):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from smt.sampling_methods import LHS
from smt.applications.mixed_integer import (
FLOAT,
INT,
ENUM,
MixedIntegerSamplingMethod,
)
xtypes = [FLOAT, (ENUM, 2)]
xlimits = [[0.0, 4.0], ["blue", "red"]]
sampling = MixedIntegerSamplingMethod(xtypes, xlimits, LHS, criterion="ese")
num = 40
x = sampling(num)
print(x.shape)
cmap = colors.ListedColormap(xlimits[1])
plt.scatter(x[:, 0], np.zeros(num), c=x[:, 1], cmap=cmap)
plt.show()
def run_mixed_integer_qp_example(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import QP
from smt.applications.mixed_integer import MixedIntegerSurrogateModel, INT
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0])
# xtypes = [FLOAT, INT, (ENUM, 3), (ENUM, 2)]
# FLOAT means x1 continuous
# INT means x2 integer
# (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable
# (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable
sm = MixedIntegerSurrogateModel(xtypes=[INT], xlimits=[[0, 4]], surrogate=QP())
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
def run_mixed_integer_context_example(self):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
from smt.surrogate_models import KRG
from smt.sampling_methods import LHS, Random
from smt.applications.mixed_integer import MixedIntegerContext, FLOAT, INT, ENUM
xtypes = [INT, FLOAT, (ENUM, 4)]
xlimits = [[0, 5], [0.0, 4.0], ["blue", "red", "green", "yellow"]]
def ftest(x):
return (x[:, 0] * x[:, 0] + x[:, 1] * x[:, 1]) * (x[:, 2] + 1)
# context to create consistent DOEs and surrogate
mixint = MixedIntegerContext(xtypes, xlimits)
# DOE for training
lhs = mixint.build_sampling_method(LHS, criterion="ese")
num = mixint.get_unfolded_dimension() * 5
print("DOE point nb = {}".format(num))
xt = lhs(num)
yt = ftest(xt)
# Surrogate
sm = mixint.build_surrogate_model(KRG())
print(xt)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
rand = mixint.build_sampling_method(Random)
xv = rand(50)
yv = ftest(xv)
yp = sm.predict_values(xv)
plt.plot(yv, yv)
plt.plot(yv, yp, "o")
plt.xlabel("actual")
plt.ylabel("prediction")
plt.show()
if __name__ == "__main__":
TestMixedInteger().run_mixed_integer_context_example()
```
#### File: smt/surrogate_models/rmtc.py
```python
import numpy as np
import scipy.sparse
from numbers import Integral
from smt.utils.linear_solvers import get_solver
from smt.utils.line_search import get_line_search_class
from smt.surrogate_models.rmts import RMTS
from smt.surrogate_models.rmtsclib import PyRMTC
class RMTC(RMTS):
"""
Regularized Minimal-energy Tensor-product Cubic hermite spline (RMTC) interpolant.
RMTC divides the n-dimensional space using n-dimensional box elements.
Each n-D box is represented using a tensor-product of cubic functions,
one in each dimension. The coefficients of the cubic functions are
computed by minimizing the second derivatives of the interpolant under
the condition that it interpolates or approximates the training points.
Advantages:
- Extremely fast to evaluate
- Evaluation/training time are relatively insensitive to the number of
training points
- Avoids oscillations
Disadvantages:
- Training time scales poorly with the # dimensions (too slow beyond 4-D)
- The user must choose the number of elements in each dimension
"""
def _initialize(self):
super(RMTC, self)._initialize()
declare = self.options.declare
declare(
"num_elements",
4,
types=(Integral, list, np.ndarray),
desc="# elements in each dimension - ndarray [nx]",
)
self.name = "RMTC"
def _setup(self):
options = self.options
nx = self.training_points[None][0][0].shape[1]
for name in ["smoothness", "num_elements"]:
if isinstance(options[name], (int, float)):
options[name] = [options[name]] * nx
options[name] = np.atleast_1d(options[name])
self.printer.max_print_depth = options["max_print_depth"]
num = {}
# number of inputs and outputs
num["x"] = self.training_points[None][0][0].shape[1]
num["y"] = self.training_points[None][0][1].shape[1]
# number of elements
num["elem_list"] = np.array(options["num_elements"], int)
num["elem"] = np.prod(num["elem_list"])
# number of terms/coefficients per element
num["term_list"] = 4 * np.ones(num["x"], int)
num["term"] = np.prod(num["term_list"])
# number of nodes
num["uniq_list"] = num["elem_list"] + 1
num["uniq"] = np.prod(num["uniq_list"])
# total number of training points (function values and derivatives)
num["t"] = 0
for kx in self.training_points[None]:
num["t"] += self.training_points[None][kx][0].shape[0]
# for RMT
num["coeff"] = num["term"] * num["elem"]
num["support"] = num["term"]
num["dof"] = num["uniq"] * 2 ** num["x"]
self.num = num
self.rmtsc = PyRMTC()
self.rmtsc.setup(
num["x"],
np.array(self.options["xlimits"][:, 0]),
np.array(self.options["xlimits"][:, 1]),
np.array(num["elem_list"], np.int32),
np.array(num["term_list"], np.int32),
)
def _compute_jac_raw(self, ix1, ix2, x):
n = x.shape[0]
nnz = n * self.num["term"]
data = np.empty(nnz)
rows = np.empty(nnz, np.int32)
cols = np.empty(nnz, np.int32)
self.rmtsc.compute_jac(ix1 - 1, ix2 - 1, n, x.flatten(), data, rows, cols)
return data, rows, cols
def _compute_dof2coeff(self):
num = self.num
# This computes an num['term'] x num['term'] matrix called coeff2nodal.
# Multiplying this matrix with the list of coefficients for an element
# yields the list of function and derivative values at the element nodes.
# We need the inverse, but the matrix size is small enough to invert since
        # RMTC is normally only used for 1 <= nx <= 4.
elem_coeff2nodal = np.zeros(num["term"] * num["term"])
self.rmtsc.compute_coeff2nodal(elem_coeff2nodal)
elem_coeff2nodal = elem_coeff2nodal.reshape((num["term"], num["term"]))
elem_nodal2coeff = np.linalg.inv(elem_coeff2nodal)
# This computes a num_coeff_elem x num_coeff_uniq permutation matrix called
# uniq2elem. This sparse matrix maps the unique list of nodal function and
# derivative values to the same function and derivative values, but ordered
# by element, with repetition.
nnz = num["elem"] * num["term"]
data = np.empty(nnz)
rows = np.empty(nnz, np.int32)
cols = np.empty(nnz, np.int32)
self.rmtsc.compute_uniq2elem(data, rows, cols)
num_coeff_elem = num["term"] * num["elem"]
num_coeff_uniq = num["uniq"] * 2 ** num["x"]
full_uniq2elem = scipy.sparse.csc_matrix(
(data, (rows, cols)), shape=(num_coeff_elem, num_coeff_uniq)
)
# This computes the matrix full_dof2coeff, which maps the unique
# degrees of freedom to the list of coefficients ordered by element.
nnz = num["term"] ** 2 * num["elem"]
data = np.empty(nnz)
rows = np.empty(nnz, np.int32)
cols = np.empty(nnz, np.int32)
self.rmtsc.compute_full_from_block(elem_nodal2coeff.flatten(), data, rows, cols)
num_coeff = num["term"] * num["elem"]
full_nodal2coeff = scipy.sparse.csc_matrix(
(data, (rows, cols)), shape=(num_coeff, num_coeff)
)
full_dof2coeff = full_nodal2coeff * full_uniq2elem
return full_dof2coeff
```
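A minimal usage sketch for the RMTC class above, assuming the standard smt surrogate-model interface (`set_training_values`, `train`, `predict_values`) used elsewhere in this repository; the `xlimits` and `num_elements` options follow the declarations shown here, but defaults may vary between smt versions.

```python
import numpy as np
from smt.surrogate_models import RMTC

xt = np.linspace(0.0, 4.0, 20).reshape(-1, 1)              # 1-D training inputs
yt = np.sin(xt)                                            # training outputs
sm = RMTC(xlimits=np.array([[0.0, 4.0]]), num_elements=8)  # one box element per 0.5 units
sm.set_training_values(xt, yt)
sm.train()
yp = sm.predict_values(np.linspace(0.0, 4.0, 100).reshape(-1, 1))
```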
#### File: surrogate_models/tests/test_krg_training.py
```python
from __future__ import print_function, division
import numpy as np
import unittest
from smt.utils.sm_test_case import SMTestCase
from smt.utils.kriging_utils import (
abs_exp,
squar_exp,
act_exp,
cross_distances,
componentwise_distance,
standardization,
matern52,
matern32,
)
from smt.sampling_methods.lhs import LHS
from smt.surrogate_models import KRG, MGP
print_output = False
class Test(SMTestCase):
def setUp(self):
eps = 1e-8
xlimits = np.asarray([[0, 1], [0, 1]])
self.random = np.random.RandomState(42)
lhs = LHS(xlimits=xlimits, random_state=self.random)
X = lhs(8)
y = LHS(xlimits=np.asarray([[0, 1]]), random_state=self.random)(8)
X_norma, y_norma, X_offset, y_mean, X_scale, y_std = standardization(X, y)
D, ij = cross_distances(X_norma)
theta = self.random.rand(2)
corr_str = ["abs_exp", "squar_exp", "act_exp", "matern32", "matern52"]
corr_def = [abs_exp, squar_exp, act_exp, matern32, matern52]
self.eps = eps
self.X = X
self.y = y
(
self.X_norma,
self.y_norma,
self.X_offset,
self.y_mean,
self.X_scale,
self.y_std,
) = (
X_norma,
y_norma,
X_offset,
y_mean,
X_scale,
y_std,
)
self.D, self.ij = D, ij
self.theta = theta
self.corr_str = corr_str
self.corr_def = corr_def
def test_noise_estimation(self):
xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
sm = KRG(hyper_opt="Cobyla", eval_noise=True, noise0=[1e-4])
sm.set_training_values(xt, yt)
sm.train()
x = np.linspace(0, 4, 100)
y = sm.predict_values(x)
self.assert_error(
np.array(sm.optimal_theta), np.array([0.11798507]), 1e-5, 1e-5
)
def test_corr_derivatives(self):
for ind, corr in enumerate(self.corr_def): # For every kernel
self.corr_str[ind] = self.corr_def[ind]
D = componentwise_distance(self.D, self.corr_str, self.X.shape[1])
k = corr(self.theta, D)
K = np.eye(self.X.shape[0])
K[self.ij[:, 0], self.ij[:, 1]] = k[:, 0]
K[self.ij[:, 1], self.ij[:, 0]] = k[:, 0]
grad_norm_all = []
diff_norm_all = []
ind_theta = []
for i, theta_i in enumerate(self.theta):
eps_theta = np.zeros(self.theta.shape)
eps_theta[i] = self.eps
k_dk = corr(self.theta + eps_theta, D)
K_dk = np.eye(self.X.shape[0])
K_dk[self.ij[:, 0], self.ij[:, 1]] = k_dk[:, 0]
K_dk[self.ij[:, 1], self.ij[:, 0]] = k_dk[:, 0]
grad_eps = (K_dk - K) / self.eps
dk = corr(self.theta, D, grad_ind=i)
dK = np.zeros((self.X.shape[0], self.X.shape[0]))
dK[self.ij[:, 0], self.ij[:, 1]] = dk[:, 0]
dK[self.ij[:, 1], self.ij[:, 0]] = dk[:, 0]
grad_norm_all.append(np.linalg.norm(dK))
diff_norm_all.append(np.linalg.norm(grad_eps))
ind_theta.append(r"$x_%d$" % i)
self.assert_error(
np.array(grad_norm_all), np.array(diff_norm_all), 1e-5, 1e-5
) # from utils/smt_test_case.py
def test_corr_hessian(self):
for ind, corr in enumerate(self.corr_def): # For every kernel
self.corr_str[ind] = self.corr_def[ind]
D = componentwise_distance(self.D, self.corr_str, self.X.shape[1])
grad_norm_all = []
diff_norm_all = []
for i, theta_i in enumerate(self.theta):
k = corr(self.theta, D, grad_ind=i)
K = np.eye(self.X.shape[0])
K[self.ij[:, 0], self.ij[:, 1]] = k[:, 0]
K[self.ij[:, 1], self.ij[:, 0]] = k[:, 0]
for j, omega_j in enumerate(self.theta):
eps_omega = np.zeros(self.theta.shape)
eps_omega[j] = self.eps
k_dk = corr(self.theta + eps_omega, D, grad_ind=i)
K_dk = np.eye(self.X.shape[0])
K_dk[self.ij[:, 0], self.ij[:, 1]] = k_dk[:, 0]
K_dk[self.ij[:, 1], self.ij[:, 0]] = k_dk[:, 0]
grad_eps = (K_dk - K) / self.eps
dk = corr(self.theta, D, grad_ind=i, hess_ind=j)
dK = np.zeros((self.X.shape[0], self.X.shape[0]))
dK[self.ij[:, 0], self.ij[:, 1]] = dk[:, 0]
dK[self.ij[:, 1], self.ij[:, 0]] = dk[:, 0]
grad_norm_all.append(np.linalg.norm(dK))
diff_norm_all.append(np.linalg.norm(grad_eps))
self.assert_error(
np.array(grad_norm_all), np.array(diff_norm_all), 1e-5, 1e-5
) # from utils/smt_test_case.py
def test_likelihood_derivatives(self):
for corr_str in [
"abs_exp",
"squar_exp",
"act_exp",
"matern32",
"matern52",
]: # For every kernel
for poly_str in ["constant", "linear", "quadratic"]: # For every method
if corr_str == "act_exp":
kr = MGP(print_global=False)
theta = self.random.rand(4)
else:
kr = KRG(print_global=False)
theta = self.theta
kr.options["poly"] = poly_str
kr.options["corr"] = corr_str
kr.set_training_values(self.X, self.y)
kr.train()
grad_red, dpar = kr._reduced_likelihood_gradient(theta)
red, par = kr._reduced_likelihood_function(theta)
grad_norm_all = []
diff_norm_all = []
ind_theta = []
for i, theta_i in enumerate(theta):
eps_theta = theta.copy()
eps_theta[i] = eps_theta[i] + self.eps
red_dk, par_dk = kr._reduced_likelihood_function(eps_theta)
dred_dk = (red_dk - red) / self.eps
grad_norm_all.append(grad_red[i])
diff_norm_all.append(float(dred_dk))
ind_theta.append(r"$x_%d$" % i)
grad_norm_all = np.atleast_2d(grad_norm_all)
diff_norm_all = np.atleast_2d(diff_norm_all).T
self.assert_error(
grad_norm_all, diff_norm_all, atol=1e-5, rtol=1e-3
) # from utils/smt_test_case.py
def test_likelihood_hessian(self):
for corr_str in [
"abs_exp",
"squar_exp",
"act_exp",
"matern32",
"matern52",
]: # For every kernel
for poly_str in ["constant", "linear", "quadratic"]: # For every method
if corr_str == "act_exp":
kr = MGP(print_global=False)
theta = self.random.rand(4)
else:
kr = KRG(print_global=False)
theta = self.theta
kr.options["poly"] = poly_str
kr.options["corr"] = corr_str
kr.set_training_values(self.X, self.y)
kr.train()
grad_red, dpar = kr._reduced_likelihood_gradient(theta)
hess, hess_ij, _ = kr._reduced_likelihood_hessian(theta)
Hess = np.zeros((theta.shape[0], theta.shape[0]))
Hess[hess_ij[:, 0], hess_ij[:, 1]] = hess[:, 0]
Hess[hess_ij[:, 1], hess_ij[:, 0]] = hess[:, 0]
grad_norm_all = []
diff_norm_all = []
ind_theta = []
for j, omega_j in enumerate(theta):
eps_omega = theta.copy()
eps_omega[j] += self.eps
grad_red_eps, _ = kr._reduced_likelihood_gradient(eps_omega)
for i, theta_i in enumerate(theta):
hess_eps = (grad_red_eps[i] - grad_red[i]) / self.eps
grad_norm_all.append(
np.linalg.norm(Hess[i, j]) / np.linalg.norm(Hess)
)
diff_norm_all.append(
np.linalg.norm(hess_eps) / np.linalg.norm(Hess)
)
ind_theta.append(r"$x_%d,x_%d$" % (j, i))
self.assert_error(
np.array(grad_norm_all),
np.array(diff_norm_all),
atol=1e-5,
rtol=1e-3,
) # from utils/smt_test_case.py
if __name__ == "__main__":
print_output = True
unittest.main()
```
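The derivative and Hessian tests above all follow the same pattern: perturb one hyperparameter by a small epsilon, rebuild the quantity of interest, and compare the forward difference quotient against the analytic derivative. A generic sketch of that check, independent of smt (the function names here are illustrative only):

```python
import numpy as np

def finite_difference_check(f, grad_f, x, eps=1e-8):
    """Relative error between grad_f(x) and a forward finite-difference estimate."""
    g = np.asarray(grad_f(x), dtype=float)
    approx = np.zeros_like(g)
    f0 = f(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = eps
        approx[i] = (f(x + step) - f0) / eps
    return np.linalg.norm(g - approx) / max(np.linalg.norm(g), 1e-16)
```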
#### File: utils/test/test_kriging_utils.py
```python
import unittest
import numpy as np
from smt.utils.sm_test_case import SMTestCase
from smt.utils.kriging_utils import standardization
class Test(SMTestCase):
def test_standardization(self):
d, n = (10, 100)
X = np.random.normal(size=(n, d))
y = np.random.normal(size=(n, 1))
X_norm, _, _, _, _, _ = standardization(X, y, scale_X_to_unit=True)
interval = (np.min(X_norm), np.max(X_norm))
self.assertEqual((0, 1), interval)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joshuaulrich/rchitect",
"score": 2
} |
#### File: rchitect/rchitect/py_tools.py
```python
from __future__ import unicode_literals, absolute_import
from rchitect._cffi import lib
import operator
import sys
import importlib
from six import text_type
from types import ModuleType
from .interface import rcopy, robject, rfunction, rcall_p, rcall
def get_p(name, envir):
return rcall_p(("base", "get"), name, envir=envir)
def inject_py_tools():
def py_import(module):
return importlib.import_module(module)
def py_import_builtins():
if sys.version >= "3":
return importlib.import_module("builtins")
else:
return importlib.import_module("__builtin__")
def py_call(fun, *args, **kwargs):
        # TODO: support .asis and .convert
if isinstance(fun, text_type):
fun = eval(fun)
return fun(*args, **kwargs)
def py_copy(*args):
return robject(*args)
def py_eval(code):
return eval(code)
def py_get_attr(obj, key):
if isinstance(obj, ModuleType):
try:
return importlib.import_module("{}.{}".format(obj.__name__, key))
except ImportError:
pass
return getattr(obj, key)
def py_get_item(obj, key):
return obj[key]
def py_names(obj):
try:
return list(k for k in obj.__dict__.keys() if not k.startswith("_"))
except Exception:
return None
def py_object(*args):
if len(args) == 1:
return robject("PyObject", rcopy(args[0]))
elif len(args) == 2:
return robject("PyObject", rcopy(rcopy(object, args[0]), args[1]))
def py_print(r):
rcall_p("cat", repr(r) + "\n")
def py_set_attr(obj, key, value):
lib.Rf_protect(obj)
lib.Rf_protect(key)
lib.Rf_protect(value)
pyo = rcopy(object, obj)
try:
setattr(pyo, rcopy(key), rcopy(value))
finally:
lib.Rf_unprotect(3)
return pyo
def py_set_item(obj, key, value):
lib.Rf_protect(obj)
lib.Rf_protect(key)
lib.Rf_protect(value)
pyo = rcopy(object, obj)
try:
pyo[rcopy(key)] = rcopy(value)
finally:
lib.Rf_unprotect(3)
return pyo
def py_dict(**kwargs):
narg = len(kwargs)
for key in kwargs:
lib.Rf_protect(kwargs[key])
try:
return {key: rcopy(kwargs[key]) for key in kwargs}
finally:
lib.Rf_unprotect(narg)
def py_tuple(*args):
narg = len(args)
for a in args:
lib.Rf_protect(a)
try:
return tuple([rcopy(a) for a in args])
finally:
lib.Rf_unprotect(narg)
def py_unicode(obj):
return text_type(obj)
def assign(name, value, envir):
rcall(("base", "assign"), name, value, envir=envir)
e = rcall(("base", "new.env"), parent=lib.R_GlobalEnv)
kwarg = {"rchitect.py_tools": e}
rcall(("base", "options"), **kwarg)
assign("import", rfunction(py_import, convert=False), e)
assign("import_builtins", rfunction(py_import_builtins, convert=False), e)
assign("py_call", rfunction(py_call, convert=False), e)
assign("py_copy", rfunction(py_copy, convert=True), e)
assign("py_eval", rfunction(py_eval, convert=False), e)
assign("py_get_attr", rfunction(py_get_attr, convert=False), e)
assign("py_get_item", rfunction(py_get_item, convert=False), e)
assign("py_object", rfunction(py_object, asis=True, convert=False), e)
assign("py_set_attr", rfunction(py_set_attr, invisible=True, asis=True, convert=False), e)
assign("py_set_item", rfunction(py_set_item, invisible=True, asis=True, convert=False), e)
assign("py_unicode", rfunction(py_unicode, convert=False), e)
assign("dict", rfunction(py_dict, asis=True, convert=False), e)
assign("tuple", rfunction(py_tuple, asis=True, convert=False), e)
assign("names.PyObject", rfunction(py_names, convert=True), e)
assign("print.PyObject", rfunction(py_print, invisible=True, convert=False), e)
assign(".DollarNames.PyObject", rfunction(py_names, convert=True), e)
assign("$.PyObject", rfunction(py_get_attr, convert=True), e)
assign("[.PyObject", rfunction(py_get_item, convert=True), e)
assign("$<-.PyObject", rfunction(py_set_attr, invisible=True, asis=True, convert=False), e)
assign("[<-.PyObject", rfunction(py_set_item, invisible=True, asis=True, convert=False), e)
assign("&.PyObject", rfunction(operator.and_, invisible=True, convert=False), e)
assign("|.PyObject", rfunction(operator.or_, invisible=True, convert=False), e)
assign("!.PyObject", rfunction(operator.not_, invisible=True, convert=False), e)
def attach():
parent_frame = rcall("sys.frame", -1)
things = [
"import",
"import_builtins",
"py_call",
"py_copy",
"py_eval",
"py_get_attr",
"py_get_item",
"py_object",
"py_set_attr",
"py_set_item",
"py_unicode",
"dict",
"tuple",
"names.PyObject",
"print.PyObject",
".DollarNames.PyObject",
"$.PyObject",
"[.PyObject",
"$<-.PyObject",
"[<-.PyObject",
"&.PyObject",
"|.PyObject",
"!.PyObject"
]
for thing in things:
assign(thing, get_p(thing, e), parent_frame)
assign("attach", robject(attach, invisible=True), e)
```
#### File: rchitect/reticulate/__init__.py
```python
from __future__ import unicode_literals
import os
from rchitect.interface import rcall, rcopy, set_hook, package_event
def configure():
def _configure():
rcall(
("base", "source"),
os.path.join(os.path.dirname(__file__), "config.R"),
rcall(("base", "new.env")))
if "reticulate" in rcopy(rcall(("base", "loadedNamespaces"))):
_configure()
else:
set_hook(package_event("reticulate", "onLoad"), lambda x, y: _configure())
```
#### File: rchitect/rchitect/setup.py
```python
from __future__ import unicode_literals
import sys
import os
from six.moves import input as six_input
from rchitect._cffi import ffi, lib
from .utils import Rhome, ensure_path, system2utf8
from .callbacks import def_callback, setup_unix_callbacks, setup_rstart
def load_lib_error():
return"Cannot load shared library: {}".format(
system2utf8(ffi.string(lib._libR_dl_error_message())))
def load_symbol_error():
return "Cannot load symbol {}: {}".format(
system2utf8(ffi.string(lib._libR_last_loaded_symbol())),
system2utf8(ffi.string(lib._libR_dl_error_message())))
def load_constant_error():
return "Cannot load constant {}: {}".format(
system2utf8(ffi.string(lib._libR_last_loaded_symbol())),
system2utf8(ffi.string(lib._libR_dl_error_message())))
def init(args=None, register_signal_handlers=None):
if not args:
args = ["rchitect", "--quiet", "--no-save"]
if register_signal_handlers is None:
register_signal_handlers = os.environ.get("RCHITECT_REGISTER_SIGNAL_HANDLERS", "1") == "1"
rhome = Rhome()
# microsoft python doesn't load DLL's from PATH
# we will need to open the DLL's directly in _libR_load
ensure_path(rhome)
libR_loaded = lib.Rf_initialize_R != ffi.NULL
if not libR_loaded:
# `system2utf8` may not work before `Rf_initialize_R` because locale may not be set
if not lib._libR_load(rhome.encode("utf-8")):
raise Exception(load_lib_error())
if not lib._libR_load_symbols():
raise Exception(load_symbol_error())
# _libR_is_initialized only works after _libR_load is run.
if not lib._libR_is_initialized():
_argv = [ffi.new("char[]", a.encode("utf-8")) for a in args]
argv = ffi.new("char *[]", _argv)
if sys.platform.startswith("win"):
if register_signal_handlers:
lib.Rf_initialize_R(len(argv), argv)
setup_rstart(rhome, args)
else:
# Rf_initialize_R will set handler for SIGINT
# we need to workaround it
lib.R_SignalHandlers_t[0] = 0
setup_rstart(rhome, args)
lib.R_set_command_line_arguments(len(argv), argv)
lib.GA_initapp(0, ffi.NULL)
lib.setup_Rmainloop()
# require R 4.0
if lib.EmitEmbeddedUTF8_t != ffi.NULL:
lib.EmitEmbeddedUTF8_t[0] = 1
else:
lib.R_SignalHandlers_t[0] = register_signal_handlers
lib.Rf_initialize_R(len(argv), argv)
setup_unix_callbacks()
lib.setup_Rmainloop()
if not libR_loaded:
if not lib._libR_load_constants():
raise Exception(load_constant_error())
lib._libR_setup_xptr_callback()
from rchitect.py_tools import inject_py_tools
inject_py_tools()
if os.environ.get("RCHITECT_RETICULATE_CONFIG", "1") != "0":
from rchitect import reticulate
reticulate.configure()
def loop():
lib.run_Rmainloop()
def ask_input(s):
return six_input(s)
@def_callback()
def show_message(buf):
sys.stdout.write(buf)
sys.stdout.flush()
@def_callback()
def read_console(p, add_history):
sys.stdout.flush()
sys.stderr.flush()
return ask_input(p)
@def_callback()
def write_console_ex(buf, otype):
if otype == 0:
if sys.stdout:
sys.stdout.write(buf)
sys.stdout.flush()
else:
if sys.stderr:
sys.stderr.write(buf)
sys.stderr.flush()
@def_callback()
def busy(which):
pass
@def_callback()
def polled_events():
pass
# @def_callback()
# def clean_up(saveact, status, run_last):
# lib.Rstd_CleanUp(saveact, status, run_last)
@def_callback()
def yes_no_cancel(p):
while True:
try:
result = ask_input("{} [y/n/c]: ".format(p))
if result in ["Y", "y"]:
return 1
elif result in ["N", "n"]:
return 2
else:
return 0
except EOFError:
return 0
except KeyboardInterrupt:
return 0
except Exception:
pass
```
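A minimal end-to-end sketch of the embedded-R startup implemented above, assuming an R installation is discoverable by `Rhome()`; `reval` and `rcopy` are the package-level helpers exercised by the tests later in this repository.

```python
from rchitect.setup import init
from rchitect import reval, rcopy

init()                        # load libR, initialize the session, register callbacks
x = rcopy(reval("1 + 1"))     # evaluate R code and copy the result into Python
print(x)                      # 2.0
```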
#### File: rchitect/rchitect/xptr.py
```python
from __future__ import unicode_literals
from rchitect._cffi import ffi, lib
from .types import box, unbox
_global_set = dict()
@ffi.callback("void(SEXP)")
def finalizer(s):
h = lib.R_ExternalPtrAddr(s)
del _global_set[h]
def new_xptr_p(x):
h = ffi.new_handle(x)
hp = ffi.cast("void*", h)
_global_set[hp] = h
s = lib.R_MakeExternalPtr(hp, lib.R_NilValue, lib.R_NilValue)
lib.R_RegisterCFinalizerEx(s, finalizer, 1)
return s
def new_xptr(x):
return box(new_xptr_p(x))
def from_xptr(s):
return ffi.from_handle(lib.R_ExternalPtrAddr(unbox(s)))
```
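A round-trip sketch for the external-pointer helpers above (this requires an initialized embedded R session, e.g. via `rchitect.setup.init()`):

```python
from rchitect.xptr import new_xptr, from_xptr

obj = {"answer": 42}
s = new_xptr(obj)             # wrap the Python object in an R external pointer
assert from_xptr(s) is obj    # the original object is recovered from the pointer
```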
#### File: rchitect/tests/test_robject.py
```python
from __future__ import unicode_literals
from rchitect import rcopy, rcall, reval, robject
from rchitect.interface import rclass, rstring, rint, rdouble
from collections import OrderedDict
def test_booleans():
assert rcall("identical", robject([True, False]), reval("c(TRUE, FALSE)"), _convert=True)
def test_numbers():
assert rcall("identical", robject(1), rint(1), _convert=True)
assert rcall("identical", robject(1.0), rdouble(1), _convert=True)
assert not rcall("identical", robject(1), rdouble(1), _convert=True)
assert not rcall("identical", robject(1.0), rint(1), _convert=True)
assert rcall("identical", robject(complex(1, 2)), reval("1 + 2i"), _convert=True)
assert rcall("identical", robject([1, 2]), reval("c(1L, 2L)"), _convert=True)
assert rcall("identical", robject([1.0, 2.0]), reval("c(1, 2)"), _convert=True)
assert rcall(
"identical",
robject([complex(1, 2), complex(2, 1)]), reval("c(1 + 2i, 2 + 1i)"), _convert=True)
def test_strings():
assert rcall("identical", robject("abc"), rstring("abc"), _convert=True)
assert rcall("identical", robject("β"), rstring("β"), _convert=True)
assert rcall("identical", robject("你"), rstring("你"), _convert=True)
assert rcall("identical", robject(['a', 'b']), reval("c('a', 'b')"), _convert=True)
def test_raw():
assert rcall("rawToChar", robject("raw", b"hello"), _convert=True) == "hello"
def test_none():
assert rcall("identical", robject(None), reval("NULL"), _convert=True)
def test_ordered_list():
d = OrderedDict([("a", 2), ("b", "hello")])
assert rcall("identical", robject(d), reval("list(a = 2L, b = 'hello')"), _convert=True)
def test_functions():
def f(x):
return x + 3
fun = robject(f)
assert "PyCallable" in rclass(fun)
assert rcopy(rcall(fun, 4)) == f(4)
assert rcopy(rcall(fun, x=4)) == f(4)
fun = robject(lambda x: x + 3, convert=False)
assert "PyCallable" in rclass(fun)
ret = rcall(fun, 4)
assert "PyObject" in rclass(ret)
assert rcopy(ret) == f(4)
makef = robject(lambda: f, convert=False)
ret = rcall(makef)
assert "PyCallable" in rclass(ret)
assert rcopy(ret) == f
``` |
{
"source": "JoshuaUrrutia/python-vbr-1",
"score": 3
} |
#### File: src/vbr/connection.py
```python
import psycopg2
import logging
import os
import uuid
from typing import NoReturn
from . import constants
from . import errors
from . import record
from . import unique_record
from . import tableclasses
logging.basicConfig(level=logging.DEBUG)
class VBRConn:
"""Managed connection to a VBR PostgreSQL database
"""
CONNECT_TIMEOUT = 30
SESSION_FIELD_NAME = tableclasses.SESSION_FIELD
def __init__(self,
config: dict = None,
session: str = None,
no_connect: bool = False):
if session is None:
self.session = uuid.uuid4().hex
else:
self.session = str(session)
logging.debug('VBR Session: ' + self.session)
self.session_field = self.SESSION_FIELD_NAME
if no_connect is False:
self.db = self._connect(config)
else:
self.db = None
def _connect(self, config: dict = None) -> NoReturn:
# Environment variables and defaults supported by the VBR database driver
        cfg = [('VBR_HOST', 'ip', 'localhost'),
               ('VBR_USERNAME', 'user', 'vbruser'),
               ('VBR_PASSWORD', 'pass', None),
               ('VBR_DATABASE', 'db', 'vbr')]
        if config is None:
config = {}
vbr = {}
for env_var, cfg_key, default in cfg:
vbr[cfg_key] = config.pop(cfg_key, os.getenv(env_var, None))
logging.debug('Connecting to database {} on {}'.format(
vbr['db'], vbr['ip']))
conn = psycopg2.connect(host=vbr['ip'],
database=vbr['db'],
user=vbr['user'],
password=vbr['pass'],
connect_timeout=self.CONNECT_TIMEOUT)
logging.debug('(Connected)')
return conn
def retrieve_record(self, pk_value: str,
table_name: str) -> record.VBRRecord:
"""Retrieve a VBR Record from the database by primary key and table name
"""
# Get SQL attributes and data from VBR Record
rec_cls = tableclasses.class_from_table(table_name)
db_table = rec_cls.TABLE
db_pk = rec_cls.PRIMARY_KEY
db_cols = rec_cls.field_names(include_pk=True)
sql_columns = ','.join(db_cols)
# Fetch record from database
# Resolve class C for record
# Return C(**record)
SQL = "SELECT {} FROM {} WHERE {} = %s LIMIT 1".format(
sql_columns, db_table, db_pk)
conn = self.db
with conn:
with conn.cursor() as cur:
logging.debug(cur.mogrify(SQL, [
pk_value,
]))
cur.execute(SQL, [
pk_value,
])
try:
db_vals = cur.fetchall()[0]
record = dict()
for col, val in zip(db_cols, db_vals):
record[col] = val
logging.debug('Retrieve successful')
return rec_cls(**record, new=False)
except IndexError:
raise errors.RecordNotFoundError(
'No {0}.{1} record matching {2} was found'.format(
db_table, db_pk, pk_value))
except Exception:
raise
def create_record(self, vbr_object: record.VBRRecord) -> str:
"""Insert a VBR Record into the database
"""
# NOTE: vbr_object is instance of a class defined in tableclasses
# Get SQL attributes and data from VBR Record
db_table = vbr_object.table_name
db_pk = vbr_object.primary_key
db_cols = vbr_object.field_names()
db_values = vbr_object.field_values()
logging.debug('Writing to table {0}'.format(db_table))
# Extend with private session ID
db_cols = list(db_cols)
db_cols.append(self.session_field)
db_values = list(db_values)
db_values.append(self.session)
# enumerate column names
sql_columns = ','.join(db_cols)
# create a '%s' string for every data element
sql_vars = ','.join(['%s' for d in db_cols])
# Construct SQL statement including return of primary key
# https://stackoverflow.com/a/5247723
SQL = "INSERT INTO {} ({}) VALUES ({}) RETURNING {};".format(
db_table, sql_columns, sql_vars, db_pk)
conn = self.db
id_of_new_row = None
# Using a pair of contexts will automatically roll back the pending transaction
# if an Exception is encountered
with conn:
with conn.cursor() as cur:
logging.debug(cur.mogrify(SQL, db_values))
try:
cur.execute(SQL, db_values)
# Get last row created
# https://stackoverflow.com/a/5247723
id_of_new_row = cur.fetchone()[0]
conn.commit()
logging.debug('Create successful: {0}.{1} = {2}'.format(
db_table, db_pk, id_of_new_row))
return str(id_of_new_row)
except psycopg2.errors.UniqueViolation:
# TODO check for existence of '*signature_unique' in error string
if isinstance(vbr_object, unique_record.VBRUniqueRecord):
raise errors.DuplicateSignature(
'A record with this distinct signature exists already.'
)
else:
raise
except Exception:
raise
# TODO - implement better failure handling
def update_record(self, vbr_object: record.VBRRecord) -> NoReturn:
"""Update a VBR Record in the database
"""
db_table = vbr_object.table_name
db_pk = vbr_object.primary_key
pk_value = vbr_object._VALUES.get(db_pk)
db_cols = vbr_object.field_names(include_pk=False)
db_values = vbr_object.field_values(include_pk=False)
if pk_value is None:
raise errors.ValidationError(
'Field {0} cannot be empty'.format(db_pk))
# Create SQL statement
data = []
sets = []
for col, val in zip(db_cols, db_values):
sets.append('{0} = %s'.format(col))
data.append(val)
sets_sql = ','.join(sets)
SQL = "UPDATE {0} SET {1} WHERE {2} = %s;".format(
db_table, sets_sql, db_pk)
# Add primary key value to end of data to support the WHERE clause above
data.append(pk_value)
conn = self.db
with conn:
with conn.cursor() as cur:
logging.debug(cur.mogrify(SQL, data))
# TODO - implement check for DuplicateSignature as this will mean that
# TODO - the user is trying to update a record that has the same content as
# TODO - an existing unique record
try:
cur.execute(SQL, data)
conn.commit()
logging.debug('Update successful')
except psycopg2.errors.UniqueViolation:
# TODO check for existence of '*signature_unique' in error string
if isinstance(vbr_object, unique_record.VBRUniqueRecord):
raise errors.DuplicateSignature(
'This record was not updated because it would duplicate an existing unique record'
)
else:
raise
except Exception:
raise
def delete_record(self, vbr_object: record.VBRRecord) -> NoReturn:
"""Delete a VBR Record from the database
"""
# Get SQL attributes and data from VBR Record
db_table = vbr_object.table_name
db_pk = vbr_object.primary_key
pk_value = vbr_object._VALUES.get(db_pk)
SQL = "DELETE FROM {} WHERE {} = %s".format(db_table, db_pk)
conn = self.db
with conn:
with conn.cursor() as cur:
logging.debug(cur.mogrify(SQL, [
pk_value,
]))
cur.execute(SQL, [
pk_value,
])
conn.commit()
logging.debug('Delete successful')
``` |
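A minimal usage sketch for the connection class above; the table name and primary-key value are hypothetical, and the connection settings can also be supplied through the `VBR_*` environment variables read in `_connect`.

```python
from vbr.connection import VBRConn

conn = VBRConn(config={"ip": "localhost", "db": "vbr", "user": "vbruser", "pass": "secret"})
rec = conn.retrieve_record("1", "project")   # hypothetical table 'project', primary key value 1
conn.update_record(rec)                      # write any modified field values back to the database
```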
{
"source": "JoshuaVarga/ImGuess",
"score": 2
} |
#### File: ImGuess/src/ImGuess.pyw
```python
from model import Model
from view import View
from controller import Controller
def main():
model = Model()
view = View()
controller = Controller(model, view)
controller.run()
if __name__ == "__main__":
    main()
``` |
{
"source": "joshuavictorchen/neutrino",
"score": 4
} |
#### File: neutrino/neutrino/datum.py
```python
import neutrino.config as c
import neutrino.tools as t
class Datum:
"""Custom data object that contains a DataFrame and a corresponding main key \
with which to pull specific DataFrame values.
.. note::
This class may be used to do more useful things in the future.
**Instance attributes:** \n
* **name** (*str*): Name of the Datum.
* **df** (*DataFrame*): The Datum's DataFrame object, where data is stored.
* **main_key** (*str*): Name of the main (unique) key column of the Datum's DataFrame.
Args:
name (str): Name of the :py:obj:`Datum` to be generated. Used as the default filename when exporting data to CSV.
df (DataFrame): DataFrame object for the Datum.
main_key (str): Name of the main (unique) key column of the provided DataFrame.\
Used to retrieve values from the DataFrame in a similar manner to a dictionary.
save (bool, optional): Exports the DataFrame's data as a CSV to the default database path if ``True``. Defaults to ``False``.
"""
def __init__(self, name, df, main_key, save=False):
self.name = name
self.df = df
# if the provided main_key is none, then default to 'id':
if main_key is None:
main_key = "id"
print(f"\n WARNING: no main key for {name} found; defaulting to 'id'")
self.main_key = main_key
if save:
self.save_csv()
def get(self, return_column, lookup_value, lookup_key=None):
"""Treats the :py:obj:`self.df` DataFrame as a dictionary and pulls the value of ``return_column`` corresponding to \
the row containing ``lookup_value`` within the ``lookup_key`` column.
.. admonition:: TODO
Throw a warning/error if the key is not unique, doesn't exist, etc. Currently, the first matching value is returned \
if multiple matches exist.
Args:
return_column (str): Column of the value to be returned.
lookup_value (str): Value of the key to look up.
lookup_key (str, optional): Column of the key to look up. Defaults to :py:obj:`self.main_key`.
Returns:
various: Value of the ``return_column`` corresponding to the lookup inputs.
"""
# TODO: throw warning if key is not unique, doesn't exist, etc.
if lookup_key is None:
lookup_key = self.main_key
return self.df[return_column].iloc[
self.df[self.df[lookup_key] == lookup_value].index[0]
]
def print_df(self):
"""Simply prints :py:obj:`self.df` to the console with a leading newline."""
print()
print(self.df)
def save_csv(self, custom_name=None, custom_dir=None):
"""Exports :py:obj:`self.df` to a CSV file via :py:obj:`neutrino.tools.save_df_to_csv`.\
The CSV name and filepath may be specified.
Args:
custom_name (str, optional): Name of the CSV file to be saved. Defaults to :py:obj:`self.name`.
custom_dir (str, optional): Path to where the CSV file will be saved.\
Defaults to the :py:obj:`neutrino.main.Neutrino`'s ``db_path``.
"""
csv_name = custom_name if custom_name else self.name
database_path = custom_dir if custom_dir else c.db_path
t.save_df_to_csv(self.df, csv_name, database_path)
```
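A short usage sketch for the Datum container above; the DataFrame contents are made up for illustration.

```python
import pandas as pd
from neutrino.datum import Datum

df = pd.DataFrame({"id": ["BTC-USD", "ETH-USD"], "price": [50000.0, 4000.0]})
products = Datum("products", df, main_key="id")
price = products.get("price", "BTC-USD")   # -> 50000.0, looked up via the main key column
```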
#### File: neutrino/neutrino/tools.py
```python
import neutrino.config as c
import os
import shutil
import sys
import yaml
from datetime import datetime, timezone, timedelta
from dateutil.parser import isoparse
def print_recursive_dict(data, indent_spaces=3, indent_step=2, recursion=False):
"""Prints a formatted nested dictionary to the console.
.. code-block::
# example console output for an input of {'123':{'456':['aaa', 'bbb', 'ccc']}}
"
123 :
456 : aaa
bbb
ccc"
Args:
data (dict): Dictionary of values that can be converted to strings.
indent_spaces (int, optional): Number of leading whitespaces to insert before each element. Defaults to 3.
indent_step (int, optional): Number of whitespaces to increase the indentation by, for each level of ``dict`` nesting. Defaults to 2.
recursion (bool, optional): Whether or not this method is being called by itself. Defaults to False.
"""
# print a newline once, prior to the formatted dictionary
if not recursion:
print()
# loop through the dictionary
for key, value in data.items():
# the right-justification for each key is equal to the length of the longest key
rjust = len(max(data, key=len))
# if the value is a dictionary, then recursively call this function to print the inner dictionary
if isinstance(value, dict):
print(" " * indent_spaces + f"{key.rjust(rjust)} : ")
print_recursive_dict(
list_to_string(value, rjust),
indent_spaces
+ indent_step
+ rjust
+ 1, # adjust the indentation level of the inner dictionary
indent_step,
True,
)
# if the value is not a dictionary, then print the key-value pair
else:
print(
" " * indent_spaces
+ f"{key.rjust(rjust)} : {list_to_string(value, rjust + indent_spaces + 3)}"
)
def list_to_string(value, leading_whitespaces=1):
"""Takes a list and returns a formatted string containing each element delimited by newlines.
.. admonition:: TODO
Incorporate :py:obj:`print_recursive_dict` for lists with dictionary elements, i.e. ``[{}, {}]``.
Args:
value (list): A list of elements that can be represented by strings.
leading_whitespaces (int, optional): Number of leading whitespaces to insert before each element. Defaults to 1.
Returns:
str: Formatted string containing each element of the provided list delimited by newlines, with ``leading_whitespaces`` leading whitespaces before each element.
.. code-block::
# example returned string for an input of ['abc', 'def', 'ghi']
" abc\\n def\\n ghi"
"""
# just return the same value if it's not a list
if not isinstance(value, list):
return value
# if the list is empty, then return a blank string
elif len(value) == 0:
return ""
# if the list has only one element, then return that element
elif len(value) == 1:
return value[0]
# if the list has more than one element, then return a string containing each element delimited by newlines
# add leading_whitespaces number of leading whitespaces before each element
else:
return_string = str(value[0]) + "\n"
for i in range(1, len(value)):
return_string += (" " * leading_whitespaces) + str(value[i]) + "\n"
return return_string.strip()
def load_yaml_settings(settings_file, settings_template_file):
"""Loads a dictionary of settings values from a YAML file.
This YAML file is gitignored so that the repository's configuration is not affected by user personalization.
If the YAML file does not exist, then it is copied from the repository's version controlled template.
Args:
settings_file (str): Absolute path to the gitignored YAML file.
settings_template_file (str): Absolute path to the version controlled YAML template.
Returns:
dict: Dictionary representation of loaded settings from a YAML file.
"""
# if file does not exist, copy one from the default template
if not os.path.isfile(settings_file):
# TODO: prompt user to update keys_file defs, etc.
shutil.copy2(settings_template_file, settings_file)
print(f"\n Settings file generated: {settings_file}")
settings = parse_yaml(settings_file, echo_yaml=False)
return settings
def parse_yaml(filepath, echo_yaml=True):
"""Parses a YAML file and returns a dict of its contents. Optionally prints the formatted dict to the console.
Args:
filepath (str): Path to the supplied YAML file.
echo_yaml (bool, optional): Whether or not to print the formatted loaded dict to the console. Defaults to True.
Returns:
dict: Dictionary of contents loaded from the supplied YAML file.
"""
# open the file and load its data into a dict
with open(filepath) as stream:
try:
yaml_data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
sys.exit(f"\n Neutrino annihilated - YAML file is corrupted:\n\n {exc}")
# if echo_yaml is True, then print the formatted dict to the console
if echo_yaml:
print_recursive_dict(yaml_data)
return yaml_data
def save_df_to_csv(df, csv_name, database_path):
"""Exports the provided DataFrame to a CSV file. Cleans timestrings per :py:obj:`clean_df_timestrings`.\
Prompts the user to close the file if it exists and is open.
Args:
df (DataFrame): DataFrame to be exported to a CSV file.
csv_name (str): Name of the CSV file to be saved, **without** the ``.csv`` extension \
(i.e., ``saved_file`` instead of ``saved_file.csv``).
database_path (Path): Absolute path to the directory in which the CSV file will be saved.
"""
# TODO: default behavior should be to APPEND to CSV, with option to OVERWRITE
filepath = database_path / (csv_name + ".csv")
df = clean_df_timestrings(df)
while True:
try:
df.to_csv(filepath, index=False)
break
except PermissionError:
response = input(
f"\n Error exporting {csv_name} to CSV: {filepath} is currently open.\
\n Close the file and press [enter] to continue. Input any other key to abort: "
)
if response != "":
print(f"\n {csv_name} CSV not saved.")
return
print(f" \n {csv_name} exported to: {filepath}")
def clean_df_timestrings(df):
"""Ensure the provided DataFrame's time string columns are properly formatted (``%Y-%m-%d %H:%M``). \
This is needed because timestrings loaded into a DataFrame from a CSV file may be automatically reformatted \
into another configuration.
.. note::
This does not affect time strings stored in ISO 8601 format.
Args:
df (DataFrame): DataFrame to be processed.
Returns:
DataFrame: DataFrame object with cleaned time string columns.
"""
# use the add_minutes_to_time_string method (adding 0 minutes) on each column
    # this ensures proper time string formatting for all columns containing generic time strings
for column in df:
try:
df[column] = df[column].apply(lambda x: add_minutes_to_time_string(x, 0))
except:
pass
return df
def move_df_column_inplace(df, column, position):
"""Moves a DataFrame column to the specified index position inplace.
Credit: https://stackoverflow.com/a/58686641/17591579
Args:
df (DataFrame): DataFrame whose columns will be rearranged.
column (str): Name of the column to be moved.
position (int): Index to which the column will be moved.
"""
column = df.pop(column)
df.insert(position, column.name, column)
def local_to_ISO_time_string(local_time_string, time_format=c.TIME_FORMAT):
"""Converts a local time string to an ISO 8601 time string.
Example use case: converting user-specified start/end times in Link.get_product_candles().
Args:
local_time_string (string): Time string with the specified time_format.
time_format (string, optional): Format of local_time_string.
Returns:
str: ISO 8601 time string.
"""
# use epoch as an intermediary for conversion
return datetime.utcfromtimestamp(
datetime.timestamp(datetime.strptime(local_time_string, time_format))
).isoformat()
def ISO_to_local_time_dt(ISO_time_string):
"""Converts an ISO 8601 time string to a local-timezone datetime object.
Example use case: converting API-retrieved timestamps to a usable format for data processing.
Args:
ISO_time_string (str): ISO 8601 time string.
Returns:
datetime: Datetime object (local timezone).
"""
return isoparse(ISO_time_string).replace(tzinfo=timezone.utc).astimezone(tz=None)
def ISO_to_local_time_string(ISO_time_string, time_format=c.TIME_FORMAT):
"""Converts an ISO 8601 time string to a local time string.
Example use case: converting API-retrieved timestamps to local time format for output to the console.
Args:
ISO_time_string (str): ISO 8601 time string.
time_format (str, optional): Format of the returned local time string.
Returns:
str: Local time string.
"""
return datetime.strftime(ISO_to_local_time_dt(ISO_time_string), time_format)
def add_minutes_to_time_string(time_string, minute_delta, time_format=c.TIME_FORMAT):
"""Adds minutes to a given time string and returns the result as another time string.
Args:
time_string (str): Original time string.
minute_delta (int): Minutes to add to the original time string. Can be negative.
time_format (str, optional): Format of the provided and returned time strings.
Returns:
str: Result from time_string plus minute_delta.
"""
return datetime.strftime(
datetime.strptime(time_string, time_format) + timedelta(minutes=minute_delta),
time_format,
)
``` |
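A brief sketch of the time-string helpers above, assuming `c.TIME_FORMAT` is `"%Y-%m-%d %H:%M"` as the `clean_df_timestrings` docstring suggests.

```python
import neutrino.tools as t

iso = t.local_to_ISO_time_string("2021-05-01 12:00")           # local time string -> ISO 8601 (UTC)
back = t.ISO_to_local_time_string(iso)                         # round-trips to "2021-05-01 12:00"
later = t.add_minutes_to_time_string("2021-05-01 12:00", 90)   # -> "2021-05-01 13:30"
```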
{
"source": "JoshuaW1990/bus_arrival_prediction",
"score": 3
} |
#### File: bus_arrival_prediction/preprocess/data_collection.py
```python
import pandas as pd
import os
from datetime import datetime, timedelta
from dateutil.rrule import rrule, DAILY
import requests
import random
import urllib
#################################################################################################################
# helper functions for API data, segment data, and other calculations                                          #
#################################################################################################################
"""
Helper functions for generating API data, segment data, and arrival times.
List of helper functions:
* calculate_arrival_time
* calculate_arrival_distance
* extract_time
* calculate_time_span
* calculate_time_from_stop
* filter_single_history
"""
def calculate_arrival_time(stop_dist, prev_dist, next_dist, prev_timestamp, next_timestamp):
"""
Calculate the arrival time according to the given tuple (prev_dist, next_dist), the current location, the timestamp of the prev location, and the timestamp of the next location
:param stop_dist: the distance of the target stop between the prev and next tuple
:param prev_dist: the distance of the location of the bus at the previous record
:param next_dist: the distance of the location of the bus at the next record
:param prev_timestamp: the timestamp of the bus at the previous record
:param next_timestamp: the timestamp of the bus at the next record
:return result: the timestamp of the bus arrival the target stop
"""
distance_prev_next = next_dist - prev_dist
distance_prev_stop = stop_dist - prev_dist
ratio = float(distance_prev_stop) / float(distance_prev_next)
duration_prev_next = next_timestamp - prev_timestamp
duration_prev_stop = ratio * duration_prev_next.total_seconds()
duration_prev_stop = timedelta(0, duration_prev_stop)
stop_timestamp = prev_timestamp + duration_prev_stop
return stop_timestamp
def calculate_arrival_distance(time_of_day, prev_dist, next_dist, prev_timestamp, next_timestamp):
"""
calculate arrival distance according to the given input: time_of_day, prev_dist, next_dist, prev_timestamp, next_timestamp
:param time_of_day: the given time for calculating the dist_along_route
:param prev_dist: the distance of the location of the bus for the previous record in historical data
:param next_dist: the distance of the location of the bus for the next record in historical data
:param prev_timestamp: the timestamp of the bus for the previous record in historical data
:param next_timestamp: the timestamp of the bus for the next record in historical data
:return result: dist_along_route for the bus at the given time_of_day
"""
duration_prev_next = next_timestamp - prev_timestamp
duration_prev_time = time_of_day - prev_timestamp
duration_prev_next = duration_prev_next.total_seconds()
duration_prev_time = duration_prev_time.total_seconds()
ratio = duration_prev_time / duration_prev_next
distance_prev_next = next_dist - prev_dist
distance_prev_time = distance_prev_next * ratio
dist_along_route = prev_dist + distance_prev_time
return dist_along_route
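# Worked example of the interpolation used above (hypothetical numbers): with prev_dist = 100,
# next_dist = 300 and stop_dist = 150, the ratio is (150 - 100) / (300 - 100) = 0.25, so for
# records 60 seconds apart calculate_arrival_time places the arrival 0.25 * 60 = 15 seconds
# after prev_timestamp; calculate_arrival_distance inverts the same linear interpolation to
# recover a dist_along_route for a given time_of_day.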
def extract_time(time):
"""
Convert the string into datetime format.
:param time: string of time need to be converted. Example: '2017-01-16T15:09:28Z'
:return: the time in datetime format
"""
result = datetime.strptime(time[11: 19], '%H:%M:%S')
return result
def calculate_time_span(time1, time2):
"""
    Calculate the duration between two time points
:param time1: previous time point in string format, ex: '2017-01-16T15:09:28Z'
:param time2: next time point in string format, ex: '2017-01-16T15:09:28Z'
:return: float number of seconds
"""
timespan = extract_time(time2) - extract_time(time1)
return timespan.total_seconds()
def calculate_time_from_stop(segment_df, dist_along_route, prev_record, next_record):
"""
Calculate the time from stop within the tuple (prev_record, next_record)
Algorithm:
    if prev_record == next_record:
        the bus is parked at the stop, return 0
    Calculate the distance within the tuple
    Calculate the distance between the current location and the prev record
    Calculate the ratio of these two distances
    Use the ratio to calculate the time_from_stop
    :param segment_df: dataframe for the preprocessed segment data
    :param dist_along_route: distance between the initial stop and the current location of the bus
:param prev_record: single record of the route_stop_dist.csv file
:param next_record: single record of the route_stop_dist.csv file
:return: total seconds of the time_from_stop
"""
if prev_record.get('stop_id') == next_record.get('stop_id'):
return 0.0
distance_stop_stop = next_record.get('dist_along_route') - prev_record.get('dist_along_route')
distance_bus_stop = next_record.get('dist_along_route') - dist_along_route
ratio = float(distance_bus_stop) / float(distance_stop_stop)
assert ratio < 1
try:
travel_duration = segment_df[(segment_df.segment_start == prev_record.get('stop_id')) & (
segment_df.segment_end == next_record.get('stop_id'))].iloc[0]['travel_duration']
except:
travel_duration = segment_df['travel_duration'].mean()
time_from_stop = travel_duration * ratio
return time_from_stop
def filter_single_history(single_history, stop_sequence):
"""
Filter the history file with only one day and one stop sequence to remove abnormal record
:param single_history: dataframe for history table with only one day
:param stop_sequence: list of stop id
:return: dataframe for filtered history table
"""
current_history = single_history[
(single_history.next_stop_id.isin(stop_sequence)) & (single_history.dist_along_route > 0)]
if len(current_history) < 3:
return None
tmp_history = pd.DataFrame(columns=current_history.columns)
i = 1
prev_record = current_history.iloc[0]
while i < len(current_history):
next_record = current_history.iloc[i]
prev_distance = float(prev_record.total_distance)
next_distance = float(next_record.total_distance)
while prev_distance >= next_distance:
i += 1
if i == len(current_history):
break
next_record = current_history.iloc[i]
next_distance = float(next_record.total_distance)
tmp_history.loc[len(tmp_history)] = prev_record
prev_record = next_record
i += 1
if float(prev_record.total_distance) > float(tmp_history.iloc[-1].total_distance):
tmp_history.loc[len(tmp_history)] = prev_record
return tmp_history
#################################################################################################################
# weather.csv #
#################################################################################################################
def get_precip(gooddate, api_token):
"""
Download the weather information for a specific date
:param gooddate: date for downloading
:param api_token: the token for api interface
:return: list of the data
"""
urlstart = 'http://api.wunderground.com/api/' + api_token + '/history_'
urlend = '/q/NY/New_York.json'
url = urlstart + str(gooddate) + urlend
data = requests.get(url).json()
result = None
for summary in data['history']['dailysummary']:
rain = summary['rain']
snow = summary['snow']
if snow == '1':
weather = '2'
elif rain == '1':
weather = '1'
else:
weather = '0'
result = [gooddate, rain, snow, weather]
return result
def download_weather(date_start, date_end, api_token):
"""
download the weather information for a date range
:param date_start: start date, string, ex: '20160101'
:param date_end: similar to date_start
:param api_token: the token for api interface
:return: dataframe for weather table
weather = 2: snow
weather = 1: rain
weather = 0: sunny
"""
a = datetime.strptime(date_start, '%Y%m%d')
b = datetime.strptime(date_end, '%Y%m%d')
result = pd.DataFrame(columns=['date', 'rain', 'snow', 'weather'])
for dt in rrule(DAILY, dtstart=a, until=b):
current_data = get_precip(dt.strftime("%Y%m%d"), api_token)
if current_data is None:
continue
else:
result.loc[len(result)] = current_data
return result
#################################################################################################################
# route_stop_dist.csv #
#################################################################################################################
"""
Calculate the distance of each stops for a specific route from the initial stop.
It will read three different files: trips.txt, stop_times.txt and history file.
Use the stop_times.txt and trips.txt file to obtain the stop sequence for each route and use the historical data to calculate the actual distance for each stop.
"""
def calculate_stop_distance(stop_times, history, direction_id=0):
"""
Calculate the distance of each stop with its initial stop. Notice that the dist_along_route is the distance between the next_stop and the initial stop
Algorithm:
split the history and stop_times table according to the route id and shape id
for each subset of the divided history table:
get the route id and shape id for the subset
get the corresponding subset of the stop_times table
get the stop sequence from this subset
define a new dataframe based on the stop sequence for that shape id
find the distance from history data for the corresponding stop and shape id
save the result for this subset
concatenate all the results
:param stop_times: the stop_times table read from stop_times.txt file in GTFS
:param history: the history table from preprocessed history.csv file
:param direction_id: the direction id which can be 0 or 1
:return: the route_stop_dist table in dataframe
"""
route_grouped_history = history.groupby(['route_id', 'shape_id'])
route_grouped_stop_times = stop_times.groupby(['route_id', 'shape_id'])
result_list = []
for name, single_route_history in route_grouped_history:
route_id, shape_id = name
flag = 0
current_result = pd.DataFrame()
single_stop_times = route_grouped_stop_times.get_group((route_id, shape_id))
trip_id = single_stop_times.iloc[0]['trip_id']
single_stop_times = single_stop_times[single_stop_times.trip_id == trip_id]
single_stop_times.reset_index(inplace=True)
current_result['stop_id'] = single_stop_times['stop_id']
current_result['route_id'] = route_id
current_result['shape_id'] = shape_id
current_result['direction_id'] = direction_id
stop_grouped = single_route_history.groupby(['next_stop_id']).mean()
stop_grouped.reset_index(inplace=True)
stop_grouped['next_stop_id'] = pd.to_numeric(stop_grouped['next_stop_id'])
stop_set = set(stop_grouped['next_stop_id'])
for i in xrange(len(current_result)):
next_stop_id = current_result.iloc[i]['stop_id']
if next_stop_id not in stop_set:
print route_id, shape_id
flag = 1
break
else:
dist_along_route = stop_grouped[stop_grouped.next_stop_id == next_stop_id].iloc[0]['dist_along_route']
current_result.set_value(i, 'dist_along_route', dist_along_route)
if flag == 1:
continue
else:
result_list.append(current_result)
result = pd.concat(result_list, ignore_index=True)
return result
#################################################################################################################
# segment.csv #
#################################################################################################################
"""
generate the segment table
"""
def generate_original_segment(full_history_var, weather, stop_times_var):
"""
Generate the original segment data
Algorithm:
Split the full historical data according to the service date, trip_id with groupby function
For name, item in splitted historical dataset:
service date, trip_id = name[0], name[1]
Find the vehicle id that accounts for the majority of the records (to remove abnormal values in the historical data)
calculate the travel duration within each segment of this split historical data and save the result into a list
concatenate the list
:param full_history_var: the historical data after filtering
:param weather: the dataframe for the weather information
:param stop_times_var: the dataframe from stop_times.txt
:return: dataframe for the original segment
format:
segment_start, segment_end, timestamp, travel_duration, weather, service date, day_of_week, trip_id, vehicle_id
"""
full_history_var = full_history_var[full_history_var.total_distance > 0]
grouped = list(full_history_var.groupby(['service_date', 'trip_id']))
result_list = []
step_count = range(0, len(grouped), len(grouped) / 10)
for index in range(len(grouped)):
name, single_history = grouped[index]
if index in step_count:
print "process: ", str(step_count.index(index)) + '/' + str(10)
service_date, trip_id = name
if service_date <= 20160103:
continue
grouped_vehicle_id = list(single_history.groupby(['vehicle_id']))
majority_length = -1
majority_vehicle = -1
majority_history = single_history
for vehicle_id, item in grouped_vehicle_id:
if len(item) > majority_length:
majority_length = len(item)
majority_history = item
majority_vehicle = vehicle_id
stop_sequence = [item for item in list(stop_times_var[stop_times_var.trip_id == trip_id].stop_id)]
current_segment_df = generate_original_segment_single_history(majority_history, stop_sequence)
if current_segment_df is None:
continue
current_weather = weather[weather.date == service_date].iloc[0]['weather']
current_segment_df['weather'] = current_weather
day_of_week = datetime.strptime(str(service_date), '%Y%m%d').weekday()
current_segment_df['service_date'] = service_date
current_segment_df['day_of_week'] = day_of_week
current_segment_df['trip_id'] = trip_id
current_segment_df['vehicle_id'] = majority_vehicle
result_list.append(current_segment_df)
if result_list != []:
result = pd.concat(result_list, ignore_index=True)
else:
return None
return result
def generate_original_segment_single_history(history, stop_sequence):
"""
Calculate the travel duration for a single historical data
Algorithm:
Filter the historical data with the stop sequence here
arrival_time_list = []
i = 0
while i < len(history):
use prev and the next to mark the record:
prev = history[i - 1]
next = history[i]
check whether the prev stop is the same as the next stop:
if yes, skip this row and continue to next row
prev_distance = prev.dist_along_route - prev.dist_from_stop
next_distance = next.dist_along_route - next.dist_from_stop
if prev_distance == next_distance:
continue to next row
elif prev.dist_from_stop == 0:
current_arrival_time = prev.timestamp
else:
current_arrival_time = calculate_arrival_time(prev.dist_along_route, prev_distance, next_distance, prev_timestamp, next_timestamp)
arrival_time_list.append((prev.next_stop_id, current_arrival_time))
result = pd.DataFrame()
for i in range(1, len(arrival_time_list)):
prev = arrival_time_list[i - 1]
next = arrival_time_list[i]
segment_start, segment_end obtained
travel_duration = next[1] - prev[1]
timestamp = prev[1]
save the record to result
:param history: single historical data
:param stop_sequence: stop sequence for the corresponding trip id
:return: the dataframe of the original segment dataset
format:
segment_start, segment_end, timestamp, travel_duration
"""
single_history = filter_single_history(history, stop_sequence)
if single_history is None or len(single_history) < 3:
return None
arrival_time_list = []
grouped_list = list(single_history.groupby('next_stop_id'))
if len(grouped_list) < 3:
return None
history = pd.DataFrame(columns=single_history.columns)
for i in xrange(len(grouped_list)):
history.loc[len(history)] = grouped_list[i][1].iloc[-1]
history.sort_values(by='timestamp', inplace=True)
if history.iloc[0]['total_distance'] < 1:
prev_record = history.iloc[1]
i = 2
else:
prev_record = history.iloc[0]
i = 1
while i < len(history):
next_record = history.iloc[i]
while stop_sequence.index(prev_record.next_stop_id) >= stop_sequence.index(next_record.next_stop_id):
i += 1
if i == len(history):
break
next_record = history.iloc[i]
if i == len(history):
break
prev_distance = float(prev_record.total_distance)
next_distance = float(next_record.total_distance)
prev_timestamp = datetime.strptime(prev_record.timestamp, '%Y-%m-%dT%H:%M:%SZ')
next_timestamp = datetime.strptime(next_record.timestamp, '%Y-%m-%dT%H:%M:%SZ')
if prev_distance == next_distance:
# the bus didn't move yet
i += 1
continue
if prev_record.dist_from_stop == 0:
# if prev.dist_from_stop is 0, the bus is exactly at the stop, so use its timestamp directly
current_arrival_time = prev_timestamp
else:
stop_dist = prev_record.dist_along_route
current_arrival_time = calculate_arrival_time(stop_dist, prev_distance, next_distance, prev_timestamp,
next_timestamp)
arrival_time_list.append((prev_record.next_stop_id, current_arrival_time))
prev_record = next_record
i += 1
result = pd.DataFrame(columns=['segment_start', 'segment_end', 'timestamp', 'travel_duration'])
for i in range(1, len(arrival_time_list)):
prev_record = arrival_time_list[i - 1]
next_record = arrival_time_list[i]
segment_start, segment_end = prev_record[0], next_record[0]
timestamp = prev_record[1]
travel_duration = next_record[1] - prev_record[1]
travel_duration = travel_duration.total_seconds()
result.loc[len(result)] = [segment_start, segment_end, str(timestamp), travel_duration]
return result
def improve_dataset_unit(segment_df, stop_sequence):
"""
Improve the dataset for a specific trip_id on a specific date: add the skipped segments back into the dataframe
Algorithm:
define result_df
For each row in segment_df:
obtain segment_start, segment_end, timestamp, travel_duration from the current row
start_index: index of segment_start in stop_sequence
end_index: ...
count = end_index - start_index
if count is 1, save the current row and continue to next row
average_travel_duration = travel_duration / count
For index in range(start_index, end_index):
current_segment_start = stop_sequence[index]
current_segment_end = stop_sequence[index + 1]
save the new row with the timestamp, average_travel_duration, current_segment_start, and current_segment_end into result_df
timestamp = timestamp + average_travel_duration
return result_df
:param segment_df: a subset of segment table with one trip id and service date
:param stop_sequence: stop sequence for the corresponding trip id
:return: dataframe for improved segment table
return format:
segment_start, segment_end, timestamp, travel_duration
"""
result = pd.DataFrame(columns=['segment_start', 'segment_end', 'timestamp', 'travel_duration'])
for i in xrange(len(segment_df)):
segment_start = segment_df.iloc[i]['segment_start']
segment_end = segment_df.iloc[i]['segment_end']
timestamp = segment_df.iloc[i]['timestamp']
travel_duration = segment_df.iloc[i]['travel_duration']
start_index = stop_sequence.index(segment_start)
end_index = stop_sequence.index(segment_end)
count = end_index - start_index
if count <= 0:
print "error: segment_end does not appear after segment_start in the stop sequence"
continue
average_travel_duration = float(travel_duration) / float(count)
for j in range(start_index, end_index):
current_segment_start = stop_sequence[j]
current_segment_end = stop_sequence[j + 1]
result.loc[len(result)] = [current_segment_start, current_segment_end, timestamp, average_travel_duration]
timestamp = datetime.strptime(timestamp[:19], '%Y-%m-%d %H:%M:%S') + timedelta(0, average_travel_duration)
timestamp = str(timestamp)
return result
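# Worked example of the splitting above (values are made up): a recorded segment
# that jumps from stop A to stop D (count = 3) with travel_duration = 90 s and
# timestamp t0 is replaced by three rows:
#   (A, B, t0,        30.0)
#   (B, C, t0 + 30 s, 30.0)
#   (C, D, t0 + 60 s, 30.0)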
def improve_dataset(segment_df, stop_times, weather_df):
"""
Improve the segment table by adding the skipped stops and other extra columns like weather, day_of_week, etc.
algorithm:
split the segment dataframe by groupby(service_date, trip_id)
result_list = []
for name, item in grouped_segment:
obtained the improved segment data for the item
add the columns: weather, service date, day_of_week, trip_id, vehicle_id
save the result into result_list
concatenate the dataframe in the result_list
:param segment_df: the dataframe of segment table
:param stop_times: the dataframe of the stop_times.txt file in GTFS dataset
:param weather_df: the dataframe of the weather information
:return: the dataframe of the improved segment table
"""
grouped_list = list(segment_df.groupby(['service_date', 'trip_id']))
result_list = []
for i in xrange(len(grouped_list)):
name, item = grouped_list[i]
service_date, trip_id = name
stop_sequence = list(stop_times[stop_times.trip_id == trip_id].stop_id)
current_segment = improve_dataset_unit(item, stop_sequence)
if current_segment is None:
continue
# add the other columns
current_segment['weather'] = weather_df[weather_df.date == service_date].iloc[0].weather
current_segment['service_date'] = service_date
current_segment['day_of_week'] = datetime.strptime(str(service_date), '%Y%m%d').weekday()
current_segment['trip_id'] = trip_id
current_segment['vehicle_id'] = item.iloc[0].vehicle_id
result_list.append(current_segment)
if result_list == []:
result = None
else:
result = pd.concat(result_list, ignore_index=True)
return result
#################################################################################################################
# api data section #
#################################################################################################################
"""
Generate the api data from the GTFS data and the historical data
"""
def generate_api_data(date_list, time_list, stop_num, route_stop_dist, full_history):
"""
Generate the api data for the test_route_set and given time list
Algorithm:
Generate the set of trip id for test routes
Generate the random test stop id for each test routes
Filtering the historical data with trip id
Generate the list of historical data Groupby(date, trip id)
for each item in the list of the historical data:
obtain the trip id and the date
obtain the corresponding route
obtain the corresponding stop set
for stop in stop set:
for each time point in the time list:
check whether the bus has passed the stop at the time point
if yes, continue to next stop
otherwise, save the record into result
:param date_list: the date list for testing [20160101, 20160102, ...]
:param time_list: the time list for testing, ['12:00:00', '12:05:00', ...]
:param stop_num: the number of the target stop for test
:param route_stop_dist: the dataframe for the route_stop_dist table
:param full_history: the dataframe for the history table
:return: the dataframe for the api data
"""
trip_route_dict = {}
route_stop_dict = {}
grouped = route_stop_dist.groupby(['shape_id'])
for shape_id, single_route_stop_dist in grouped:
stop_sequence = list(single_route_stop_dist.stop_id)
if len(stop_sequence) < 5:
continue
trip_set = set(full_history[full_history.shape_id == shape_id].trip_id)
current_dict = dict.fromkeys(trip_set, shape_id)
trip_route_dict.update(current_dict)
stop_set = set()
for i in range(stop_num):
stop_set.add(stop_sequence[random.randint(2, len(stop_sequence) - 2)])
route_stop_dict[shape_id] = stop_set
history = full_history[
(full_history.trip_id.isin(trip_route_dict.keys())) & (full_history.service_date.isin(date_list))]
history_grouped = history.groupby(['service_date', 'trip_id'])
result = pd.DataFrame(
columns=['trip_id', 'vehicle_id', 'route_id', 'stop_id', 'time_of_day', 'date', 'dist_along_route', 'shape_id'])
print_dict = dict.fromkeys(date_list, True)
for name, single_history in history_grouped:
service_date, trip_id = name
if service_date not in date_list:
continue
if print_dict[service_date]:
print service_date
print_dict[service_date] = False
shape_id = trip_route_dict[trip_id]
stop_set = [str(int(item)) for item in route_stop_dict[shape_id]]
stop_sequence = list(route_stop_dist[route_stop_dist.shape_id == shape_id].stop_id)
# filtering the history data: remove the abnormal value
single_history = filter_single_history(single_history, stop_sequence)
if single_history is None or len(single_history) < 2:
continue
for target_stop in stop_set:
target_index = stop_sequence.index(float(target_stop))
for current_time in time_list:
prev_history = single_history[single_history['timestamp'].apply(lambda x: x[11:19] <= current_time)]
next_history = single_history[single_history['timestamp'].apply(lambda x: x[11:19] > current_time)]
if len(prev_history) == 0:
continue
if len(next_history) == 0:
break
tmp_stop = str(prev_history.iloc[-1].next_stop_id)
tmp_index = stop_sequence.index(float(tmp_stop))
if tmp_index > target_index:
break
# The bus has not yet passed the target stop, so the remaining stops in the stop sequence can be used to calculate the result
route_id = single_history.iloc[0].route_id
current_list = generate_single_api(current_time, route_id, prev_history, next_history, target_stop, shape_id)
if current_list is not None:
result.loc[len(result)] = current_list
return result
def generate_single_api(current_time, route_id, prev_history, next_history, stop_id, shape_id):
"""
Calculate the single record for the api data
Algorithm for calculating the single record:
According to the time point, find the closest time duration (prev, next)
Calculate the dist_along_route for the bus at the current time point:
calculate the space distance between the time duration (prev, next)
calculate the time distance of two parts: (prev, current), (prev, next)
multiply the space distance by the ratio of the time distances to obtain the dist_along_route for current
According to the dist_along_route and the stop sequence, determine the remaining stops including the target stop
Count the number of the remaining stops
:param current_time: The current time for generating the api data
:param route_id: the id of the route for the specific record
:param prev_history: the dataframe of the history table before the timestamp on the record of api data with the same trip id
:param next_history: the dataframe of the history table after the timestamp on the record of api data with the same trip id
:param stop_id: The id of the target stop
:param shape_id: The id of the shape (stop sequence)
:return: the list for the result
"""
single_trip = prev_history.iloc[0].trip_id
prev_record = prev_history.iloc[-1]
next_record = next_history.iloc[0]
# calculate the dist_along_route for current
prev_distance = float(prev_record.total_distance)
next_distance = float(next_record.total_distance)
prev_timestamp = datetime.strptime(prev_record['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
next_timestamp = datetime.strptime(next_record['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
# determine the current time
if prev_record['timestamp'][11:19] <= current_time <= next_record['timestamp'][11:19]:
time_of_day = prev_record['timestamp'][:11] + current_time + 'Z'
else:
# case: this trip is crossing between two days
if current_time > next_record['timestamp'][11:19]:
time_of_day = prev_record['timestamp'][:11] + current_time + 'Z'  # use the date part, as in the normal case above
else:
time_of_day = next_record['timestamp'][:11] + current_time + 'Z'
time_of_day = datetime.strptime(time_of_day, '%Y-%m-%dT%H:%M:%SZ')
dist_along_route = calculate_arrival_distance(time_of_day, prev_distance, next_distance, prev_timestamp, next_timestamp)
# Generate the return list
# trip_id vehicle_id route_id stop_id time_of_day date dist_along_route
result = [single_trip, prev_record['vehicle_id'], route_id, stop_id, str(time_of_day), prev_record['service_date'], dist_along_route, shape_id]
return result
#################################################################################################################
# main function section #
#################################################################################################################
"""
Functions for users
"""
# weather data
def obtain_weather(start_date, end_date, api_token, save_path=None, engine=None):
"""
Download the weather.csv file into save_path
:param start_date: start date, string, example: '20160101'
:param end_date: similar to start_date
:param api_token: api_token for the wunderground api interface. Anyone can apply for one for free.
:param save_path: path of a csv file for storing the weather table.
:param engine: database connect engine
:return: return the weather table in dataframe
"""
weather = download_weather(start_date, end_date, api_token)
if save_path is not None:
weather.to_csv(save_path)
if engine is not None:
weather.to_sql(name='weather', con=engine, if_exists='replace', index_label='id')
return weather
# history data
def download_history_file(year, month, date_list, save_path):
"""
Download the history data from the NYC transit database. The user still needs to uncompress the data into csv files
:param year: integer to represent the year, example: 2016
:param month: integer to represent the month, example: 1
:param date_list: list of integer to represent the dates of the required data
:param save_path: path for downloading the compressed data
:return: None
"""
year = str(year)
if month < 10:
month = '0' + str(month)
else:
month = str(month)
base_url = 'http://data.mytransit.nyc/bus_time/'
url = base_url + year + '/' + year + '-' + month + '/'
download_file = urllib.URLopener()
for date in date_list:
if date < 10:
date = '0' + str(date)
else:
date = str(date)
filename = 'bus_time_' + year + month + date + '.csv.xz'
file_url = url + filename
download_file.retrieve(file_url, save_path + filename)
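# Hypothetical call: download the compressed dumps for the first three days of
# January 2016 into ./raw/ (a trailing slash is needed because save_path is
# concatenated with the filename; the files still have to be uncompressed):
#   download_history_file(2016, 1, [1, 2, 3], 'raw/')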
def obtain_history(start_date, end_date, trips, history_path, save_path=None, engine=None):
"""
Generate the csv file for history data
:param start_date: integer to represent the start date, example: 20160105
:param end_date: integer to represent the end date, format is the same as start date
:param trips: the dataframe storing the table from trips.txt file in GTFS dataset
:param history_path: path of all the historical data. User should place all the historical data under the same directory and use this directory as the history_path. Please notice that the change of the filename might cause error.
:param save_path: path of a csv file to store the history table
:param engine: database connect engine
:return: the history table in dataframe
"""
trip_set = set(trips.trip_id)
# generate the history data
file_list = os.listdir(history_path)
history_list = []
for filename in file_list:
if not filename.endswith('.csv'):
continue
if int(start_date) <= int(filename[9:17]) <= int(end_date):
print filename
ptr_history = pd.read_csv(history_path + filename)
tmp_history = ptr_history[(ptr_history.dist_along_route != '\N') & (ptr_history.dist_along_route != 0) & (ptr_history.progress == 0) & (ptr_history.block_assigned == 1) & (ptr_history.dist_along_route > 1) & (ptr_history.trip_id.isin(trip_set))]
history_list.append(tmp_history)
result = pd.concat(history_list, ignore_index=True)
# add some other information: total_distance, route_id, shape_id
result['dist_along_route'] = pd.to_numeric(result['dist_along_route'])
result['dist_from_stop'] = pd.to_numeric(result['dist_from_stop'])
result['total_distance'] = result['dist_along_route'] - result['dist_from_stop']
trip_route_dict = trips.set_index('trip_id').to_dict(orient='index')
result['route_id'] = result['trip_id'].apply(lambda x: trip_route_dict[x]['route_id'])
result['shape_id'] = result['trip_id'].apply(lambda x: trip_route_dict[x]['shape_id'])
# export csv file
if save_path is not None:
result.to_csv(save_path)
if engine is not None:
result.to_sql(name='history', con=engine, if_exists='replace', index_label='id')
return result
# route_stop_dist data
def obtain_route_stop_dist(trips, stop_times, history_file, save_path=None, engine=None):
"""
Generate the csv file for route_stop_dist data. In order to obtain more complete data for route_stop_dist, the history file should be as large as possible.
:param trips: the dataframe storing the table from trips.txt file in GTFS dataset
:param stop_times: the dataframe storing the table from stop_times.txt file in GTFS dataset
:param history_file: path of the preprocessed history file
:param save_path: path of a csv file to store the route_stop_dist table
:param engine: database connect engine
:return: the route_stop_dist table in dataframe
"""
trip_route_dict = trips.set_index('trip_id').to_dict(orient='index')
stop_times['route_id'] = stop_times['trip_id'].apply(lambda x: trip_route_dict[x]['route_id'])
stop_times['shape_id'] = stop_times['trip_id'].apply(lambda x: trip_route_dict[x]['shape_id'])
history = pd.read_csv(history_file)
route_stop_dist = calculate_stop_distance(stop_times, history)
if save_path is not None:
route_stop_dist.to_csv(save_path)
if engine is not None:
route_stop_dist.to_sql(name='route_stop_dist', con=engine, if_exists='replace', index_label='id')
return route_stop_dist
# segment data
def obtain_segment(weather_df, trips, stop_times, route_stop_dist, full_history, training_date_list, save_path=None, engine=None):
"""
Generate the csv file for segment table
:param weather_df: the dataframe storing the weather data
:param trips: the dataframe storing the table from trips.txt file in GTFS dataset
:param stop_times: the dataframe storing the table from stop_times.txt file in GTFS dataset
:param route_stop_dist: the dataframe storing the route_stop_dist table
:param full_history: the dataframe storing the history table
:param training_date_list: the list of dates to generate the segments from history table
:param save_path: path of a csv file to store the segment table
:param engine: database connect engine
:return: the segment table in dataframe
"""
full_history = full_history[full_history.service_date.isin(training_date_list)]
shape_list = set(route_stop_dist.shape_id)
full_history = full_history[full_history.shape_id.isin(shape_list)]
segment_df = generate_original_segment(full_history, weather_df, stop_times)
segment_df = improve_dataset(segment_df, stop_times, weather_df)
trip_route_dict = trips.set_index('trip_id').to_dict(orient='index')
segment_df['route_id'] = segment_df['trip_id'].apply(lambda x: trip_route_dict[x]['route_id'])
segment_df['shape_id'] = segment_df['trip_id'].apply(lambda x: trip_route_dict[x]['shape_id'])
if save_path is not None:
segment_df.to_csv(save_path)
if engine is not None:
segment_df.to_sql(name='segment', con=engine, if_exists='replace', index_label='id')
return segment_df
# api_data table
def obtain_api_data(route_stop_dist, full_history, date_list, time_list, stop_num, save_path=None, engine=None):
"""
Generate the csv file for api_data table
:param route_stop_dist: the dataframe storing route_stop_dist table
:param full_history: the dataframe storing historical data
:param date_list: the list of integers to represent the dates for generating api data. Example: [20160101, 20160102, 20160103]
:param time_list: the list of strings to represent the time for generating api data. Example: ['12:00:00', '12:05:00', '12:10:00', '12:15:00', '12:20:00', '12:25:00', '12:30:00']. Please follow the same format.
:param stop_num: the number of target stop for each shape id
:param save_path: path of a csv file to store the api_data table
:param engine: database connect engine
:return: the dataframe storing api_data table
"""
full_history = full_history[full_history.service_date.isin(date_list)]
result = generate_api_data(date_list, time_list, stop_num, route_stop_dist, full_history)
if save_path is not None:
result.to_csv(save_path)
if engine is not None:
result.to_sql(name='api_data', con=engine, if_exists='replace', index_label='id')
return result
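# A minimal, hypothetical end-to-end driver for the user-facing functions above.
# Every path, date range and the API token below are placeholders, the GTFS files
# are assumed to have been extracted locally, and it relies on the module-level
# imports (pandas as pd, etc.) at the top of this file.
if __name__ == '__main__':
    trips = pd.read_csv('gtfs/trips.txt')
    stop_times = pd.read_csv('gtfs/stop_times.txt')
    weather_df = obtain_weather('20160101', '20160131', 'YOUR_API_TOKEN',
                                save_path='weather.csv')
    history = obtain_history(20160104, 20160131, trips, 'history/',
                             save_path='history.csv')
    route_stop_dist = obtain_route_stop_dist(trips, stop_times, 'history.csv',
                                             save_path='route_stop_dist.csv')
    segment = obtain_segment(weather_df, trips, stop_times, route_stop_dist,
                             history, range(20160104, 20160122),
                             save_path='segment.csv')
    api_data = obtain_api_data(route_stop_dist, history, [20160125, 20160126],
                               ['12:00:00', '12:30:00', '13:00:00'], 2,
                               save_path='api_data.csv')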
``` |
{
"source": "Joshuawadd/rlcard",
"score": 3
} |
#### File: rlcard/envs/env.py
```python
from rlcard.utils import *
from rlcard.games.whist.utils import cards2list
import os
class Env(object):
'''
The base Env class. All the environments in RLCard
should be built on this class and implement as many of these functions
as they can.
'''
def __init__(self, config):
''' Initialize the environment
Args:
config (dict): A config dictionary. All the fields are
optional. Currently, the dictionary includes:
'seed' (int) - An environment-local random seed.
'env_num' (int) - If env_num>1, the environment will be run
with multiple processes. Note the implementation is
in `vec_env.py`.
'allow_step_back' (boolean) - True if allowing
step_back.
'allow_raw_data' (boolean) - True if allowing
raw obs in state['raw_obs'] and raw legal actions in
state['raw_legal_actions'].
'single_agent_mode' (boolean) - True if single agent mode,
i.e., the other players are pretrained models.
'active_player' (int) - If 'single_agent_mode' is True,
'active_player' specifies the player that does not use
pretrained models.
There can be some game specific configurations, e.g., the
number of players in the game. These fields should start with
'game_', e.g., 'game_player_num' specifies the number of
players in the game. Since these configurations may be game-specific,
the default settings should be put in the Env class. For example,
the default game configurations for Blackjack should be in
'rlcard/envs/blackjack.py'
TODO: Support more game configurations in the future.
'''
self.allow_step_back = self.game.allow_step_back = config['allow_step_back']
self.allow_raw_data = config['allow_raw_data']
self.record_action = config['record_action']
if self.record_action:
self.action_recorder = []
# Game specific configurations
# Currently only support blackjack
# TODO support game configurations for all the games
supported_envs = ['blackjack']
if self.name in supported_envs:
_game_config = self.default_game_config.copy()
for key in config:
if key in _game_config:
_game_config[key] = config[key]
self.game.configure(_game_config)
# Get the number of players/actions in this game
self.player_num = self.game.get_player_num()
self.action_num = self.game.get_action_num()
# A counter for the timesteps
self.timestep = 0
# Modes
self.single_agent_mode = config['single_agent_mode']
self.active_player = config['active_player']
# Load pre-trained models if single_agent_mode=True
if self.single_agent_mode:
self.model = self._load_model()
# If at least one pre-trained agent needs raw data, we set self.allow_raw_data = True
for agent in self.model.agents:
if agent.use_raw:
self.allow_raw_data = True
break
# Set random seed, default is None
self._seed(config['seed'])
def reset(self):
'''
Reset environment in single-agent mode
Call `_init_game` if not in single agent mode
'''
if not self.single_agent_mode:
return self._init_game()
while True:
state, player_id = self.game.init_game()
while not player_id == self.active_player:
self.timestep += 1
action, _ = self.model.agents[player_id].eval_step(
self._extract_state(state))
if not self.model.agents[player_id].use_raw:
action = self._decode_action(action)
state, player_id = self.game.step(action)
if not self.game.is_over():
break
return self._extract_state(state)
def step(self, action, raw_action=False):
''' Step forward
Args:
action (int): The action taken by the current player
raw_action (boolean): True if the action is a raw action
Returns:
(tuple): Tuple containing:
(dict): The next state
(int): The ID of the next player
'''
if not raw_action:
action = self._decode_action(action)
if self.single_agent_mode:
return self._single_agent_step(action)
self.timestep += 1
# Record the action for human interface
if self.record_action:
self.action_recorder.append([self.get_player_id(), action])
next_state, player_id = self.game.step(action)
return self._extract_state(next_state), player_id
def step_back(self):
''' Take one step backward.
Returns:
(tuple): Tuple containing:
(dict): The previous state
(int): The ID of the previous player
Note: Error will be raised if step back from the root node.
'''
if not self.allow_step_back:
raise Exception(
'Step back is off. To use step_back, please set allow_step_back=True in rlcard.make')
if not self.game.step_back():
return False
player_id = self.get_player_id()
state = self.get_state(player_id)
return state, player_id
def set_agents(self, agents):
'''
Set the agents that will interact with the environment.
This function must be called before `run`.
Args:
agents (list): List of Agent classes
'''
if self.single_agent_mode:
raise ValueError(
'Setting agent in single agent mode or human mode is not allowed.')
self.agents = agents
# If at least one agent needs raw data, we set self.allow_raw_data = True
for agent in self.agents:
if agent.use_raw:
self.allow_raw_data = True
break
def run(self, is_training=False):
'''
Run a complete game, either for evaluation or training RL agent.
Args:
is_training (boolean): True if for training purpose.
Returns:
(tuple) Tuple containing:
(list): A list of trajectories generated from the environment.
(list): A list of payoffs. Each entry corresponds to one player.
Note: The trajectories are a 3-dimensional list. The first dimension is for different players.
The second dimension is for different transitions. The third dimension is for the contents of each transition
'''
if self.single_agent_mode:
raise ValueError('Run in single agent not allowed.')
trajectories = [[] for _ in range(self.player_num)]
state, player_id = self.reset()
player_hand = [0, 0, 0, 0]
difficulty = 0
#print(self.game.trump_suit)
init_player_id = player_id  # remember the starting player; the loop below reuses player_id
if not is_training:
for player_id in range(self.player_num):
#print(cards2list(self.game.players[player_id].hand))
for card in self.game.players[player_id].hand:
if card.suit == self.game.trump_suit:
if card.rank == 'A':
player_hand[player_id] += 27
elif card.rank == 'K':
player_hand[player_id] += 26
elif card.rank == 'Q':
player_hand[player_id] += 25
elif card.rank == 'J':
player_hand[player_id] += 24
elif card.rank == 'T':
player_hand[player_id] += 23
else:
player_hand[player_id] += (int(card.rank) + 13)
else:
if card.rank == 'A':
player_hand[player_id] += 14
elif card.rank == 'K':
player_hand[player_id] += 13
elif card.rank == 'Q':
player_hand[player_id] += 12
elif card.rank == 'J':
player_hand[player_id] += 11
elif card.rank == 'T':
player_hand[player_id] += 10
else:
player_hand[player_id] += int(card.rank)
#print(player_hand)
score_1 = max(player_hand[0], player_hand[2])
score_2 = max(player_hand[1], player_hand[3])
difficulty = score_1 - score_2
player_id = init_player_id  # restore the starting player clobbered by the loop above
# Loop to play the game
trajectories[player_id].append(state)
while not self.is_over():
# Agent plays
if not is_training:
action, _=self.agents[player_id].eval_step(state)
else:
action=self.agents[player_id].step(state)
# Environment steps
next_state, next_player_id=self.step(
action, self.agents[player_id].use_raw)
# Save action
trajectories[player_id].append(action)
# Set the state and player
state=next_state
player_id=next_player_id
# Save state.
if not self.game.is_over():
trajectories[player_id].append(state)
# Add a final state to all the players
for player_id in range(self.player_num):
state=self.get_state(player_id)
trajectories[player_id].append(state)
# Payoffs
payoffs=self.get_payoffs()
# print("start")
# print(trajectories)
# print()
# Reorganize the trajectories
trajectories=reorganize(trajectories, payoffs)
return trajectories, payoffs, difficulty
def run_example(self, log_location, is_training=False):
'''
Run a complete game and append a human-readable log of each trick
(hands, lead player, trump suit, played cards, winner and score) to the
file at log_location.
Args:
log_location (str): path of the text file that the example log is appended to
is_training (boolean): True if for training purpose.
Returns:
None. The trajectories are reorganized internally but not returned.
'''
if self.single_agent_mode:
raise ValueError('Run in single agent not allowed.')
trajectories=[[] for _ in range(self.player_num)]
state, player_id=self.reset()
player_hand = [0, 0, 0, 0]
difficulty = 0  # mirrors run(): avoids a NameError in the log below when is_training is True
init_player_id = player_id  # remember the starting player; the loop below reuses player_id
if not is_training:
for player_id in range(self.player_num):
#print(cards2list(self.game.players[player_id].hand))
for card in self.game.players[player_id].hand:
if card.suit == self.game.trump_suit:
if card.rank == 'A':
player_hand[player_id] += 27
elif card.rank == 'K':
player_hand[player_id] += 26
elif card.rank == 'Q':
player_hand[player_id] += 25
elif card.rank == 'J':
player_hand[player_id] += 24
elif card.rank == 'T':
player_hand[player_id] += 23
else:
player_hand[player_id] += (int(card.rank) + 13)
else:
if card.rank == 'A':
player_hand[player_id] += 14
elif card.rank == 'K':
player_hand[player_id] += 13
elif card.rank == 'Q':
player_hand[player_id] += 12
elif card.rank == 'J':
player_hand[player_id] += 11
elif card.rank == 'T':
player_hand[player_id] += 10
else:
player_hand[player_id] += int(card.rank)
#print(player_hand)
score_1 = max(player_hand[0], player_hand[2])
score_2 = max(player_hand[1], player_hand[3])
difficulty = score_1 - score_2
player_id = init_player_id  # restore the starting player clobbered by the loop above
# Loop to play the game
trajectories[player_id].append(state)
i=1
while not self.is_over():
# Agent plays
if not is_training:
action, _=self.agents[player_id].eval_step(state)
else:
action=self.agents[player_id].step(state)
# Environment steps
next_state, next_player_id=self.step(
action, self.agents[player_id].use_raw)
# Save action
trajectories[player_id].append(action)
if i % 4 == 0:
# print("")
# print("Player 0 hand:", cards2list(self.game.players[0].hand))
# print("Player 1 hand:", cards2list(self.game.players[1].hand))
# print("Player 2 hand:", cards2list(self.game.players[2].hand))
# print("Player 3 hand:", cards2list(self.game.players[3].hand))
# print("Lead player:", self.game.round.lead_player)
# print("Trump Suit:", self.game.trump_suit)
# print("Playing Card:", self.game.round.played_card)
# print("Played Cards:", cards2list(self.game.round.played_cards))
# print("Winner:", self.game.round.round_winner, "Winning card:", self.game.round.played_cards[self.game.round.winning_card])
# print("Score:", self.game.players[0].tricks, self.game.players[1].tricks, self.game.players[2].tricks, self.game.players[3].tricks)
with open(log_location, "a") as file_object:
file_object.write("\n")
file_object.write(
"Difficulty: " + str(difficulty) + "\n")
file_object.write(
"Player 0 hand: " + str(cards2list(self.game.players[0].hand)) + "\n")
file_object.write(
"Player 1 hand: " + str(cards2list(self.game.players[1].hand)) + "\n")
file_object.write(
"Player 2 hand: " + str(cards2list(self.game.players[2].hand)) + "\n")
file_object.write(
"Player 3 hand: " + str(cards2list(self.game.players[3].hand)) + "\n")
file_object.write("Lead player: " + \
str(self.game.round.last_lead) + "\n")
file_object.write(
"Trump Suit: " + self.game.trump_suit + "\n")
# file_object.write("Playing Card: " + self.game.round.played_card.__str__() + "\n")
file_object.write(
"Played Cards: " + str(cards2list(self.game.round.round_cards)) + "\n")
file_object.write("Winner: " + str(self.game.round.round_winner) + \
" Winning card: " + self.game.round.winning_card.__str__() + "\n")
file_object.write("Score: " + str(self.game.players[0].tricks) + " " + str(self.game.players[1].tricks) + " " + str(
self.game.players[2].tricks) + " " + str(self.game.players[3].tricks) + "\n")
# Set the state and player
state=next_state
player_id=next_player_id
# Save state.
if not self.game.is_over():
trajectories[player_id].append(state)
i += 1
# Add a final state to all the players
for player_id in range(self.player_num):
state=self.get_state(player_id)
trajectories[player_id].append(state)
# Payoffs
payoffs=self.get_payoffs()
# print("start")
# print(trajectories[0][0])
# Reorganize the trajectories
trajectories=reorganize(trajectories, payoffs)
# return trajectories, payoffs
def is_over(self):
''' Check whether the current game is over
Returns:
(boolean): True if current game is over
'''
return self.game.is_over()
def get_player_id(self):
''' Get the current player id
Returns:
(int): The id of the current player
'''
return self.game.get_player_id()
def get_state(self, player_id):
''' Get the state given player id
Args:
player_id (int): The player id
Returns:
(numpy.array): The observed state of the player
'''
return self._extract_state(self.game.get_state(player_id))
def get_payoffs(self):
''' Get the payoffs of players. Must be implemented in the child class.
Returns:
(list): A list of payoffs for each player.
Note: Must be implemented in the child class.
'''
raise NotImplementedError
def get_perfect_information(self):
''' Get the perfect information of the current state
Returns:
(dict): A dictionary of all the perfect information of the current state
Note: Must be implemented in the child class.
'''
raise NotImplementedError
def _seed(self, seed=None):
self.np_random, seed=seeding.np_random(seed)
self.game.np_random=self.np_random
return seed
def _init_game(self):
''' Start a new game
Returns:
(tuple): Tuple containing:
(numpy.array): The beginning state of the game
(int): The beginning player
'''
state, player_id=self.game.init_game()
if self.record_action:
self.action_recorder=[]
return self._extract_state(state), player_id
def _load_model(self):
''' Load pretrained/rule model
Returns:
model (Model): A Model object
'''
raise NotImplementedError
def _extract_state(self, state):
''' Extract useful information from state for RL. Must be implemented in the child class.
Args:
state (dict): The raw state
Returns:
(numpy.array): The extracted state
'''
raise NotImplementedError
def _decode_action(self, action_id):
''' Decode Action id to the action in the game.
Args:
action_id (int): The id of the action
Returns:
(string): The action that will be passed to the game engine.
Note: Must be implemented in the child class.
'''
raise NotImplementedError
def _get_legal_actions(self):
''' Get all legal actions for current state.
Returns:
(list): A list of legal actions' id.
Note: Must be implemented in the child class.
'''
raise NotImplementedError
def _single_agent_step(self, action):
''' Step forward for human/single agent
Args:
action (int): The action taken by the current player
Returns:
next_state (numpy.array): The next state
'''
reward=0.
done=False
self.timestep += 1
state, player_id=self.game.step(action)
while not self.game.is_over() and not player_id == self.active_player:
self.timestep += 1
action, _=self.model.agents[player_id].eval_step(
self._extract_state(state))
if not self.model.agents[player_id].use_raw:
action=self._decode_action(action)
state, player_id=self.game.step(action)
if self.game.is_over():
reward=self.get_payoffs()[self.active_player]
done=True
state=self.reset()
return state, reward, done
return self._extract_state(state), reward, done
@staticmethod
def init_game():
''' (This function has been replaced by `reset()`)
'''
raise ValueError('init_game is removed. Please use env.reset()')
```
#### File: games/whist/judger.py
```python
from rlcard.core import Judger
from rlcard.utils.utils import rank2int
class WhistJudger(Judger):
def __init__(self, np_random):
''' Initialize a judger class
'''
self.np_random = np_random
def judge_round(self, trump, lead_suit, played_cards):
winning_card = None
for card in played_cards:
if winning_card == None:
winning_card = card
elif card.suit == trump:
if winning_card.suit == trump:
if rank2int(card.rank) > rank2int(winning_card.rank):
winning_card = card
else:
winning_card = card
elif card.suit == lead_suit:
if winning_card.suit == lead_suit:
if rank2int(card.rank) > rank2int(winning_card.rank):
winning_card = card
return played_cards.index(winning_card), winning_card
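# Worked example of the precedence above (hypothetical cards): with trump spades
# and hearts led, the played cards [10 of hearts, king of hearts, 2 of spades,
# ace of hearts] are judged so that the lone trump (2 of spades) wins, and
# judge_round returns index 2 together with that card.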
def judge_game(self, players):
winner = None
start = True
for player in players:
if start:
winner = player.player_id
start = False
else:
if player.tricks > players[winner].tricks:
winner = player.player_id
return winner
```
#### File: games/whist/player.py
```python
from rlcard.core import Player
class WhistPlayer(Player):
def __init__(self, player_id, np_random):
self.np_random = np_random
self.hand = []
self.tricks = 0
self.player_id = player_id
self.empty_suits = []
def get_player_id(self):
''' Return the id of the player
'''
return self.player_id
```
#### File: games/whist/round.py
```python
from rlcard.core import Round, Card
from rlcard.utils.utils import init_standard_deck, elegent_form
from rlcard.games.whist.utils import cards2list
import numpy as np
class WhistRound(Round):
def __init__(self, dealer, num_players, np_random, judger, trump_suit):
''' When the game starts, round id should be 1
'''
self.np_random = np_random
self.dealer = dealer
self.target = None
self.current_player = 0
self.num_players = num_players
self.played_cards = []
self.old_cards = []
self.is_over = False
self.winner = None
self.lead_player = 0
self.lead_card = None
self.round_winner = None
self.judger = judger
self.trump_suit = trump_suit
self.played_card = None
self.winning_card = None
self.round_cards = []
self.last_lead = 0
def start_new_round(self, players):
winning_index, self.winning_card = self.judger.judge_round(self.trump_suit, self.lead_card.suit, self.played_cards)
self.round_winner = (self.current_player + winning_index) % self.num_players
players[self.round_winner].tricks +=1
players[(self.round_winner + 2) % self.num_players].tricks += 1
self.old_cards.extend(self.played_cards)
# print("")
# print("Player 0 hand:", cards2list(players[0].hand))
# print("Player 1 hand:", cards2list(players[1].hand))
# print("Player 2 hand:", cards2list(players[2].hand))
# print("Player 3 hand:", cards2list(players[3].hand))
# print("Lead player:", self.lead_player)
# print("Trump Suit:", self.trump_suit)
# print("Played Cards:", cards2list(self.played_cards))
# print("Winner:", self.round_winner, "Winning card:", winning_card)
# print("Score:", players[0].tricks, players[1].tricks, players[2].tricks, players[3].tricks)
self.round_cards = self.played_cards
self.last_lead = self.lead_player
self.played_cards = []
self.current_player = self.round_winner
self.lead_player = self.current_player
if not players[self.current_player].hand:
self.is_over = True
self.winner = self.judger.judge_game(players)
def proceed_round(self, players, action):
''' Call other classes' functions to keep the game running
'''
player = players[self.current_player]
#print(action)
suit = action[1]
rank = action[0]
# for actions in player.hand:
# print(actions)
for index, card in enumerate(player.hand):
if suit == card.suit and rank == card.rank:
remove_index = index
break
card = player.hand.pop(remove_index)
self.played_card = card
self.played_cards.append(card)
#print(player.get_player_id(), self.lead_player)
if player.get_player_id() == self.lead_player:
self.lead_card = card
else:
if card.suit != self.lead_card.suit:
player.empty_suits.append(card.suit)
self.current_player = (self.current_player + 1) % self.num_players
#print("current player", self.current_player, self.lead_player)
if self.current_player == self.lead_player:
self.start_new_round(players)
def get_legal_actions(self, players, player_id, lead_player, lead_card):
legal_actions = []
hand = players[player_id].hand
target = self.target
#print(lead_card)
if lead_card:
lead_suit = lead_card.suit
lead_suit_cards = []
if player_id == lead_player:
for card in hand:
#print('hi', card.__str__()[0])
#x = card.__str__()
#legal_actions.append(x)
legal_actions.append(card)
else:
for card in hand:
if card.suit == lead_suit:
#x = card.__str__()
#legal_actions.append(x)
lead_suit_cards.append(card)
if not lead_suit_cards:
for card in hand:
#x = card.__str__()
#legal_actions.append(x)
legal_actions.append(card)
else:
for card in lead_suit_cards:
#x = card.__str__()
#legal_actions.append(x)
legal_actions.append(card)
#print('hi', legal_actions)
return cards2list(legal_actions)
def get_state(self, players, player_id):
''' Get player's state
Args:
players (list): The list of WhistPlayer
player_id (int): The id of the player
'''
state = {}
player = players[player_id]
state['hand'] = cards2list(player.hand)
state['played_cards'] = cards2list(self.played_cards)
state['old_cards'] = cards2list(self.old_cards)
others_hand = [[],[],[]]
i=0
for other_player in players:
if other_player.player_id != player_id:
possible_cards = init_standard_deck()
for card in player.hand:
possible_cards.remove(card)
for card in self.played_cards:
possible_cards.remove(card)
for card in self.old_cards:
possible_cards.remove(card)
# filter with a list comprehension: removing items from possible_cards
# while iterating over it would skip some cards
possible_cards = [card for card in possible_cards
                  if card.suit not in other_player.empty_suits]
others_hand[i].extend(possible_cards)
i+=1
#print(cards2list(others_hand[0]))
state['others_hand_0'] = cards2list(others_hand[0])
state['others_hand_1'] = cards2list(others_hand[1])
state['others_hand_2'] = cards2list(others_hand[2])
state['player_position'] = len(self.played_cards)
# others_hand = []
# for player in players:
# if player.player_id != player_id:
# others_hand.extend(player.hand)
# state['others_hand'] = cards2list(others_hand)
state['legal_actions'] = self.get_legal_actions(players, player_id, self.lead_player, self.lead_card)
state['card_num'] = []
state['score'] = []
for player in players:
state['card_num'].append(len(player.hand))
state['score'].append(player.tricks)
if self.lead_card:
state['lead_card'] = self.lead_card.__str__()
else:
state['lead_card'] = self.lead_card
state['lead_player'] = self.lead_player
return state
```
#### File: rlcard/models/whist_rule_models.py
```python
import numpy as np
import rlcard
from rlcard.models.model import Model
from rlcard.utils.utils import rank2int
class WhistRuleAgentV1(object):
''' Whist Rule agent version 1
'''
def __init__(self):
self.use_raw = True
def step(self, state):
''' Predict the action given raw state. A naive rule: when leading,
play the highest-ranked card in hand; otherwise play a random legal card
that can still win the trick, or the lowest-ranked legal card if none can win.
Args:
state (dict): Raw state from the game
Returns:
action (str): Predicted action
'''
legal_actions = state['raw_legal_actions']
state = state['raw_obs']
hand = state['hand']
played_cards = state['played_cards']
player_position = state['player_position']
trump = state['trump_suit']
highest_card = None
winnable_cards = []
lowest_card = None
if player_position == 0:
for card in hand:
#print(card)
if highest_card == None:
highest_card = card
elif rank2int(card[0]) > rank2int(highest_card[0]):
highest_card = card
action = highest_card
else:
for card in legal_actions:
if self.can_win(played_cards, card, trump, state['lead_card'][1]):
winnable_cards.append(card)
#print(winnable_cards)
if winnable_cards:
action = np.random.choice(winnable_cards)
else:
for card in legal_actions:
if lowest_card == None:
lowest_card = card
elif rank2int(card[0]) < rank2int(lowest_card[0]):
lowest_card = card
action = lowest_card
#print(legal_actions, winnable_cards, action)
#print(action)
return action
def eval_step(self, state):
''' Step for evaluation. The same as step
'''
return self.step(state), []
@staticmethod
def can_win(played_cards, card, trump, lead_suit):
# work on a copy so the caller's played_cards list is not mutated
# when several candidate cards are checked in a row
played_cards = played_cards + [card]
winning_card = None
for card in played_cards:
if winning_card == None:
winning_card = card
elif card[1] == trump:
if winning_card[1] == trump:
if rank2int(card[0]) > rank2int(winning_card[0]):
winning_card = card
else:
winning_card = card
elif card[1] == lead_suit:
if winning_card[1] == lead_suit:
if rank2int(card[0]) > rank2int(winning_card[0]):
winning_card = card
if winning_card == card:
return True
else:
return False
class WhistRuleModelV1(Model):
''' Whist Rule Model version 1
'''
def __init__(self):
''' Load pretrained model
'''
env = rlcard.make('whist')
rule_agent = WhistRuleAgentV1()
self.rule_agents = [rule_agent for _ in range(env.player_num)]
@property
def agents(self):
''' Get a list of agents for each position in the game
Returns:
agents (list): A list of agents
Note: Each agent should be just like RL agent with step and eval_step
functioning well.
'''
return self.rule_agents
@property
def use_raw(self):
''' Indicate whether use raw state and action
Returns:
use_raw (boolean): True if using raw state and action
'''
return True
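# A minimal, hypothetical usage sketch. It assumes the 'whist' environment of this
# fork is registered with rlcard.make and that Env.run returns
# (trajectories, payoffs, difficulty) as in rlcard/envs/env.py above.
if __name__ == '__main__':
    env = rlcard.make('whist')
    model = WhistRuleModelV1()
    env.set_agents(model.agents)
    trajectories, payoffs, difficulty = env.run(is_training=False)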
```
#### File: rlcard/utils/logger.py
```python
import os
import csv
class Logger(object):
''' Logger saves the running results and helps make plots from the results
'''
def __init__(self, log_dir):
''' Initialize the labels, legend and paths of the plot and log file.
Args:
log_dir (str): The path to the log files
'''
self.log_dir = log_dir
self.txt_path = os.path.join(log_dir, 'log.txt')
self.csv_path = os.path.join(log_dir, 'performance.csv')
self.fig_path = os.path.join(log_dir, 'fig.png')
self.txt_path_win = os.path.join(log_dir, 'win_log.txt')
self.csv_path_win = os.path.join(log_dir, 'win_performance.csv')
self.fig_path_win = os.path.join(log_dir, 'win_fig.png')
self.fig_path_win_easy = os.path.join(log_dir, 'win_fig_easy.png')
self.fig_path_win_medium = os.path.join(log_dir, 'win_fig_medium.png')
self.fig_path_win_hard = os.path.join(log_dir, 'win_fig_hard.png')
self.fig_path_win_all = os.path.join(log_dir, 'win_fig_all.png')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.txt_file = open(self.txt_path, 'a')
self.csv_file = open(self.csv_path, 'a')
#self.txt_file_win = open(self.txt_path_win, 'w')
self.csv_file_win = open(self.csv_path_win, 'a')
fieldnames = ['episodes', 'reward']
fieldnames_win = ['episodes', 'win rate', 'win rate easy', 'win rate medium', 'win rate hard']
self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames)
self.writer_win = csv.DictWriter(self.csv_file_win, fieldnames=fieldnames_win)
self.writer.writeheader()
self.writer_win.writeheader()
def log(self, text):
''' Write the text to log file then print it.
Args:
text(string): text to log
'''
self.txt_file.write(text+'\n')
self.txt_file.flush()
print(text)
def log_performance(self, episodes, reward, win_rate):
''' Log a point in the curve
Args:
episodes (int): the episodes of the current point
reward (float): the reward of the current point
win_rate (list): the overall, easy, medium and hard win rates at the current point
'''
self.txt_file = open(self.txt_path, 'a')
self.csv_file = open(self.csv_path, 'a')
self.csv_file_win = open(self.csv_path_win, 'a')
fieldnames = ['episodes', 'reward']
fieldnames_win = ['episodes', 'win rate', 'win rate easy', 'win rate medium', 'win rate hard']
self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames)
self.writer_win = csv.DictWriter(self.csv_file_win, fieldnames=fieldnames_win)
self.writer.writerow({'episodes': episodes, 'reward': reward})
self.writer_win.writerow({'episodes': episodes, 'win rate': win_rate[0], 'win rate easy': win_rate[1] , 'win rate medium': win_rate[2], 'win rate hard': win_rate[3]})
print('')
self.log('----------------------------------------')
self.log(' episodes | ' + str(episodes))
self.log(' reward | ' + str(reward))
self.log(' win rate | ' + str(win_rate[0]))
self.log(' win rate easy | ' + str(win_rate[1]))
self.log(' win rate medium | ' + str(win_rate[2]))
self.log(' win rate hard | ' + str(win_rate[3]))
self.log('----------------------------------------')
def plot(self, algorithm):
plot(self.csv_path, self.fig_path, algorithm)
plot_win(self.csv_path_win, self.fig_path_win, 'win rate', algorithm)
plot_win(self.csv_path_win, self.fig_path_win_easy, 'win rate easy', algorithm)
plot_win(self.csv_path_win, self.fig_path_win_medium, 'win rate medium', algorithm)
plot_win(self.csv_path_win, self.fig_path_win_hard, 'win rate hard', algorithm)
plot_win_all(self.csv_path_win, self.fig_path_win_all, algorithm)
def close_files(self):
''' Close the created file objects
'''
if self.txt_path is not None:
self.txt_file.close()
if self.csv_path is not None:
self.csv_file.close()
if self.csv_path_win is not None:
self.csv_file_win.close()
def plot(csv_path, save_path, algorithm):
''' Read data from csv file and plot the results
'''
import matplotlib.pyplot as plt
with open(csv_path) as csvfile:
#print(csv_path)
reader = csv.DictReader(csvfile)
xs = []
ys = []
for row in reader:
xs.append(int(row['episodes']))
ys.append(float(row['reward']))
fig, ax = plt.subplots()
ax.plot(xs, ys, label=algorithm)
ax.set(xlabel='episodes', ylabel='reward')
ax.legend()
ax.grid()
save_dir = os.path.dirname(save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fig.savefig(save_path)
plt.close(fig)
def plot_win(csv_path, save_path, row_name, algorithm):
''' Read data from csv file and plot the results
'''
import matplotlib.pyplot as plt
with open(csv_path) as csvfile:
#print(csv_path)
reader = csv.DictReader(csvfile)
xs = []
ys = []
for row in reader:
xs.append(int(row['episodes']))
ys.append(float(row[row_name]))
fig, ax = plt.subplots()
ax.plot(xs, ys, label=algorithm)
ax.set(xlabel='episodes', ylabel='win rate')
ax.legend()
ax.grid()
save_dir = os.path.dirname(save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fig.savefig(save_path)
plt.close(fig)
def plot_win_all(csv_path, save_path, algorithm):
''' Read data from csv file and plot the results
'''
import matplotlib.pyplot as plt
with open(csv_path) as csvfile:
#print(csv_path)
reader = csv.DictReader(csvfile)
xs = []
ys1 = []
ys2 = []
ys3 = []
for row in reader:
xs.append(int(row['episodes']))
ys1.append(float(row['win rate easy']))
ys2.append(float(row['win rate medium']))
ys3.append(float(row['win rate hard']))
fig, ax = plt.subplots()
ax.plot(xs, ys1, label='Easy')
ax.plot(xs, ys2, label='Medium')
ax.plot(xs, ys3, label='Hard')
ax.set(xlabel='episodes', ylabel='win rate')
ax.legend()
ax.grid()
save_dir = os.path.dirname(save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fig.savefig(save_path)
plt.close(fig)
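# A minimal, hypothetical usage sketch (the directory, reward and win rates are
# placeholders); it assumes matplotlib is available for the plot call.
if __name__ == '__main__':
    logger = Logger('experiments/whist_dqn')
    logger.log_performance(episodes=1000, reward=0.42,
                           win_rate=[0.55, 0.70, 0.50, 0.45])
    logger.plot('DQN')
    logger.close_files()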
``` |
{
"source": "joshuawallace/Fragile-Families-Challenge",
"score": 3
} |
#### File: Fragile-Families-Challenge/actual_submission/general_functions.py
```python
import pickle
import numpy as np
import csv
import os.path
# A dict to reference outcomes by their index in the data read in
outcome_indices = {'ID': 0, 'gpa': 1, 'grit': 2, 'materialhardship': 3,
'eviction': 4, 'layoff': 5, 'jobtraining': 6}
def read_in_data(path,
care_about_mothid1=False, remove_bad_columns=True):
"""Reads in the data and removes some columns of unusable data
or non-data
Arguments:
path {string} -- path to the data file
Keyword Arguments:
care_about_mothid1 {bool} -- whether to care about the mothid1 column (default: {False})
remove_bad_columns {bool} -- whether to remove the bad columns (default: {True})
Returns:
tuple -- first element is the header of the data file, and the second element is the data, read in
"""
the_data = []
# Read in the data
with open(path, 'r') as f:
csvreader = csv.reader(f,delimiter=',')
for row in csvreader:
the_data.append(row)
# Remove some of the columns that are id values
if 'background.csv' in path:
        the_data = [line[2:] for line in the_data]
# Remove the header line, save it as its own thing
header = the_data.pop(0)
# Set bounds to remove other unnecessary columns
if 'train.csv' in path:
lower_bound = lambda x: 1; upper_bound = lambda y: len(y)
elif 'background.csv' in path:
lower_bound = lambda x: 0; upper_bound = lambda y: len(y) - 1
else:
raise RuntimeError("Do not understand which file type is being passed, \
and thus do not understand which bounds to use in float conversion.")
# Now, convert numerical values to actual numbers, instead of strings
for i in range(len(the_data)):
for j in range(lower_bound(the_data[i]), upper_bound(the_data[i])):
try:
temp = float(the_data[i][j])
the_data[i][j] = temp # Try to convert to float
except ValueError: # Can't convert to float
the_data[i][j] = 'NA'
# Remove some pre-determined bad columns
if 'background.csv' in path and remove_bad_columns:
columns_to_remove = np.loadtxt("columns_to_remove.txt", dtype=int)
print "Deleting " + str(len(columns_to_remove)) + " columns from " +\
"the survey data, because all the data in those columns either " +\
"are NA, are the same value, or the columns correspond to " +\
"mother or father ID numbers."
        # Delete indices from highest to lowest so earlier deletions do not
        # shift the positions of the columns still to be removed
        for j in sorted(columns_to_remove, reverse=True):
            for line in the_data:
                del line[j]
            del header[j]
return (header, the_data)
def remove_lines_with_all_NA(outcomes_data):
"""Removes lines from the training outcomes that have
all NA values. Since we don't know what outcomes the data
have, no use training on these guys.
Arguments:
outcomes_data {list of lists} -- contains the training outcomes to be processed
Returns:
list of lists -- the original argument but with all lines containing nothing but NA's removed
Raises:
RuntimeError -- for some reason one of the internal lists didn't match the length of the input list.
"""
all_NA = [] # A list that will be filled with Boolean values,
# specifying whether all the outcomes are NA or not.
for i in range(len(outcomes_data)): # Loop through the data
# If all six outcomes are 'NA', append True to all_NA
try:
true1 = 'NA' in outcomes_data[i][outcome_indices['gpa']]
true2 = 'NA' in outcomes_data[i][outcome_indices['grit']]
true3 = 'NA' in outcomes_data[i][outcome_indices['materialhardship']]
true4 = 'NA' in outcomes_data[i][outcome_indices['eviction']]
true5 = 'NA' in outcomes_data[i][outcome_indices['layoff']]
true6 = 'NA' in outcomes_data[i][outcome_indices['jobtraining']]
if true1 and true2 and true3 and true4 and true5 and true6:
all_NA.append(True)
else: # Else append False
all_NA.append(False)
except TypeError:
all_NA.append(False)
# Checking that all_NA is the appropriate length
if len(outcomes_data) != len(all_NA):
raise RuntimeError("For some reason, all_NA is not the proper length \
(the same length as the input)")
# Form the new list based on the elements of the old list that aren't all NA
outcomes_data_removed = [list(outcomes_data[i]) for i in range(len(outcomes_data)) if all_NA[i] == False]
# Print out, letting you know how many rows are kept.
print str(len(outcomes_data_removed)) + " rows kept from the training outcomes \
out of " + str(len(outcomes_data))
# Return
return outcomes_data_removed
def match_up_data_with_training_set_of_outcomes(survey_data,
training_outcomes_data,
clean_up_training=False):
"""Match up the data rows with the corresponding outcomes
Arguments:
survey_data {array-like} -- the survey data
training_outcomes_data {list-like} -- the training outcomes
Keyword Arguments:
clean_up_training {bool} -- clean up training data if there aren't any corresponding survey data (default: {False})
Returns:
tuple -- the survey data matched up, and the training data matched up
"""
training_data_ids = []
# Get the training data outcome ids.
for i in range(len(training_outcomes_data)):
training_data_ids.append(training_outcomes_data[i][
outcome_indices['ID']])
# Match the survey data with the available training data outcomes
survey_data_to_return_temp = [list(survey_data[i]) for i in range(len(survey_data)) if
survey_data[i][-1] in training_data_ids]
    # Order the data by id numbers (approach from
    # http://jakzaprogramowac.pl/pytanie/20037,python-how-to-order-a-list-based-on-another-list)
data_order = dict(zip(training_data_ids, range(len(training_data_ids))))
survey_data_to_return = sorted(survey_data_to_return_temp, key=lambda x: data_order.get(x[-1], len(data_order)))
missing_matches = []
survey_data_to_return_ids = [item[-1] for item in survey_data_to_return]
# See if any training outcomes don't have corresponding survey data
for i in range(len(training_data_ids)):
if training_data_ids[i] not in survey_data_to_return_ids:
missing_matches.append(training_data_ids[i])
if missing_matches:
print "************************"
print "There were some id's in the training set of outcomes not in " +\
"the survey question data. Specifically, " + \
str(len(missing_matches)) + " id's."
# Clean up if allowed and necessary
if clean_up_training == False or not missing_matches:
if missing_matches:
print "Doing nothing about the missing data..."
else:
print "Training data cleanup is set to False"
training_data_to_return = [list(line) for line in training_outcomes_data]
else:
"Matching the training outcomes to the survey data"
training_data_to_return = [list(line) for line in training_outcomes_data]
missing_matches.sort(reverse="true")
for i in missing_matches:
training_data_to_return.pop(i)
return (survey_data_to_return, training_data_to_return)
def data_open_and_process(data_filename="background.csv",
training_outcomes_filename="train.csv",
remove_bad_columns=True):
"""Open and process the data
Keyword Arguments:
data_filename {str} -- the file name for the survey data (default: {"background.csv"})
training_outcomes_filename {str} -- the file name for the outcomes (default: {"train.csv"})
remove_bad_columns {bool} -- remove the bad columns(default: {True})
Returns:
dict -- this has all the information collected from opening and processing the data
"""
print "Reading in training outcomes"
# Read in the outcomes
training_outcomes_header, training_outcomes = read_in_data(training_outcomes_filename)
print "Done reading in the training outcomes, now reading in survey data."
# Read in the survey data
survey_data_header, survey_data = read_in_data(data_filename, remove_bad_columns=remove_bad_columns)
print "Done reading in survey data, now cleaning up training " +\
"outcomes with all NA's."
# Remove lines with all NA
outcomes_NAall_removed = remove_lines_with_all_NA(training_outcomes)
print "Now matching the survey data with the training outcomes, " +\
"to get a training data set."
# Match the survey data to the training data set
survey_data_matched, training_outcomes_matched = \
match_up_data_with_training_set_of_outcomes(survey_data,
outcomes_NAall_removed,
clean_up_training=True)
print "Now removing the id numbers from the data, so the data can be " +\
"used as is."
# Remove id numbers from the data
_ = survey_data_header.pop(-1)
_ = training_outcomes_header.pop(0)
survey_data_ids = [line.pop(-1) for line in survey_data]
survey_data_matched_to_outcomes_ids = [line.pop(-1) for line in survey_data_matched]
training_outcomes_ids = [line.pop(0) for line in training_outcomes]
training_outcomes_NAall_removed_ids = [line.pop(0) for line in outcomes_NAall_removed]
training_outcomes_matched_to_outcomes_ids = [line.pop(0) for line in training_outcomes_matched]
print "Done with input and processing."
return {'survey_data_header': survey_data_header,
'survey_data': survey_data,
'survey_data_ids': survey_data_ids,
'survey_data_matched_to_outcomes': survey_data_matched,
'survey_data_matched_to_outcomes_ids': survey_data_matched_to_outcomes_ids,
'training_outcomes_header': training_outcomes_header,
#'training_outcomes': training_outcomes,
#'training_outcomes_ids': training_outcomes_ids,
#'training_outcomes_NAall_removed': outcomes_NAall_removed,
#'training_outcomes_NAall_removed_ids': training_outcomes_NAall_removed_ids,
'training_outcomes_matched_to_outcomes': training_outcomes_matched,
'training_outcomes_matched_to_outcomes_ids': training_outcomes_matched_to_outcomes_ids}
pickle_file_name = "ffc_data.p"
def save_data_as_pickle(data, path=pickle_file_name):
pickle.dump(data, open(path, 'wb'))
def open_pickle_of_input_data(path=pickle_file_name):
return pickle.load(open(path,'rb'))
def check_if_data_exists_if_not_open_and_read(path=pickle_file_name, remove_bad_columns=True):
if os.path.isfile(path):
print "Pickle file already exists, just reading it in."
print ""
print ""
return open_pickle_of_input_data(path)
else:
print "Pickle file does not exist, now reading in and processing data"
print ""
print ""
data_loaded = data_open_and_process(remove_bad_columns=remove_bad_columns)
save_data_as_pickle(data_loaded)
return data_loaded
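# Illustrative usage (a sketch, not part of the original pipeline): load or build
# the processed data once, then pull the matched training arrays out by key.
#   data = check_if_data_exists_if_not_open_and_read()
#   X = data['survey_data_matched_to_outcomes']
#   y = data['training_outcomes_matched_to_outcomes']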
def precision_recall_etc(classification, actual_classification):
"""Given a pair of classifications and actual classifications,
calculates various assessment parameters of the classification
Parameters calculated: precision, recall, specificity, NPV, f1,
tp (true positive), tn (true negative), fp (false positive),
fn (false negative), accuracy
Arguments:
classification {[type]} -- the classifications you want to evaluate
actual_classification {[list-like]} -- the reference, actual
classifications to evaluate against
Returns:
dict -- a dictionary which can access all the values in the
description above, with keys matching the values in the
description above.
Raises:
RuntimeError -- len() of the two function arguments not the same
"""
if len(classification) != len(actual_classification): # if lengths don't match
raise RuntimeError("Lengths of arguments to accuracy_percentage \
not the same")
tp = fp = tn = fn = 0 # t=true, f=false, p=postive, n=negative
for i in range(len(classification)):
if actual_classification[i] == 1: # actual sentiment is positive
if classification[i] == actual_classification[i]: # if matches
tp += 1
else: # if doesn't match
fn += 1
else: # actual sentiment is negative
if classification[i] == actual_classification[i]: # if matches
tn += 1
else: # if doesn't match
fp += 1
# calculate the various performance metrics
precision = float(tp)/float(tp + fp)
recall = float(tp)/float(tp + fn)
specificity = float(tn)/float(fp + tn)
NPV = float(tn)/float(tn + fn)
f1 = 2.*float(precision*recall)/float(precision + recall)
return {'precision': precision, 'recall': recall,
'specificity': specificity, 'NPV': NPV,
'f1': f1, 'tp': tp, 'tn': tn, 'fp': fp, 'fn': fn,
'accuracy': float(tp + tn)/float(tp + fp + tn + fn)}
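# Tiny worked example (illustrative): predictions [1, 0, 1, 1] against actuals
# [1, 0, 0, 1] give tp=2, tn=1, fp=1, fn=0, so precision = 2/3, recall = 1.0,
# accuracy = 0.75 and f1 = 0.8:
#   precision_recall_etc([1, 0, 1, 1], [1, 0, 0, 1])['f1']  # -> 0.8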
def mean_squared_error(x1, x2):
"""Calculates the mean squared error between x1 and x2
[description]
Arguments:
x1 {list-like} -- the calculated values
x2 {list-like} -- the actual values
Returns:
[type] -- [description]
Raises:
RuntimeError -- [description]
"""
if len(x1) != len(x2):
raise RuntimeError("Length of two iterables is not the same")
    total = 0.
    for i in range(len(x1)):
        total += (x1[i] - x2[i])**2
    return total/float(len(x1))
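# Example (illustrative): mean_squared_error([1., 2., 3.], [1., 2., 5.]) == 4./3.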
```
#### File: joshuawallace/Fragile-Families-Challenge/look_at_results.py
```python
import matplotlib.pyplot as plt
import numpy as np
def plot_predict_actual_pairs(predicted_values, actual_values, ylabel="The values"):
predicted_values_to_use = np.asarray(predicted_values)
actual_values_to_use = np.asarray(actual_values)
order = actual_values_to_use.argsort()
predicted_values_to_plot = predicted_values_to_use[order]
actual_values_to_plot = actual_values_to_use[order]
number_of_things_to_plot = len(predicted_values_to_plot)
fig= plt.figure()
for i in range(number_of_things_to_plot):
plt.plot(2*[float(i)/float(number_of_things_to_plot)],
[predicted_values_to_plot[i], actual_values_to_plot[i]],
color='blue', marker=None, linewidth=.05)
plt.scatter(float(i)/float(number_of_things_to_plot),
predicted_values_to_plot[i], s=5)
plt.scatter(float(i)/float(number_of_things_to_plot),
actual_values_to_plot[i], s=20)
plt.ylabel(ylabel)
#plt.set_xticklabels([])
return fig
def plot_errors_func_k_noalpha(mean_squared_error, r_squared_error, k_values):
fig = plt.figure(figsize=(7,4))
ax = fig.add_subplot(111)
ln1 = ax.plot(k_values, mean_squared_error, label="MSE", color='blue', ls='--')
ax2 = ax.twinx()
ln2 = ax2.plot(k_values, np.mean(r_squared_error, axis=1), label="R^2mean", color='red')
ln3 = ax2.plot(k_values, np.median(r_squared_error, axis=1), label="R^2median", color='green')
lns = ln1+ln2+ln3
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc='best')
ax.set_ylabel("MSE")
ax2.set_ylabel("R^2")
ax.set_xlabel("Value for K")
ax2.set_ylim(-0.1, .9)
fig.tight_layout()
return fig
def plot_errors_func_k_alpha(mean_squared_error, r_squared_error, k_values, alpha_values):
fig1 = plt.figure(figsize=(7, 4))
ax1 = fig1.add_subplot(111)
for i in range(len(alpha_values)):
ax1.plot(k_values, [line[i] for line in mean_squared_error], label=round(alpha_values[i], 5))
ax1.set_ylabel("MSE")
ax1.set_xlabel("Value for K")
ax1.legend(loc='best')
##########################
fig2 = plt.figure(figsize=(7, 4))
ax2 = fig2.add_subplot(111)
for i in range(len(alpha_values)):
ax2.plot(k_values, [np.mean(line[i]) for line in r_squared_error], label=round(alpha_values[i], 5))
ax2.set_ylabel("Mean R^2")
ax2.set_xlabel("Value for K")
ax2.set_ylim(-0.5,1)
ax2.legend(loc='best')
##########################
fig3 = plt.figure(figsize=(7, 4))
ax3 = fig3.add_subplot(111)
for i in range(len(alpha_values)):
ax3.plot(k_values, [np.median(line[i]) for line in r_squared_error], label=round(alpha_values[i], 5))
ax3.set_ylabel("Median R^2")
ax3.set_xlabel("Value for K")
ax3.set_ylim(-0.5,1)
ax3.legend(loc='best')
fig1.tight_layout()
fig2.tight_layout()
fig3.tight_layout()
return (fig1, fig2, fig3)
``` |
{
"source": "joshuawallace/hw3",
"score": 3
} |
#### File: hw3/p1/plotphase.py
```python
import numpy as np
import matplotlib.pyplot as pp
omega = .1
omega_n = 5
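# Analytic solution of an undamped oscillator with natural frequency omega_n,
# driven by a unit-amplitude cosine force at frequency omega, starting from rest.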
def analytic(t):
return (np.cos(omega*t) - np.cos(omega_n*t) ) / (omega_n * omega_n - omega * omega)
num=[1000,10000,100000]
for type in ('euler', 'euler_symplectic', 'rk4'):
for value in num:
s = type + '.' + str(value) + '.out'
t,x,xprime = np.loadtxt(s,unpack=True)
labelstring = 'Nsteps = ' + str(value)
#if type != 'euler_symplectic':
# pp.plot(x,xprime,label=labelstring)
if value == 10000:
pp.plot(x,xprime,label=labelstring,lw=2.5)
elif value == 1000:
pp.plot(x,xprime,label=labelstring,lw=2)
else:
pp.plot(x,xprime,label=labelstring)
# pp.plot(np.linspace(0.,100.,1000),analytic(np.linspace(0.,100.,1000)),label="True")
pp.xlabel("position")
pp.ylabel("velocity")
pp.xlim(-.1,.1)
pp.ylim(-.3,.3)
if type == 'euler':
pp.xlim(-1,1)
pp.ylim(-1,1)
pp.legend(loc='best')
pp.title(type)
s = 'pdf/' + type + '_phase_plot.pdf'
pp.savefig(s)
pp.close()
# Now plot, for a given number of steps, all the integrator types together
for value in num:
for type in ('euler', 'euler_symplectic', 'rk4'):
s = type + '.' + str(value) + '.out'
t,x,xprime = np.loadtxt(s,unpack=True)
if type == 'euler_symplectic':
pp.plot(x,xprime,label=type,lw=4)
else:
pp.plot(x,xprime,label=type)
#pp.plot(np.linspace(0.,100.,100000),analytic(np.linspace(0.,100.,100000)),label="True")
pp.xlabel("position")
pp.ylabel("velocity")
pp.xlim(-.2,.15)
pp.ylim(-1,1)
pp.legend(loc='best')
titlestring = 'Nsteps = ' + str(value)
pp.title(titlestring)
s = 'pdf/' + str(value) + '_phase_plot.pdf'
pp.savefig(s)
pp.close()
``` |
{
"source": "joshuawall/amuse",
"score": 2
} |
#### File: examples/applications/asterisk_movie_example.py
```python
import numpy.random
from amuse.ic.flatimf import new_flat_mass_distribution
from amuse.ic.plummer import new_plummer_model
from amuse.units import nbody_system as nbody
# from amuse.community.asterisk.interface import AsteriskInterface
from amuse.community.asterisk.interface import Asterisk
# from matplotlib import pyplot
from amuse.units import units
# from amuse.datamodel import Particles
# from amuse.ic.brokenimf import new_scalo_mass_distribution
from amuse.ext.particles_with_color import new_particles_with_blackbody_color
from amuse.community.seba.interface import SeBa
from amuse.community.bhtree.interface import BHTree
def new_stellar_evolution(particles):
stellar_evolution = SeBa()
stellar_evolution.particles.add_particles(particles)
return stellar_evolution
def new_gravity(particles, converter):
gravity = BHTree(converter)
gravity.particles.add_particles(particles)
return gravity
if __name__ in ('__main__', '__plot__'):
number_of_particles = 100
# create a Plummer sphere with a number of stars
numpy.random.seed(12345)
masses = new_flat_mass_distribution(number_of_particles)
converter = nbody.nbody_to_si(1.0 | units.parsec, masses.sum())
particles = new_plummer_model(number_of_particles, converter)
particles.mass = masses
particles.move_to_center()
# create simulation codes
gravity = new_gravity(particles, converter)
stellar_evolution = new_stellar_evolution(particles)
# create channels to and from the local particle set and the simulations
from_gravity_to_local = gravity.particles.new_channel_to(particles)
from_stellar_evolution_to_local = \
stellar_evolution.particles.new_channel_to(particles)
from_stellar_evolution_to_local.copy()
# creating colored particles
particles = new_particles_with_blackbody_color(particles)
particles.alpha = 1.0
particles.radius = (
stellar_evolution.particles.radius.sqrt()
* (1e4 | units.parsec).sqrt()
)
# creating visualization code
converter = nbody.nbody_to_si(10.0 | units.parsec, masses.sum())
visualization = Asterisk(converter, redirection="none")
visualization.initialize_code()
# optional: set the zoom and rotation of the visualization
# visualization.parameters.rotation = (15, -15, 45)
# visualization.parameters.camera_distance = 100 | units.parsec
# add (now colored) particles to visualization
visualization.particles.add_particles(particles)
from_local_to_viz = particles.new_channel_to(visualization.particles)
visualization.store_view(0 | units.Myr)
# evolve module for some time
for i in range(1, 100):
target_time = i * 0.05 | units.Myr
print 'starting evolve to time = ', target_time
gravity.evolve_model(target_time)
from_gravity_to_local.copy()
stellar_evolution.evolve_model(target_time)
from_stellar_evolution_to_local.copy()
from_local_to_viz.copy_attributes(
["x", "y", "z", "red", "green", "blue"])
visualization.particles.radius = (
stellar_evolution.particles.radius.sqrt()
* (1e4 | units.parsec).sqrt()
)
print 'updating visualization to time = ', target_time
visualization.store_view(target_time)
# give the user an opportunity to change the visualization settings
raw_input(
"\n\nTweak your visualization settings and press 'Enter' to continue... ")
# generate screenshots while changing some visual parameters.
for i in range(1, 100):
visualization.parameters.rotation = (15, -i * 10, 0)
visualization.parameters.camera_distance = (
15.0 - (0.1 * i)) | units.parsec
visualization.parameters.scene = i
filename = "screenshot-%05d.png" % i
visualization.screenshot(filename)
visualization.stop()
gravity.stop()
stellar_evolution.stop()
```
#### File: examples/applications/test_HRdiagram_tracks.py
```python
import sys
import os
import warnings
from optparse import OptionParser
from amuse.units import units
from amuse.community.sse.interface import SSE
from amuse.community.evtwin.interface import EVtwin
from amuse.community.evtwin2sse.interface import EVtwin2SSE
from amuse.community.mesa.interface import MESA
from amuse.community.cachedse.interface import CachedStellarEvolution
from amuse.test.amusetest import get_path_to_results
from amuse import datamodel
from amuse.rfi.core import is_mpd_running
usage = """\
usage: %prog [options]
This script will generate HR diagram tracks for
stars with specified masses.
"""
def stellar_remnant_state(star):
    stellar_type = star.stellar_type.value_in(units.stellar_type)
    return 10 <= stellar_type < 16
def simulate_evolution_tracks(
stellar_evolution=SSE(),
masses=[0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 30.0] | units.MSun,
name_of_the_figure="HR_evolution_tracks.png"
):
"""
For every mass in the `masses' array, a stellar evolution track across the
Hertzsprung-Russell diagram will be calculated and plotted. Each star will
be created, evolved and removed one by one. This is only necessary because
the time span of each track is different (a solar mass star evolution track
takes billions of years, but we don't want to also evolve high mass stars
for billions of years) In most applications the stars have to be evolved up
to a common end time, which can be more easily accomplished by creating an
array (stars = datamodel.Stars(number_of_stars)) and using
evolve_model(end_time = ...).
"""
number_of_stars = len(masses)
all_tracks_luminosity = []
all_tracks_temperature = []
all_tracks_stellar_type = []
stellar_evolution.commit_parameters()
print(
"The evolution across the Hertzsprung-Russell diagram of ",
str(number_of_stars),
" stars with\nvarying masses will be simulated..."
)
for j in range(number_of_stars):
star = datamodel.Particle()
star.mass = masses[j]
print "Created new star with mass: ", star.mass
star = stellar_evolution.particles.add_particle(star)
stellar_evolution.commit_particles()
luminosity_at_time = [] | units.LSun
temperature_at_time = [] | units.K
stellar_type_at_time = [] | units.stellar_type
stopped_evolving = False
# Evolve this star until it changes into a compact stellar remnant
# (white dwarf, neutron star, or black hole)
while not stellar_remnant_state(star) and not stopped_evolving:
luminosity_at_time.append(star.luminosity)
temperature_at_time.append(star.temperature)
stellar_type_at_time.append(star.stellar_type)
previous_age = star.age
try:
stellar_evolution.evolve_model()
# Check whether the age has stopped increasing
stopped_evolving = (star.age == previous_age)
except Exception as ex:
print str(ex)
stopped_evolving = True
if stopped_evolving:
print "Age did not increase during timestep. Aborted evolving..."
else:
stellar_type_at_time.append(star.stellar_type)
# Fudged: final stellar type annotation at previous (Teff, L);
# BHs and neutron stars would otherwise fall off the chart.
luminosity_at_time.append(luminosity_at_time[-1])
temperature_at_time.append(temperature_at_time[-1])
print " ... evolved model to t = " + \
str(star.age.as_quantity_in(units.Myr))
print(
"Star has now become a: ",
star.stellar_type,
"(stellar_type: "+str(
star.stellar_type.value_in(units.stellar_type)
)+")"
)
print
all_tracks_luminosity.append(luminosity_at_time)
all_tracks_temperature.append(temperature_at_time)
all_tracks_stellar_type.append(stellar_type_at_time)
# Remove the star before creating the next one. See comments at the top.
stellar_evolution.particles.remove_particle(star)
stellar_evolution.stop()
plot_HR_diagram(masses, all_tracks_luminosity, all_tracks_temperature,
all_tracks_stellar_type, name_of_the_figure)
print "All done!"
def plot_HR_diagram(
masses,
luminosity_tracks,
temperature_tracks,
stellar_type_tracks,
plotfile):
try:
# This removes the need for ssh -X to be able to do plotting
import matplotlib
matplotlib.use("Agg", warn=False)
from matplotlib import pyplot
print "Plotting the data..."
pyplot.figure(figsize=(7, 8))
pyplot.title('Hertzsprung-Russell diagram', fontsize=12)
pyplot.xlabel('Effective Temperature (K)')
pyplot.ylabel('Luminosity (solar luminosity)')
# Define some strings for line formatting (colors, symbols, etc.), used
# recurrently when many stars are simulated
plot_format_strings_lines = ["r-", "y-", "c-", "b-", "m-"]
len_fmt_str_lin = len(plot_format_strings_lines)
plot_format_strings_symbols = [
"r^", "y^", "c^", "b^", "m^", "rs", "ys", "cs", "bs", "ms"]
len_fmt_str_sym = len(plot_format_strings_symbols)
number_of_stars = len(masses)
for j in range(number_of_stars):
# Plot track of the current star j
x_values = temperature_tracks[j].value_in(units.K)
y_values = luminosity_tracks[j].value_in(units.LSun)
pyplot.loglog(x_values, y_values,
plot_format_strings_lines[j % len_fmt_str_lin])
# Plot symbols whenever this star has switched to the next stellar
# evolution phase
x_values = []
y_values = []
text_values = []
previous_type = 15 | units.stellar_type
for i, type in enumerate(stellar_type_tracks[j]):
if not type == previous_type:
x_values.append(temperature_tracks[j][i].value_in(units.K))
y_values.append(
luminosity_tracks[j][i].value_in(units.LSun))
text_values.append(
stellar_type_tracks[j][i].value_in(units.stellar_type))
previous_type = type
pyplot.loglog(x_values, y_values,
plot_format_strings_symbols[j % len_fmt_str_sym])
text_offset_factor_x = 1.05
text_offset_factor_y = 0.6
for i, phase in enumerate(text_values):
pyplot.annotate(
str(int(phase)),
xy=(x_values[i], y_values[i]),
xytext=(
x_values[i]*text_offset_factor_x,
y_values[i]*text_offset_factor_y)
)
text_offset_factor_x = 1.1
text_offset_factor_y = 0.9
pyplot.annotate(str(masses[j]), xy=(x_values[0], y_values[0]),
xytext=(x_values[0]*text_offset_factor_x,
y_values[0]*text_offset_factor_y),
color='g', horizontalalignment='right')
pyplot.axis([300000., 2500., 1.e-2, 1.e6])
# Or use these axes to also view neutron stars and black holes:
# pyplot.axis([1.e7, 2500., 1.e-11, 1.e6])
pyplot.savefig(plotfile)
print "Meaning of the stellar evolution phase markers (black numbers):"
for i in range(16):
print str(i)+": ", (i | units.stellar_type)
except ImportError:
print "Unable to produce plot: couldn't find matplotlib."
class InstantiateCode(object):
def sse(self, number_of_stars):
return SSE()
def evtwin(self, number_of_stars):
result = EVtwin()
result.initialize_code()
if number_of_stars > result.parameters.maximum_number_of_stars:
result.parameters.maximum_number_of_stars = number_of_stars
warnings.warn(
"You're simulating a large number of stars with EVtwin. This may not be such a good idea...")
return result
def mesa(self, number_of_stars):
result = MESA()
result.initialize_code()
if number_of_stars > (10):
warnings.warn(
"You're simulating a large number of stars with MESA. This may not be such a good idea...")
if number_of_stars > (1000):
raise Exception(
"You want to simulate with more than 1000 stars using MESA, this is not supported")
return result
def evtwin2sse(self, number_of_stars):
result = EVtwin2SSE()
result.initialize_code()
# TODO add maximum_number_of_stars parameter to Evtwin2SSE
# if number_of_stars > result.parameters.maximum_number_of_stars:
# result.parameters.maximum_number_of_stars = number_of_stars
# warnings.warn("You're simulating a large number of stars with EVtwin. This may not be such a good idea...")
return result
def new_code(self, name_of_the_code, number_of_stars):
if hasattr(self, name_of_the_code):
return getattr(self, name_of_the_code)(number_of_stars)
else:
raise Exception(
"Cannot instantiate code with name '{0}'".format(
name_of_the_code
)
)
def new_code(name_of_the_code, number_of_stars):
return InstantiateCode().new_code(name_of_the_code, number_of_stars)
def test_simulate_one_star():
assert is_mpd_running()
code = new_code("sse", 1)
test_results_path = get_path_to_results()
output_file = os.path.join(test_results_path, "HR_evolution_tracks.png")
simulate_evolution_tracks(
code,
[20.0] | units.MSun,
name_of_the_figure=output_file,
)
def new_commandline_option_parser():
result = OptionParser(usage)
result.add_option(
"-c",
"--code",
choices=["sse", "evtwin", "mesa", "evtwin2sse"],
default="sse",
dest="code",
metavar="CODE",
help="CODE to use for stellar evolution"
)
result.add_option(
"-C",
"--cache",
type="string",
default=None,
dest="cacheDir",
help="Use/write cache from directory"
)
result.add_option(
"-p",
"--plot_file",
type="string",
default="HR_evolution_tracks.png",
dest="plot_filename",
help="Name of the file to plot to"
)
return result
if __name__ == '__main__':
if not is_mpd_running():
print "There is no mpd server running. Please do 'mpd &' first."
sys.exit()
parser = new_commandline_option_parser()
(options, arguments) = parser.parse_args()
if arguments:
parser.error("unknown arguments '{0}'".format(arguments))
mass_list = [0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 30.0] | units.MSun
code = new_code(options.code, len(mass_list))
if not (options.cacheDir is None):
print "Using cache directory: %s" % (options.cacheDir)
# As a special case, we use caching of the underlying models instead of
# the model output for EVtwin2SSE
if (options.code == "evtwin2sse"):
code.cache_underlying_models(options.cacheDir)
else:
code = CachedStellarEvolution(code, options.cacheDir)
simulate_evolution_tracks(
code,
masses=mass_list,
name_of_the_figure=options.plot_filename
)
```
#### File: examples/simple/binary_population_synthesis.py
```python
from __future__ import print_function
from amuse.units import units
from amuse.units import quantities
from amuse import datamodel
from amuse.community.seba.interface import SeBa
# from amuse.community.bse.interface import BSE
from matplotlib import pyplot
import numpy
# import time
USE_VECTOR_OPERATIONS = True
def multidimensional_meshgrid(*arrays):
"""
    Utility function to create a multidimensional grid based
on a list of arrays. Each array defines a
range in one dimension.
"""
reversed_quantities = tuple(reversed(arrays))
    lengths = list(map(len, reversed_quantities))
dim = len(reversed_quantities)
size = 1
for length in lengths:
size *= length
result = []
for i, quantity in enumerate(reversed_quantities):
        shape = numpy.ones(dim, dtype=int)
shape[i] = lengths[i]
if quantities.is_quantity(quantity):
array = quantity.value_in(quantity.unit)
else:
array = quantity
array = array.reshape(shape)
for j, length in enumerate(lengths):
if j != i:
array = array.repeat(length, axis=j)
if quantities.is_quantity(quantity):
result.append(quantity.unit.new_quantity(array))
else:
result.append(array)
return tuple(result[::-1])
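# Illustrative use (hedged sketch): like numpy.meshgrid but unit-aware, e.g.
#   m_grid, q_grid = multidimensional_meshgrid([1.0, 2.0] | units.MSun,
#                                              numpy.array([0.25, 0.5, 0.75]))
# returns two arrays with one entry per (mass, ratio) combination, which
# generate_initial_population_grid below flattens into flat parameter lists.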
def create_binary(
stars, binaries, primary_mass, mass_ratio, separation, eccentricity):
"""
    Creates a single binary; the constituent stars will be accumulated
    in the stars particle set, and the binary will be added to the
    binaries particle set.
"""
primary_star = datamodel.Particle()
primary_star.mass = primary_mass
    # Adding the particle to the stars set returns a reference to the
    # particle as stored in that set; we use that reference when
    # constructing the binary, so that all binaries refer to stars
    # in the "stars" set.
primary_star = stars.add_particle(primary_star)
secondary_star = datamodel.Particle()
secondary_star.mass = primary_mass * mass_ratio
secondary_star = stars.add_particle(secondary_star)
binary = datamodel.Particle()
binary.eccentricity = eccentricity
binary.semi_major_axis = separation
binary.child1 = primary_star
binary.child2 = secondary_star
binaries.add_particle(binary)
def generate_initial_population_grid(
min_mass, max_mass, number_of_mass_bins,
min_ratio, max_ratio, number_of_ratio_bins,
min_separation, max_separation, number_of_separation_bins,
min_eccentricity, max_eccentricity, number_of_eccentricity_bins
):
"""
    Creates a set of binaries and a set of stars. The grid is divided into
    equal parts along each dimension (primary mass, mass ratio, separation
    and eccentricity); the total number of binaries is the product of the
    number of bins in all dimensions.
"""
# quantities.linspace takes care of the units,
    # but is otherwise equivalent to numpy.linspace
primary_masses = quantities.linspace(
min_mass, max_mass, number_of_mass_bins)
mass_ratios = quantities.linspace(
min_ratio, max_ratio, number_of_ratio_bins)
separations = quantities.linspace(
min_separation, max_separation, number_of_separation_bins)
eccentricities = quantities.linspace(
min_eccentricity, max_eccentricity, number_of_eccentricity_bins)
    # We can create the binaries individually (with nested for loops to
# go through each dimension) or at once with vector operations.
# As vector operations are handled by numpy (in C) these are
# much faster (default in this script).
if USE_VECTOR_OPERATIONS is True:
grid = multidimensional_meshgrid(
primary_masses, mass_ratios, separations, eccentricities)
all_primary_masses = grid[0].flatten()
all_mass_ratios = grid[1].flatten()
all_separations = grid[2].flatten()
all_eccentricities = grid[3].flatten()
primary_stars = datamodel.Particles(mass=all_primary_masses)
secondary_stars = datamodel.Particles(
mass=all_primary_masses * all_mass_ratios)
stars = datamodel.Particles()
primary_stars = stars.add_particles(primary_stars)
secondary_stars = stars.add_particles(secondary_stars)
binaries = datamodel.Particles(
semi_major_axis=all_separations,
eccentricity=all_eccentricities
)
binaries.child1 = list(primary_stars)
binaries.child2 = list(secondary_stars)
else:
for primary_mass in primary_masses:
for mass_ratio in mass_ratios:
for separation in separations:
for eccentricity in eccentricities:
create_binary(
stars,
binaries,
primary_mass,
mass_ratio,
separation,
eccentricity
)
return binaries, stars
def evolve_population(binaries, stars, end_time, time_step):
code = SeBa()
# add the stars first, as the binaries will
# refer to them
code.particles.add_particles(stars)
code.binaries.add_particles(binaries)
channel_from_code_to_model_for_binaries = code.binaries.new_channel_to(
binaries)
channel_from_code_to_model_for_stars = code.particles.new_channel_to(stars)
# we evolve in steps of timestep, just to get some feedback
print("start evolving...")
time = 0.0 * end_time
while time < end_time:
time += time_step
code.evolve_model(time)
print("evolved to time: ", time.as_quantity_in(units.Myr))
channel_from_code_to_model_for_stars.copy()
channel_from_code_to_model_for_binaries.copy()
def make_hr_diagram(binaries):
pyplot.figure(figsize=(8, 8))
pyplot.title('Binary population', fontsize=12)
separation = binaries.semi_major_axis.value_in(units.RSun)
eccentricity = binaries.eccentricity
pyplot.hexbin(
separation,
eccentricity,
gridsize=40,
bins='log',
extent=(0, 100, 0.0, 0.9)
)
    pyplot.xlabel('semi major axis (RSun)')
pyplot.ylabel('eccentricity')
pyplot.show()
if __name__ == "__main__":
print("generating a binary population...")
binaries, stars = generate_initial_population_grid(
0.5 | units.MSun, 1.5 | units.MSun, 3, # mass range
0.9, 0.9, 1, # mass ratios range
10 | units.RSun, 100 | units.RSun, 120, # semi major axis range
0.0, 1.0, 120 # eccentricity range
)
print("generated a population of", len(binaries), "binaries")
evolve_population(binaries, stars, 1 | units.Gyr, 250 | units.Myr)
make_hr_diagram(binaries)
```
#### File: examples/simple/cluster.py
```python
from __future__ import print_function
import numpy
from matplotlib import pyplot
from amuse.units import nbody_system
from amuse.community.hermite0.interface import Hermite
# import logging
from amuse.ic.plummer import new_plummer_model
# logging.basicConfig(level=logging.DEBUG)
smoothing_length = 0.0 | nbody_system.length ** 2
def print_log(time, gravity, particles, total_energy_at_t0):
kinetic_energy = gravity.kinetic_energy
potential_energy = gravity.potential_energy
total_energy_at_this_time = kinetic_energy + potential_energy
print("time : ", time)
print("energy error : ", (
total_energy_at_this_time - total_energy_at_t0) / total_energy_at_t0)
def simulate_small_cluster(
number_of_stars=1000,
end_time=40 | nbody_system.time,
number_of_workers=1
):
particles = new_plummer_model(number_of_stars)
particles.scale_to_standard()
gravity = Hermite(number_of_workers=number_of_workers)
gravity.parameters.epsilon_squared = 0.15 | nbody_system.length ** 2
gravity.particles.add_particles(particles)
from_gravity_to_model = gravity.particles.new_channel_to(particles)
time = 0.0 * end_time
total_energy_at_t0 = gravity.kinetic_energy + gravity.potential_energy
positions_at_different_times = []
positions_at_different_times.append(particles.position)
times = []
times.append(time)
print("evolving the model until t = " + str(end_time))
while time < end_time:
time += end_time / 3.0
gravity.evolve_model(time)
from_gravity_to_model.copy()
positions_at_different_times.append(particles.position)
times.append(time)
print_log(time, gravity, particles, total_energy_at_t0)
gravity.stop()
return times, positions_at_different_times
def adjust_spines(ax, spines, ticks):
    for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 10)) # outward by 10 points
spine.set_smart_bounds(True)
else:
spine.set_color('none') # don't draw spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
ax.yaxis.set_ticks(ticks)
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_ticks(ticks)
else:
ax.xaxis.set_ticks([])
def plot_positions(times, positions_at_different_times):
figure = pyplot.figure()
plot_matrix_size = numpy.ceil(numpy.sqrt(
len(positions_at_different_times))).astype(int)
    number_of_rows = len(positions_at_different_times) // plot_matrix_size
figure.subplots_adjust(wspace=0.15, hspace=0.15)
for index, (time, positions) in enumerate(
zip(times, positions_at_different_times)
):
subplot = figure.add_subplot(
plot_matrix_size, plot_matrix_size, index + 1)
subplot.scatter(
positions[..., 0].value_in(nbody_system.length),
positions[..., 1].value_in(nbody_system.length),
s=1,
edgecolors='red',
facecolors='red'
)
subplot.set_xlim(-4.0, 4.0)
subplot.set_ylim(-4.0, 4.0)
title = 'time = {0:.2f}'.format(time.value_in(nbody_system.time))
subplot.set_title(title) # , fontsize=12)
spines = []
if index % plot_matrix_size == 0:
spines.append('left')
if index >= ((number_of_rows - 1)*plot_matrix_size):
spines.append('bottom')
adjust_spines(subplot, spines, numpy.arange(-4.0, 4.1, 1.0))
if index % plot_matrix_size == 0:
subplot.set_ylabel('y')
if index >= ((number_of_rows - 1)*plot_matrix_size):
subplot.set_xlabel('x')
pyplot.show()
if __name__ == "__main__":
times, positions_at_different_time = simulate_small_cluster(
300,
9.0 | nbody_system.time
)
plot_positions(times, positions_at_different_time)
```
#### File: examples/simple/grav_stellar_simple.py
```python
from __future__ import print_function
import os
from amuse.units.optparse import OptionParser
from amuse.units import units, nbody_system
from amuse.datamodel.particles import Channels
from amuse.community.hermite0.interface import Hermite
from amuse.community.seba.interface import SeBa
from amuse.couple.bridge import Bridge
from amuse.ic.plummer import new_plummer_model
from amuse.ic.salpeter import new_salpeter_mass_distribution
from matplotlib import pyplot
from amuse import plot as aplot
from amuse.support.console import set_printing_strategy
def create_stars(number_of_stars, size):
masses = new_salpeter_mass_distribution(
number_of_stars, mass_min=2 | units.MSun)
converter = nbody_system.nbody_to_si(masses.sum(), size)
stars = new_plummer_model(number_of_stars, convert_nbody=converter)
stars.mass = masses
stars.zams_mass = masses
return stars, converter
def plot_results(stars, time):
mass_loss = stars.zams_mass - stars.mass
x = stars.x.in_(units.parsec)
y = stars.y.in_(units.parsec)
pyplot.figure(figsize=(8, 8))
aplot.plot(x, y, "*")
for x, y, mass_loss in zip(x.number, y.number, mass_loss):
pyplot.annotate(
"%0.2f" % abs(mass_loss.number),
xy=(x, y+2),
horizontalalignment='center',
verticalalignment='bottom',
)
pyplot.axis('equal')
pyplot.xlim([-60, 60])
pyplot.ylim([-60, 60])
aplot.xlabel("x")
aplot.ylabel("y")
pyplot.title("time = " + str(time))
if not os.path.exists("plots"):
os.mkdir("plots")
name = "plots/plot_{0:=05}.png".format(int(time.value_in(units.Myr)))
print("creating", name)
pyplot.savefig(name)
pyplot.close()
def gravity_and_stellar_evolution(
number_of_stars, size, end_time, sync_timestep=1 | units.Myr,
plot_timestep=10 | units.Myr):
stars, converter = create_stars(number_of_stars, size)
gravity = Hermite(converter)
stellar = SeBa()
bridge = Bridge()
gravity.particles.add_particles(stars)
stellar.particles.add_particles(stars)
bridge.add_system(gravity)
bridge.add_system(stellar)
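    # The channel below makes the bridge copy mass and radius from the stellar
    # evolution code into the gravity code every sync_timestep, so that stellar
    # mass loss feeds back into the dynamics.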
bridge.channels.add_channel(stellar.particles.new_channel_to(
gravity.particles, attributes=["mass", "radius"]))
bridge.timestep = sync_timestep
plot_channels = Channels()
plot_channels.add_channel(stellar.particles.new_channel_to(stars))
plot_channels.add_channel(gravity.particles.new_channel_to(stars))
time = 0 | units.Myr
while time <= end_time:
bridge.evolve_model(time)
plot_channels.copy()
plot_results(stars, time)
time += plot_timestep
def parse_arguments():
parser = OptionParser()
parser.add_option(
"-N", dest="number_of_stars", type="int", default=100,
help="The number of stars in the cluster [%default].")
parser.add_option(
"-s", dest="size", type="float", unit=units.parsec, default=10,
help="The total size of the cluster [%default %unit].")
parser.add_option(
"-t", dest="end_time", type="float", unit=units.Gyr, default=0.1,
help="The end time of the simulation [%default %unit].")
options, args = parser.parse_args()
return options.__dict__
if __name__ == "__main__":
options = parse_arguments()
set_printing_strategy(
"custom",
preferred_units=[
units.MSun, units.parsec, units.Myr
],
precision=3)
gravity_and_stellar_evolution(**options)
```
#### File: examples/syllabus/molecular_cloud_to_star_cluster.py
```python
import numpy
from matplotlib import pyplot
from amuse.lab import *
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.community.fi.interface import Fi
from amuse.ext.molecular_cloud import molecular_cloud
from amuse.ext.evrard_test import body_centered_grid_unit_cube
from amuse.ext.derived_grav_systems import copycat
from amuse.ext.bridge import bridge
from amuse.io import write_set_to_file
from amuse.support.data import ParticlesWithUnitsConverted
from amuse.datamodel import Particles
def make_map(sph,N=100,L=1):
x,y=numpy.indices( ( N+1,N+1 ))
x=L*(x.flatten()-N/2.)/N
y=L*(y.flatten()-N/2.)/N
z=x*0.
vx=0.*x
vy=0.*x
vz=0.*x
x=units.parsec(x)
y=units.parsec(y)
z=units.parsec(z)
vx=units.kms(vx)
vy=units.kms(vy)
vz=units.kms(vz)
rho,rhovx,rhovy,rhovz,rhoe=sph.get_hydro_state_at_point(x,y,z,vx,vy,vz)
rho=rho.reshape((N+1,N+1))
return rho
'''
def write_output(filename, parts, conv):
output= file(filename, 'w')
for i in range (0,len(parts)):
#print i
rho = conv.to_nbody(parts.rho[i])
mass= conv.to_nbody(parts.mass[i])
x= conv.to_nbody(parts.x[i])
y= conv.to_nbody(parts.y[i])
z= conv.to_nbody(parts.z[i])
vx= conv.to_nbody(parts.vx[i])
vy= conv.to_nbody(parts.vy[i])
vz= conv.to_nbody(parts.vz[i])
print>> output, rho.value_in(nbody_system.mass/nbody_system.length**3), mass.value_in(nbody_system.mass), x.value_in(nbody_system.length), y.value_in(nbody_system.length), z.value_in(nbody_system.length), vx.value_in(nbody_system.length/nbody_system.time), vy.value_in(nbody_system.length/nbody_system.time),vz.value_in(nbody_system.length/nbody_system.time)
output.close()
return 0
'''
def write_output(filename, parts, conv):
particles_nbody = ParticlesWithUnitsConverted(parts, conv.as_converter_from_nbody_to_si())
#print particles_nbody
write_set_to_file(particles_nbody, filename, "txt", attribute_names= ('rho', 'mass', 'x', 'y', 'z','vx', 'vy', 'vz'))
return 0
def plot_stars(time, stars, i, L=6.):
fig=pyplot.figure(figsize=(12,12))
m = 100.0*stars.mass/max(stars.mass)
x = -stars.x.value_in(units.parsec)
y = stars.y.value_in(units.parsec)
pyplot.scatter(x, y, s=m)
pyplot.title("Star cluster at"+time.as_string_in(units.Myr))
pyplot.xlim(-L/2., L/2.)
pyplot.ylim(-L/2., L/2.)
pyplot.xlabel("x [pc]")
pyplot.ylabel("y [pc]")
pyplot.savefig("SC_"+str(i)+".png")
def plot_hydro(time, sph, i, L=10):
fig=pyplot.figure(figsize=(12,12))
rho=make_map(sph,N=200,L=L)
pyplot.imshow(numpy.log10(1.e-5+rho.value_in(units.amu/units.cm**3)), extent=[-L/2,L/2,-L/2,L/2],vmin=1,vmax=5)
# subplot.set_title("GMC at zero age")
pyplot.title("Molecular cloud at time="+time.as_string_in(units.Myr))
pyplot.xlabel("x [pc]")
pyplot.ylabel("x [pc]")
pyplot.title("GMC at time="+time.as_string_in(units.Myr))
pyplot.savefig("GMC_"+str(i)+".png")
def plot_hydro_and_stars(time, sph, L=10):
fig=pyplot.figure(figsize=(12,12))
rho=make_map(sph,N=200,L=L)
pyplot.imshow(numpy.log10(1.e-5+rho.value_in(units.amu/units.cm**3)), extent=[-L/2,L/2,-L/2,L/2],vmin=1,vmax=5)
# subplot.set_title("GMC at zero age")
stars = get_stars_from_molecular_clous(sph.gas_particles)
m = 100.0*stars.mass/max(stars.mass)
x = -stars.x.value_in(units.parsec)
y = stars.y.value_in(units.parsec)
pyplot.scatter(x, y, s=m)
pyplot.xlim(-L/2., L/2.)
pyplot.ylim(-L/2., L/2.)
pyplot.title("Molecular cloud at time="+time.as_string_in(units.Myr))
pyplot.xlabel("x [pc]")
pyplot.ylabel("x [pc]")
pyplot.title("GMC at time="+time.as_string_in(units.Myr))
pyplot.savefig("GMC_SC.png")
def run_molecular_cloud(N=100, Mcloud=100. | units.MSun, Rcloud=1. | units.parsec):
conv = nbody_system.nbody_to_si(Mcloud,Rcloud)
rho_cloud = 3.*Mcloud/(4.*numpy.pi*Rcloud**3)
print rho_cloud
tff = 0.5427/numpy.sqrt(constants.G*rho_cloud)
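    # 0.5427 ~ sqrt(3*pi/32), so tff is the standard free-fall time
    # sqrt(3*pi/(32*G*rho_cloud))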
print "t_ff=", tff.value_in(units.Myr), 'Myr'
dt = 5.e-2 | units.Myr
tend=1.0 | units.Myr
# tend=2.0 | units.Myr
parts=molecular_cloud(targetN=N,convert_nbody=conv,
base_grid=body_centered_grid_unit_cube, seed=100).result
sph=Fi(conv, number_of_workers=3)
sph.parameters.use_hydro_flag=True
sph.parameters.radiation_flag=False
sph.parameters.gamma=1
sph.parameters.isothermal_flag=True
sph.parameters.integrate_entropy_flag=False
sph.parameters.timestep=dt
sph.parameters.verbosity = 0
sph.parameters.eps_is_h_flag = False# h_smooth is constant
eps = 0.1 | units.parsec
sph.parameters.gas_epsilon = eps
sph.parameters.sph_h_const = eps
parts.h_smooth= eps
print 'eps-h flag', sph.get_eps_is_h(), sph.get_consthsm()
expected_dt = 0.2*numpy.pi*numpy.power(eps, 1.5)/numpy.sqrt(constants.G*Mcloud/N)
print "dt_exp=", expected_dt.value_in(units.Myr)
print "dt=", dt
print "eps=", sph.parameters.gas_epsilon.in_(units.parsec)
sph.gas_particles.add_particles(parts)
#grav=copycat(Fi, sph, conv)
#sys=bridge(verbose=False)
#sys.add_system(sph,(grav,),False)
channel_from_sph_to_parts= sph.gas_particles.new_channel_to(parts)
channel_from_parts_to_sph= parts.new_channel_to(sph.gas_particles)
i=0
L=6
E0 = 0.0
ttarget = 0.0 | units.Myr
plot_hydro(ttarget, sph, i, L)
while ttarget < tend:
ttarget=float(i)*dt
print ttarget
sph.evolve_model(ttarget, timestep=dt)
E = sph.gas_particles.kinetic_energy()+sph.gas_particles.potential_energy() + sph.gas_particles.thermal_energy()
E_th = sph.gas_particles.thermal_energy()
if i==0:
E0 = E
Eerr = (E-E0)/E0
print 'energy=', E, 'energy_error=', Eerr, 'e_th=', E_th
channel_from_sph_to_parts.copy()
"""
filename = 'm400k_r10pc_e01_'+ str(i).zfill(2) + '.dat'
print filename
parts_sorted = parts.sorted_by_attribute('rho')
write_output(filename, parts_sorted, conv)
"""
plot_hydro(ttarget, sph, i, L)
i=i+1
plot_hydro_and_stars(ttarget, sph, L)
sph.stop()
return parts
def make_stars(cluster_particle):
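    # Turn one dense gas particle into a small star cluster: sfe is the assumed
    # star-formation efficiency and mmean the assumed mean stellar mass, giving
    # N stars drawn from a Salpeter IMF in a Plummer sphere scaled to the
    # particle's smoothing length.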
sfe = 0.3
mmean = 1.0|units.MSun
N = int(sfe*cluster_particle.mass/mmean)
stars = Particles(0)
print "N_cluster=", N
if N>0:
masses = new_salpeter_mass_distribution(N, 0.3|units.MSun, min(100|units.MSun, cluster_particle.mass))
r = cluster_particle.h_smooth
converter=nbody_system.nbody_to_si(masses.sum(),r)
stars = new_plummer_model(N, convert_nbody=converter)
stars.mass = masses
stars.position += cluster_particle.position
stars.velocity += cluster_particle.velocity
return stars
def get_stars_from_molecular_clous(parts):
cutoff_density = 10000 | units.amu/units.cm**3
stars = Particles(0)
for ip in parts:
if ip.rho>cutoff_density:
local_stars = make_stars(ip)
stars.add_particles(local_stars)
return stars
def run_dynamics(bodies, t_end, nsteps):
Mtot_init = bodies.mass.sum()
stellar = SeBa()
stellar.parameters.metallicity = 0.02
    stellar.particles.add_particles(bodies)
Rvir = 1|units.parsec
    converter=nbody_system.nbody_to_si(bodies.mass.sum(),Rvir)
gravity = ph4(converter)
# gravity = bhtree(converter)
gravity.parameters.timestep_parameter = 0.01
gravity.particles.add_particles(bodies)
channel_from_se_to_framework = stellar.particles.new_channel_to(bodies)
channel_from_gd_to_framework = gravity.particles.new_channel_to(bodies)
channel_from_framework_to_gd = bodies.new_channel_to(gravity.particles)
channel_from_se_to_framework.copy_attributes(["mass","radius","luminosity"])
bodies.scale_to_standard(convert_nbody=converter)
filename = "GMC_stars.hdf5"
write_set_to_file(bodies.savepoint(0|units.Myr), filename, 'hdf5')
Etot_init = gravity.kinetic_energy + gravity.potential_energy
Etot_prev = Etot_init
time = 0.0 | t_end.unit
dt = t_end/nsteps
i = 0
plot_stars(time, bodies, i)
while time < t_end:
time += dt
gravity.evolve_model(time)
Etot_prev_se = gravity.kinetic_energy + gravity.potential_energy
stellar.evolve_model(time)
channel_from_gd_to_framework.copy()
channel_from_se_to_framework.copy_attributes(["mass","radius","luminosity", "temperature"])
channel_from_framework_to_gd.copy_attributes(["mass"])
write_set_to_file(bodies.savepoint(time), filename, 'hdf5')
Ekin = gravity.kinetic_energy
Epot = gravity.potential_energy
Etot = Ekin + Epot
dE = Etot_prev-Etot
dE_se = Etot_prev_se-Etot
Mtot = bodies.mass.sum()
print "T=", time,
print "M=", Mtot, "(dM[SE]=", Mtot/Mtot_init, ")",
print "E= ", Etot, "Q= ", Ekin/Epot,
print "dE=", (Etot_init-Etot)/Etot, "ddE=", (Etot_prev-Etot)/Etot,
print "(dE[SE]=", dE_se/Etot, ")"
Etot_init -= dE
Etot_prev = Etot
plot_stars(time, bodies, i)
i+=1
gravity.stop()
stellar.stop()
plot_stars(time, bodies, i, L=20)
if __name__ in ("__main__","__plot__"):
# parts = run_molecular_cloud(4000, Mcloud=400000. | units.MSun, Rcloud=10. | units.parsec)
parts = run_molecular_cloud(1000, Mcloud=10000. | units.MSun, Rcloud=3. | units.parsec)
stars = get_stars_from_molecular_clous(parts)
# write_set_to_file(stars, "stars.hdf5", 'hdf5')
run_dynamics(stars, 10.0|units.Myr, 10)
```
#### File: examples/syllabus/plot_molecular_cloud.py
```python
import numpy
from matplotlib import pyplot
from amuse.lab import *
from amuse.ext.molecular_cloud import molecular_cloud
from amuse.ext.evrard_test import body_centered_grid_unit_cube
from amuse.plot import sph_particles_plot, native_plot
def create_molecular_cloud(N, Mcloud, Rcloud, t_end):
converter = nbody_system.nbody_to_si(Mcloud,Rcloud)
parts=molecular_cloud(targetN=N,convert_nbody=converter,
base_grid=body_centered_grid_unit_cube, seed=100).result
# parts = new_plummer_gas_model(N, convert_nbody=converter)
sph=Fi(converter)
    sph.gas_particles.add_particles(parts)
sph.evolve_model(t_end)
ch = sph.gas_particles.new_channel_to(parts)
ch.copy()
sph.stop()
return parts
if __name__ in ("__main__","__plot__"):
sph_particles = create_molecular_cloud(10000, Mcloud=10000. | units.MSun, Rcloud=10. | units.parsec, t_end=1|units.day)
native_plot.figure(figsize = (10, 10), dpi = 50)
sph_particles_plot(sph_particles)
native_plot.show()
```
#### File: examples/syllabus/stellar_gravity_hydro.py
```python
import numpy
from amuse.lab import *
from amuse.couple import bridge
from amuse import datamodel
from amuse.ext.evrard_test import uniform_unit_sphere
def new_sph_particles_from_stellar_wind(stars, mgas):
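    # Convert the wind mass accumulated per star (si.Mwind, negative while mass
    # is being lost) into new SPH particles of mass mgas, distributed around the
    # star and launched outward at its terminal wind velocity.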
new_sph=datamodel.Particles(0)
for si in stars:
p = si.position
v = si.velocity
Ngas = int(-si.Mwind/mgas)
print "new Ngas=", si.mass, Ngas,
if Ngas==0:
continue
Mgas = mgas*Ngas
si.Mwind += Mgas
# Ngas = 10
# mgas = Mgas/10.
print "new Ngas=", Ngas, mgas
add=datamodel.Particles(Ngas)
add.mass = mgas
add.h_smooth=0. | units.parsec
dx,dy,dz=uniform_unit_sphere(Ngas).make_xyz()
add.x=si.x+(dx * si.radius)
add.y=si.y+(dy * si.radius)
add.z=si.z+(dz * si.radius)
for ri in range(len(add)):
r = add[ri].position-p
r = r/r.length()
v_wind = (constants.G*si.mass/(add[ri].position-p).length()).sqrt()
add.u= 0.5 * (v_wind)**2
v_wind = si.terminal_wind_velocity
add.vx=v.x + r[0]*v_wind
add.vy=v.y + r[1]*v_wind
add.vz=v.z + r[2]*v_wind
new_sph.add_particles(add)
return new_sph
def v_terminal_teff(star):
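    # Rough terminal wind velocity as a function of effective temperature:
    # 30 km/s at T <= 10^4 K, rising linearly in log10(T) to 4030 km/s
    # at T >= 10^5 K.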
t4=numpy.log10(star.temperature.value_in(units.K))-4.
t4=t4.clip(0.,1.)
return (30 | units.km/units.s) + ((4000 | units.km/units.s)*t4)
def get_kepler_elements(model_time, bh, star, converter):
kep = Kepler(converter)
kep.initialize_code()
pos = bh.position - star.position
vel = bh.velocity - star.velocity
print "Kep:", bh.mass + star.mass, pos[0], pos[1], pos[2], vel[0], vel[1], vel[2]
kep.initialize_from_dyn(bh.mass + star.mass, pos[0], pos[1], pos[2], vel[0], vel[1], vel[2])
a,e = kep.get_elements()
kep.stop()
return a, e
def gravity_hydro_bridge(a, ecc, t_end, n_steps, Rgas, Mgas, Ngas):
stars = Particles(3)
stars.mass = [5.0, 9.9, 10.0] | units.MSun
stellar = SeBa()
stellar.particles.add_particles(stars)
stellar_to_framework = stellar.particles.new_channel_to(stars)
stellar.evolve_model(26|units.Myr)
stellar_to_framework.copy_attributes(["mass","radius","temperature"])
print "stars=", stars
stellar.evolve_model(26.1|units.Myr)
stars.dmdt = (stellar.particles.mass-stars.mass)/(0.1|units.Myr)
stars.Mwind = 0 | units.MSun
stars.terminal_wind_velocity=v_terminal_teff(stars)
stellar.stop()
print "dmdt=", stars.dmdt
dt = 0.1|units.day
mgas = 0.1*abs(stars.dmdt.sum()*dt)
print "mgas=", mgas.value_in(units.MJupiter), stars.dmdt/mgas
vc = constants.G*stars.mass.sum()/a
Porb = 2*numpy.pi*(a**3/(constants.G*stars.mass.sum())).sqrt()
stars[0].position = (0,0,0) | units.AU
stars[0].velocity = (0,0,0) | units.kms
vc = (constants.G*stars[:2].mass.sum()/(a*(1+ecc))).sqrt()
vc *= numpy.sqrt((1-ecc)/(1+ecc))
stars[1].position = (a.value_in(units.AU),0,0) | units.AU
stars[1].velocity = (0,vc.value_in(units.kms),0) | units.kms
stars[:2].move_to_center()
ecc = 0.2
vc = (constants.G*stars.mass.sum()/(10*a*(1+ecc))).sqrt()
vc *= numpy.sqrt((1-ecc)/(1+ecc))
stars[2].position = (10*a.value_in(units.AU),0,0) | units.AU
stars[2].velocity = (0,vc.value_in(units.kms),0) | units.kms
stars.move_to_center()
stars.radius = 0.2*a
#define for printing
# stars.h_smooth= 0.0*a
# stars.u = 0 | units.kms**2
converter=nbody_system.nbody_to_si(stars.mass.sum(), a)
gravity = ph4(converter, redirection="none")
gravity.particles.add_particles(stars)
gravity.parameters.epsilon_squared = (10|units.RSun)**2
Ed0_tot = gravity.kinetic_energy + gravity.potential_energy
channel_from_gravity = gravity.particles.new_channel_to(stars)
channel_from_to_gravity = stars.new_channel_to(gravity.particles)
dt = t_end/float(n_steps)
converter=nbody_system.nbody_to_si(1.0|units.MSun, a)
ism = Particles(0)
ism.mass = mgas
ism.position = (0,0,0)|units.AU
ism.velocity = (0,0,0)|units.kms
ism.u = 0 | units.m**2 * units.s**-2
ism.h_smooth= 0.01*a
hydro = Fi(converter, redirection="none")
hydro.parameters.timestep = dt/8.
hydro.parameters.use_hydro_flag=True
hydro.parameters.radiation_flag=False
hydro.parameters.self_gravity_flag=True
hydro.parameters.integrate_entropy_flag=False
hydro.parameters.gamma=1.
hydro.parameters.isothermal_flag=True
hydro.parameters.epsilon_squared = (10|units.RSun)**2
if len(ism)>0:
hydro.gas_particles.add_particles(ism)
Eh0_tot = hydro.kinetic_energy + hydro.potential_energy + hydro.thermal_energy
hydro.parameters.periodic_box_size = 10000*a
channel_from_hydro = hydro.gas_particles.new_channel_to(ism)
channel_from_to_hydro = ism.new_channel_to(hydro.gas_particles)
moving_bodies = ParticlesSuperset([stars, ism])
model_time = 0 | units.Myr
filename = "stellargravhydro.hdf5"
if len(ism)>0:
write_set_to_file(moving_bodies, filename, 'hdf5')
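    # The bridge couples the two codes both ways (the stars feel the gas and the
    # gas feels the stars), with the interaction applied on the shared
    # gravhydro.timestep.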
gravhydro = bridge.Bridge(use_threading=False)
gravhydro.add_system(gravity, (hydro,) )
gravhydro.add_system(hydro, (gravity,) )
gravhydro.timestep = min(dt, 2*hydro.parameters.timestep)
istep = 0
while model_time < t_end:
model_time += dt
a, e = get_kepler_elements(gravity.model_time, stars[0], stars[1], converter)
print "AB: time=", model_time, a, e
com_star = Particles(1)
com_star.mass = stars[:2].mass.sum()
com_star.position = stars[:2].center_of_mass()
com_star.velocity = stars[:2].center_of_mass_velocity()
a, e = get_kepler_elements(gravity.model_time, com_star[0], stars[2], converter)
print "(AB)C: time=", model_time, a, e
stars.Mwind += stars.dmdt*dt
print "Mw=", stars.Mwind, stars.Mwind/mgas
new_sph = new_sph_particles_from_stellar_wind(stars, mgas)
print "Ngas=", len(new_sph), len(ism), len(hydro.gas_particles)
if len(new_sph)>0: # and len(bodies)<4000:
ism.add_particles(new_sph)
ism.synchronize_to(hydro.gas_particles)
if len(ism)>100:
print "t=", hydro.model_time, dt
gravhydro.evolve_model(model_time)
channel_from_gravity.copy()
channel_from_hydro.copy()
channel_from_hydro.copy_attributes(["u"])
print "N=", len(hydro.particles)
Ed_tot = gravity.kinetic_energy + gravity.potential_energy
Eh_tot = hydro.kinetic_energy + hydro.potential_energy + hydro.thermal_energy
print "Energies:", Ed_tot/Ed0_tot, Eh_tot/Eh0_tot
if istep%10==0:
write_set_to_file(moving_bodies, filename, 'hdf5')
istep+=1
gravity.stop()
hydro.stop()
def new_option_parser():
from amuse.units.optparse import OptionParser
result = OptionParser()
result.add_option("-n", dest="n_steps", type="int", default = 1000,
help="number of diagnostics time steps [%default]")
result.add_option("-N", dest="Ngas", type="int", default = 1024,
help="number of gas particles [%default]")
result.add_option("-M", unit=units.MSun,
dest="Mgas", type="float", default = 1|units.MSun,
help="Mass of the gas [%default]")
result.add_option("-R", unit=units.AU,
dest="Rgas", type="float", default = 1|units.AU,
help="Size of the gas distribution [%default]")
result.add_option("-a", unit=units.AU,
dest="a", type="float", default = 0.2|units.AU,
help="initial orbital separation [%default]")
result.add_option("-e", dest="ecc", type="float", default = 0.0,
help="initial orbital eccentricity [%default]")
result.add_option("-t", unit=units.yr,
dest="t_end", type="float", default = 10|units.yr,
help="end time of the simulation [%default]")
return result
if __name__ in ('__main__', '__plot__'):
o, arguments = new_option_parser().parse_args()
gravity_hydro_bridge(**o.__dict__)
```
#### File: examples/textbook/evolve_triple_with_wind_2.py
```python
import sys
import math, numpy
from optparse import OptionParser
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
from prepare_figure import single_frame
from distinct_colours import get_distinct
from amuse.units import units, constants, nbody_system
from amuse.units.quantities import zero
from amuse.datamodel import Particle, Particles
from amuse.support.console import set_printing_strategy
from amuse.io import store
from amuse.ext.orbital_elements import new_binary_from_orbital_elements
from amuse.ext.orbital_elements import orbital_elements_from_binary
from amuse.community.symple.interface import symple # symplectic
from amuse.community.huayno.interface import Huayno # symplectic
from amuse.community.smalln.interface import SmallN # time reversible
from amuse.community.hermite0.interface import Hermite # not symplectic
from amuse.community.seba.interface import SeBa
from amuse.community.sse.interface import SSE
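# Kepler's third law: P = 2*pi*sqrt(a**3/(G*Mtot)); semimajor_axis() is its inverse.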
def orbital_period(a, Mtot):
return 2*numpy.pi*(a**3/(constants.G*Mtot)).sqrt()
def semimajor_axis(P, Mtot):
return (constants.G*Mtot*P**2/(4*numpy.pi**2))**(1./3)
def get_orbital_elements_of_triple(stars):
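# Inner orbit: the two inner stars.  Outer orbit: the inner binary's center
# of mass treated as a single particle, paired with the tertiary.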
inner_binary = stars[0]+stars[1]
outer_binary = Particles(1)
outer_binary[0].mass = inner_binary.mass.sum()
outer_binary[0].position = inner_binary.center_of_mass()
outer_binary[0].velocity = inner_binary.center_of_mass_velocity()
outer_binary.add_particle(stars[2])
M1, M2, ain, ein, ta_in, inc_in, lan_in, aop_in \
= orbital_elements_from_binary(inner_binary, G=constants.G)
    M12, M3, aout, eout, ta_out, inc_out, lan_out, aop_out \
= orbital_elements_from_binary(outer_binary, G=constants.G)
return ain, ein, aout, eout
def evolve_triple_with_wind(M1, M2, M3, Pora, Pin_0, ain_0, aout_0,
ein_0, eout_0, t_end, nsteps, scheme, integrator,
t_stellar, dt_se, dtse_fac, interp):
import random
from amuse.ext.solarsystem import get_position
numpy.random.seed(42)
print "Initial masses:", M1, M2, M3
triple = Particles(3)
triple[0].mass = M1
triple[1].mass = M2
triple[2].mass = M3
stellar = SeBa()
stellar.particles.add_particles(triple)
channel_from_stellar = stellar.particles.new_channel_to(triple)
# Evolve to t_stellar.
stellar.evolve_model(t_stellar)
channel_from_stellar.copy_attributes(["mass"])
M1 = triple[0].mass
M2 = triple[1].mass
M3 = triple[2].mass
print "t=", stellar.model_time.in_(units.Myr)
print "M=", stellar.particles.mass.in_(units.MSun)
print "R=", stellar.particles.radius.in_(units.RSun)
print "L=", stellar.particles.luminosity.in_(units.LSun)
print "T=", stellar.particles.temperature.in_(units.K)
print "Mdot=", \
-stellar.particles.wind_mass_loss_rate.in_(units.MSun/units.yr)
# Start the dynamics.
# Inner binary:
tmp_stars = Particles(2)
tmp_stars[0].mass = M1
tmp_stars[1].mass = M2
if Pora == 1:
ain_0 = semimajor_axis(Pin_0, M1+M2)
else:
Pin_0 = orbital_period(ain_0, M1+M2)
print 'Pin =', Pin_0
print 'ain_0 =', ain_0
print 'M1+M2 =', M1+M2
print 'Pin_0 =', Pin_0.value_in(units.day), '[day]'
#print 'semi:', semimajor_axis(Pin_0, M1+M2).value_in(units.AU), 'AU'
#print 'period:', orbital_period(ain_0, M1+M2).value_in(units.day), '[day]'
dt_init = 0.01*Pin_0
ma = 180
inc = 60
aop = 180
lon = 0
r,v = get_position(M1, M2, ein_0, ain_0, ma, inc, aop, lon, dt_init)
tmp_stars[1].position = r
tmp_stars[1].velocity = v
tmp_stars.move_to_center()
# Outer binary:
r,v = get_position(M1+M2, M3, eout_0, aout_0, 0, 0, 0, 0, dt_init)
tertiary = Particle()
tertiary.mass = M3
tertiary.position = r
tertiary.velocity = v
tmp_stars.add_particle(tertiary)
tmp_stars.move_to_center()
triple.position = tmp_stars.position
triple.velocity = tmp_stars.velocity
Mtriple = triple.mass.sum()
Pout = orbital_period(aout_0, Mtriple)
print "T=", stellar.model_time.in_(units.Myr)
print "M=", stellar.particles.mass.in_(units.MSun)
print "Pout=", Pout.in_(units.Myr)
print 'tK =', ((M1+M2)/M3)*Pout**2*(1-eout_0**2)**1.5/Pin_0
converter = nbody_system.nbody_to_si(triple.mass.sum(), aout_0)
if integrator == 0:
gravity = Hermite(converter)
gravity.parameters.timestep_parameter = 0.01
elif integrator == 1:
gravity = SmallN(converter)
gravity.parameters.timestep_parameter = 0.01
gravity.parameters.full_unperturbed = 0
elif integrator == 2:
gravity = Huayno(converter)
gravity.parameters.inttype_parameter = 20
gravity.parameters.timestep = (1./256)*Pin_0
else:
gravity = symple(converter)
gravity.parameters.integrator = 10
#gravity.parameters.timestep_parameter = 0.
gravity.parameters.timestep = (1./128)*Pin_0
print gravity.parameters
gravity.particles.add_particles(triple)
channel_from_framework_to_gd = triple.new_channel_to(gravity.particles)
channel_from_gd_to_framework = gravity.particles.new_channel_to(triple)
Etot_init = gravity.kinetic_energy + gravity.potential_energy
Etot_prev = Etot_init
gravity.particles.move_to_center()
# Note: time = t_diag = 0 at the start of the dynamical integration.
dt_diag = t_end/float(nsteps)
t_diag = dt_diag
time = 0.0 | t_end.unit
t_se = t_stellar + time
print 't_end =', t_end
print 'dt_diag =', dt_diag
ain, ein, aout, eout = get_orbital_elements_of_triple(triple)
print "Triple elements t=", time, \
"inner:", triple[0].mass, triple[1].mass, ain, ein, \
"outer:", triple[2].mass, aout, eout
t = [time.value_in(units.Myr)]
Mtot = triple.mass.sum()
mtot = [Mtot.value_in(units.MSun)]
smai = [ain/ain_0]
ecci = [ein/ein_0]
smao = [aout/aout_0]
ecco = [eout/eout_0]
if interp:
# Create arrays of stellar times and masses for interpolation.
times = [time]
masses = [triple.mass.copy()]
while time < t_end:
time += dt_se
stellar.evolve_model(t_stellar+time)
channel_from_stellar.copy_attributes(["mass"])
times.append(time)
masses.append(triple.mass.copy())
time = 0.0 | t_end.unit
print '\ntimes:', times, '\n'
# Evolve the system.
def advance_stellar(t_se, dt):
E0 = gravity.kinetic_energy + gravity.potential_energy
t_se += dt
if interp:
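# Linearly interpolate the pre-tabulated masses to the current stellar
# time instead of calling the stellar evolution code.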
t = t_se-t_stellar
i = int(t/dt_se)
mass = masses[i] + (t-times[i])*(masses[i+1]-masses[i])/dt_se
triple.mass = mass
#print 't_se =', t_se, 'masses =', mass
else:
stellar.evolve_model(t_se)
channel_from_stellar.copy_attributes(["mass"])
channel_from_framework_to_gd.copy_attributes(["mass"])
return t_se, gravity.kinetic_energy + gravity.potential_energy - E0
def advance_gravity(tg, dt):
tg += dt
gravity.evolve_model(tg)
channel_from_gd_to_framework.copy()
return tg
while time < t_end:
if scheme == 1:
# Advance to the next diagnostic time.
dE_se = zero
dt = t_diag - time
if dt > 0|dt.unit:
time = advance_gravity(time, dt)
elif scheme == 2:
# Derive dt from Pin using dtse_fac.
dt = dtse_fac*Pin_0
if time + dt > t_diag: dt = t_diag - time
if dt > 0|dt.unit:
t_se, dE_se = advance_stellar(t_se, dt)
time = advance_gravity(time, dt)
elif scheme == 3:
# Derive dt from Pin using dtse_fac.
dt = dtse_fac*Pin_0
if time + dt > t_diag: dt = t_diag - time
if dt > 0|dt.unit:
time = advance_gravity(time, dt)
t_se, dE_se = advance_stellar(t_se, dt)
elif scheme == 4:
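# Interleaved split: half a stellar-evolution step, a full gravity step,
# then the second stellar half step.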
# Derive dt from Pin using dtse_fac.
dt = dtse_fac*Pin_0
if time + dt > t_diag: dt = t_diag - time
if dt > 0|dt.unit:
t_se, dE_se = advance_stellar(t_se, 0.5*dt)
time = advance_gravity(time, dt)
t_se, dE_se2 = advance_stellar(t_se, 0.5*dt)
dE_se += dE_se2
elif scheme == 5:
# Use the specified dt_se.
dE_se = zero
dt = dt_se
if time + dt > t_diag: dt = t_diag - time
if dt > 0|dt.unit:
# For use with symple only: set up average mass loss.
channel_from_stellar.copy_attributes(["mass"])
m0 = triple.mass.copy()
stellar.evolve_model(t_se+dt)
channel_from_stellar.copy_attributes(["mass"])
t_se = stellar.model_time
m1 = triple.mass
dmdt = (m1-m0)/dt
for i in range(len(dmdt)):
gravity.set_dmdt(i, dmdt[i])
time = advance_gravity(time, dt)
else:
print 'unknown option'
sys.exit(0)
if time >= t_diag:
t_diag = time + dt_diag
Ekin = gravity.kinetic_energy
Epot = gravity.potential_energy
Etot = Ekin + Epot
dE = Etot_prev - Etot
Mtot = triple.mass.sum()
print "T=", time,
print "M=", Mtot, "(dM[SE]=", Mtot/Mtriple, ")",
print "E= ", Etot, "Q= ", Ekin/Epot,
print "dE=", (Etot_init-Etot)/Etot, "ddE=", (Etot_prev-Etot)/Etot,
print "(dE[SE]=", dE_se/Etot, ")"
Etot_init -= dE
Etot_prev = Etot
ain, ein, aout, eout = get_orbital_elements_of_triple(triple)
print "Triple elements t=", t_stellar + time, \
"inner:", triple[0].mass, triple[1].mass, ain, ein, \
"outer:", triple[2].mass, aout, eout
t.append(time.value_in(units.yr))
mtot.append(Mtot.value_in(units.MSun))
smai.append(ain/ain_0)
ecci.append(ein/ein_0)
smao.append(aout/aout_0)
ecco.append(eout/eout_0)
if eout > 1 or aout <= zero:
print "Binary ionized or merged"
break
gravity.stop()
stellar.stop()
return t, mtot, smai, ecci, smao, ecco
def main(M1, M2, M3, Pora, Pin, ain, aout, ein, eout,
t_end, nsteps, scheme, integrator,
t_stellar, dt_se, dtse_fac, interp, show):
two_frames = False
plot_ae = True
color = get_distinct(4)
if two_frames:
plt.figure(figsize=(10, 8))
else:
plt.figure(figsize=(12, 8))
if scheme == 5:
if integrator != 3:
print 'Warning: scheme = 5 forces integrator = 3'
integrator = 3
srange = [1, scheme]  # 1 = no mass loss; other = mass loss (assumes scheme > 1)
i = 0
lw = [1, 2]
for s in srange:
time, mtot, ai, ei, ao, eo \
= evolve_triple_with_wind(M1, M2, M3,
Pora, Pin, ain, aout,
ein, eout,
t_end, nsteps,
s, integrator,
t_stellar, dt_se,
dtse_fac, interp)
if i == 0:
if two_frames: plt.subplot(1,2,1)
plt.plot(time, ai, c=color[0], linewidth=lw[i],
label='inner, no mass loss')
plt.plot(time, ao, c=color[3], linewidth=lw[i],
label='outer, no mass loss')
plt.xlabel('time (yr)')
plt.ylabel('$a/a_0$')
if two_frames:
plt.subplot(1,2,2)
if plot_ae:
plt.plot(ai, ei, c=color[0], linewidth=lw[i])
plt.plot(ao, eo, c=color[3], linewidth=lw[i])
plt.xlabel('$a/a_0$')
plt.ylabel('$e/e_0$')
else:
plt.plot(time, mtot, c=color[0], linewidth=lw[i])
plt.xlabel('time (yr)')
plt.ylabel('M')
i = 1
else:
if two_frames: plt.subplot(1,2,1)
plt.plot(time, ai, c=color[1], linewidth=lw[i],
label='inner, mass loss')
plt.plot(time, ao, c=color[2], linewidth=lw[i],
label='outer, mass loss')
if two_frames:
plt.subplot(1,2,2)
if plot_ae:
plt.plot(ai, ei, c=color[1], linewidth=lw[i])
plt.plot(ao, eo, c=color[2], linewidth=lw[i])
else:
plt.plot(time, mtot, c=color[1], linewidth=lw[i])
if two_frames: plt.subplot(1,2,1)
plt.legend(loc='best')
integrators = ['hermite', 'smalln', 'huayno', 'symple']
label = integrators[integrator]
label += ' integrator, stellev scheme= {:d}'.format(scheme)
save_file \
= 'evolve_triple_with_wind_t={:.3f}'.format(t_end.value_in(units.Myr)) \
+'_i={:d}_s={:d}'.format(integrator, scheme)
if scheme < 5:
label += ', dtse_fac = {:.3f}'.format(dtse_fac)
save_file += '_dtsefac={:.3f}'.format(dtse_fac)
else:
label += ', dt_se = {:.1f}'.format(dt_se.value_in(units.yr))
save_file += '_dtse={:.1f}'.format(dt_se.value_in(units.yr))
save_file += '.png'
if two_frames:
plt.tight_layout()
plt.subplots_adjust(top=0.88)
#plt.suptitle(label, y=0.97, fontsize=15)
ax = plt.gca()
ax.minorticks_on() # switch on the minor ticks
ax.tick_params(axis='both', which='both', direction='in')
ax.locator_params(nbins=3)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
plt.savefig(save_file, dpi=300)
print '\nSaved figure in file', save_file,'\n'
if show: plt.show()
def new_option_parser():
from amuse.units.optparse import OptionParser
result = OptionParser()
result.add_option("--ain", unit=units.AU,
dest="ain", type="float", default = 0.63|units.AU,
help="orbital separation [%default]")
result.add_option("--aout", unit=units.AU,
dest="aout", type="float", default = 100|units.AU,
help="orbital separation [%default]")
result.add_option("--dtse", unit=units.Myr,
dest="dt_se", type="float", default = 1.e-3|units.Myr,
help="stellar mass-loss time step [%default]")
result.add_option("--dtse_fac",
dest="dtse_fac", type="float", default = 0.1,
help="stellar mass-loss time step fraction [%default]")
result.add_option("--ein",
dest="ein", type="float", default = 0.2,
help="orbital eccentricity [%default]")
result.add_option("--eout",
dest="eout", type="float", default = 0.6,
help="orbital eccentricity [%default]")
result.add_option("-i",
dest="integrator", type="int", default = 2,
help="integration scheme [%default]")
result.add_option("-I",
dest="interp", action="store_false", default = True,
help="interpolate stellar evolution [%default]")
result.add_option("--M1", unit=units.MSun,
dest="M1", type="float", default = 60|units.MSun,
help="Primary mass [%default]")
result.add_option("--M2", unit=units.MSun,
dest="M2", type="float", default = 30|units.MSun,
help="secondary mass [%default]")
result.add_option("--M3", unit=units.MSun,
dest="M3", type="float", default = 20|units.MSun,
help="secondary mass [%default]")
result.add_option("-n",
dest="nsteps", type="int", default = 1000,
help="number of data points [%default]")
result.add_option("--Pin", unit=units.day,
dest="Pin", type="float", default = 19|units.day,
help="orbital period [%default]")
result.add_option("--Pora",
dest="Pora", type="int", default = 1,
help="period (1) or semimajor axis (2) [%default]")
result.add_option("-s",
dest="scheme", type="int", default = 3,
help="stellar integration method [%default]")
result.add_option("-S",
dest="show", action="store_false", default = True,
help="show plot on display [%default]")
result.add_option("-t", unit=units.Myr,
dest="t_end", type="float", default = 1.e-3|units.Myr,
help="end time of the dynamical simulation [%default]")
result.add_option("--ts", unit=units.Myr,
dest="t_stellar", type="float", default = 4.|units.Myr,
help="stellar evolution time [%default]")
return result
if __name__ in ('__main__', '__plot__'):
#set_printing_strategy("custom",
# preferred_units = [units.MSun, units.AU, units.Myr],
# precision = 12, prefix = "",
# separator = " [", suffix = "]")
o, arguments = new_option_parser().parse_args()
print o.__dict__
main(**o.__dict__)
```
#### File: examples/textbook/merge_two_stars_and_evolve_orig.py
```python
import numpy
from matplotlib import pyplot
from amuse.lab import *
from amuse.community.mmams.interface import MakeMeAMassiveStarInterface
from amuse.community.mmams.interface import MakeMeAMassiveStar
from amuse.couple.collision_handler import CollisionHandler
from prepare_figure import single_frame
from distinct_colours import get_distinct
default_options = dict()
def get_density_profile(code=MESA, M=1.0|units.MSun, z=0.02,
t=2|units.Myr):
stellar = code()
stellar.parameters.metallicity = z
stellar.particles.add_particle(Particle(mass=M))
print "Nzones=", stellar.particles.get_number_of_zones()
stellar.evolve_model(t)
radius = stellar.particles[0].get_radius_profile()
rho = stellar.particles[0].get_density_profile()
stellar.stop()
return radius, rho
def print_stars(stellar_evolution):
print "Primary: Time=", stellar_evolution.model_time.in_(units.Myr), \
stellar_evolution.particles[0].mass.in_(units.MSun), \
stellar_evolution.particles[0].radius.in_(units.RSun), \
stellar_evolution.particles[0].temperature.in_(units.K), \
stellar_evolution.particles[0].luminosity.in_(units.LSun)
print "Secondary: Time=", stellar_evolution.model_time.in_(units.Myr), \
stellar_evolution.particles[1].mass.in_(units.MSun), \
stellar_evolution.particles[1].radius.in_(units.RSun), \
stellar_evolution.particles[1].temperature.in_(units.K), \
stellar_evolution.particles[1].luminosity.in_(units.LSun)
def merge_two_stars(Mprim, Msec, tcoll, tend):
stars = Particles(2)
stars.mass = [Mprim.value_in(units.MSun),
Msec.value_in(units.MSun)] | units.MSun
stellar_evolution = MESA()
stellar_evolution.particles.add_particles(stars)
time = [] | units.Myr
mass = [] | units.MSun
radius = [] | units.RSun
temperature = [] | units.K
luminosity = [] | units.LSun
stellar_type = []
nmerge = 0
while stellar_evolution.model_time < tcoll:
stellar_evolution.evolve_model()
print_stars(stellar_evolution)
time.append(stellar_evolution.model_time)
mass.append(stellar_evolution.particles[0].mass)
radius.append(stellar_evolution.particles[0].radius)
temperature.append(stellar_evolution.particles[0].temperature)
luminosity.append(stellar_evolution.particles[0].luminosity)
stellar_type.append(stellar_evolution.particles[0].stellar_type)
nmerge += 1
n_shell = min(stellar_evolution.particles[0].get_number_of_zones(),
stellar_evolution.particles[1].get_number_of_zones())
merger = MakeMeAMassiveStar(**default_options)
merger.parameters.target_n_shells = n_shell
merger.parameters.dump_mixed_flag = True
merger.parameters.do_shock_heating_flag = True
merger.commit_parameters()
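# The collision handler merges the two stars (here via MMAMS) and leaves
# the merger product in the stellar evolution code.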
handler = CollisionHandler(merger,
stellar_evolution_code = stellar_evolution)
merger_product = handler.handle_collision(stellar_evolution.particles[0],
stellar_evolution.particles[1])
merged = stellar_evolution.particles[0]
print "Stars merged:", merged
stellar_evolution.evolve_model(keep_synchronous=True)
print "star A:", stellar_evolution.particles
while stellar_evolution.model_time < tend:
stellar_evolution.evolve_model()
time.append(stellar_evolution.model_time)
mass.append(stellar_evolution.particles[0].mass)
radius.append(stellar_evolution.particles[0].radius)
temperature.append(stellar_evolution.particles[0].temperature)
luminosity.append(stellar_evolution.particles[0].luminosity)
stellar_type.append(stellar_evolution.particles[0].stellar_type)
print "Time=", time[-1], stellar_type[-1], mass[-1], radius[-1], \
temperature[-1].in_(units.K), luminosity[-1].in_(units.LSun)
if stellar_type[-1] >= 4 | units.stellar_type:
break
print "star B:", stellar_evolution.particles
rho_profile = merged.get_density_profile()
radius_profile = merged.get_radius_profile()
merger.stop()
stellar_evolution.stop()
return time, stellar_type, mass, radius, temperature, luminosity, nmerge
def evolve_single_star(mass, tend):
star = Particles(1)
star.mass = mass
stellar_evolution = MESA()
stellar_evolution.particles.add_particles(star)
time = [] | units.Myr
mass = [] | units.MSun
radius = [] | units.RSun
temperature = [] | units.K
luminosity = [] | units.LSun
stellar_type = []
while stellar_evolution.model_time < tend:
stellar_evolution.evolve_model()
time.append(stellar_evolution.model_time)
mass.append(stellar_evolution.particles[0].mass)
radius.append(stellar_evolution.particles[0].radius)
temperature.append(stellar_evolution.particles[0].temperature)
luminosity.append(stellar_evolution.particles[0].luminosity)
stellar_type.append(stellar_evolution.particles[0].stellar_type)
print "Time=", time[-1], stellar_type[-1], mass[-1], radius[-1], \
temperature[-1].in_(units.K), luminosity[-1].in_(units.LSun)
if stellar_type[-1] >= 4 | units.stellar_type:
break
stellar_evolution.stop()
return time, stellar_type, mass, radius, temperature, luminosity
def new_option_parser():
from amuse.units.optparse import OptionParser
result = OptionParser()
result.add_option("--tcoll", unit=units.Myr,
dest="tcoll", type="float",default = 1|units.Myr,
help="moment of collision [%default]")
result.add_option("--tend", unit=units.Myr,
dest="tend", type="float",default = 1|units.Myr,
help="evolution after the collision [%default]")
result.add_option("-M", unit=units.MSun,
dest="Mprim", type="float",default = 1|units.MSun,
help="Primary ZAMS mass [%default]")
result.add_option("-m", unit=units.MSun,
dest="Msec", type="float",default = 1|units.MSun,
help="Secondary ZAMS mass [%default]")
return result
def plot_post_collision_star(time, mass, radius, temperature, luminosity):
pyplot.subplot(2,2,1)
pyplot.plot(time.value_in(units.Myr), mass.value_in(units.MSun))
pyplot.subplot(2,2,2)
pyplot.plot(time.value_in(units.Myr), radius.value_in(units.RSun))
pyplot.subplot(2,2,3)
pyplot.plot(temperature.value_in(units.K), luminosity.value_in(units.LSun))
pyplot.loglog()
pyplot.show()
if __name__ in ('__main__','__plot__'):
set_printing_strategy("custom", #nbody_converter = converter,
precision = 15, prefix = "",
separator = " [", suffix = "]")
o, arguments = new_option_parser().parse_args()
x_label = "T [K]"
y_label = "L [$L_\odot$]"
figure = single_frame(x_label, y_label, logx=True, logy=True,
xsize=14, ysize=10)
color = get_distinct(4)
pyplot.xlim(5.e+4, 1.e+3)
Mprim = 3.0|units.MSun
Msec = 1.0|units.MSun
tend = 2.0|units.Gyr
print "Evolve single star"
time, stp, mass, radius, temperature, luminosity \
= evolve_single_star(Mprim, tend)
pyplot.plot(temperature.value_in(units.K),
luminosity.value_in(units.LSun),
c=color[1])
pyplot.scatter(temperature[0].value_in(units.K),
luminosity[0].value_in(units.LSun),
c=color[0], marker="^", s=150, lw=0)
tms = 0 |units.Myr
for i in range(len(stp)):
if stp[i] >= 2 | units.stellar_type:
tms = time[i]
if tms <= 1|units.Myr:
tms = 10|units.Myr
print "Main-sequence age:", tms.in_(units.Myr)
tend = tms
print "Evolve single star"
time, stp, mass, radius, temperature, luminosity \
= evolve_single_star(Mprim+Msec, tend)
pyplot.plot(temperature.value_in(units.K),
luminosity.value_in(units.LSun),
c=color[1])
pyplot.scatter(temperature[0].value_in(units.K),
luminosity[0].value_in(units.LSun),
c=color[1], s=150, marker="^")
tcoll = 0.5*tend
print "Evolve two single stars and collide at:", tcoll.in_(units.Myr)
time, stp, mass, radius, temperature, luminosity, nmerge \
= merge_two_stars(Mprim, Msec, tcoll, tend)
pyplot.plot(temperature[nmerge+1:].value_in(units.K),
luminosity[nmerge+1:].value_in(units.LSun),
c=color[2], ls="--")
pyplot.scatter(temperature[nmerge-1:nmerge+1].value_in(units.K),
luminosity[nmerge-1:nmerge+1].value_in(units.LSun),
c=color[2], s=150, marker="o")
tcoll = 2*tcoll
print "Evolve two single stars and collide at:", tcoll.in_(units.Myr)
time, stp, mass, radius, temperature, luminosity, nmerge \
= merge_two_stars(Mprim, Msec, tcoll, tend)
pyplot.plot(temperature[:nmerge].value_in(units.K),
luminosity[:nmerge].value_in(units.LSun),
c=color[3], ls="-")
pyplot.plot(temperature[nmerge+1:].value_in(units.K),
luminosity[nmerge+1:].value_in(units.LSun),
c=color[3], ls="--")
pyplot.savefig("merge_two_stars_and_evolve")
```
#### File: examples/textbook/multiple_stellar_threaded.py
```python
import Queue
import threading
import multiprocessing
from amuse.lab import *
###BOOKLISTSTART1###
code_queue = Queue.Queue()
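# Each worker thread pulls one stellar-evolution code class from the queue
# and evolves a single star with it.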
def remote_worker_code():
code = code_queue.get()
evolve_single_star(code)
code_queue.task_done()
def evolve_with_different_stellar_model(codes):
for ci in codes:
code_queue.put(ci)
n_cpu = multiprocessing.cpu_count()
for i in range(n_cpu):
th = threading.Thread(target=remote_worker_code)
th.daemon = True
th.start()
code_queue.join() # block until all tasks are done
###BOOKLISTSTOP1###
###BOOKLISTSTART2###
def evolve_single_star(code):
stars = Particles(mass=10|units.MSun)
stellar = code()
stellar.particles.add_particles(stars)
channel = stellar.particles.new_channel_to(stars)
stellar.evolve_model(1|units.Myr)
channel.copy()
print "Star evolved to time=", stellar.model_time, \
" M=", stars.mass, "R=", stars.radius
stellar.stop()
###BOOKLISTSTOP2###
def new_option_parser():
from amuse.units.optparse import OptionParser
result = OptionParser()
result.add_option("-t", action="store_true",
dest="threaded", help="run threaded [%default]")
return result
if __name__ in ('__main__', '__plot__'):
o, arguments = new_option_parser().parse_args()
set_printing_strategy("custom",\
preferred_units = [units.MSun, units.RSun, units.Myr],\
precision = 6, prefix = "", separator = "[", suffix = "]")
codes = [SeBa, MESA, SSE, EVtwin]
if o.threaded:
print "Run threaded"
evolve_with_different_stellar_model(codes)
else:
print "Run sequentially"
for ci in codes:
evolve_single_star(ci)
```
#### File: examples/textbook/old_merge_two_stars_sph_evolve.py
```python
import os
import os.path
import shutil
import numpy
from amuse.lab import *
from amuse.community.mesa.interface import MESA as stellar_evolution_code
from amuse.ext.star_to_sph import convert_stellar_model_to_SPH
from amuse.ext.sph_to_star import convert_SPH_to_stellar_model
from prepare_figure import single_frame
from distinct_colours import get_distinct
from matplotlib import pyplot
def plot_clumps(groups):
number_of_particles_in_group = []
fraction_of_mass_in_group = []
# number_of_particles_in_group.append(len(group))
# fraction = (group.mass.sum()/total_mass)
# fraction_of_mass_in_group.append(fraction)
print "N=", len(groups)
ci = ['r', 'b', 'g', 'k']
figure = pyplot.figure(figsize=(12,6))
i = 0
alpha = 1
sizes = 50
for group in groups:
pyplot.scatter(group.x.value_in(units.RSun),
group.y.value_in(units.RSun),
sizes, ci[i], edgecolors = "none", alpha = alpha)
# pyplot.scatter(
# group.x.value_in(units.RSun),
# group.y.value_in(units.RSun),
# s = 1,#group.mass.value_in(units.MSun),
# c = ci[i]
# )
i+=1
pyplot.xlabel('x (RSun)')
pyplot.ylabel('y (RSun)')
# pyplot.xlim(-30, 30)
# pyplot.ylim(-30, 30)
pyplot.show()
def find_clumps(particles, unit_converter):
hop = Hop(unit_converter)
hop.particles.add_particles(particles)
hop.calculate_densities()
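# Set the Hop clump-finding thresholds relative to the mean SPH density.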
mean_density = hop.particles.density.mean()
hop.parameters.peak_density_threshold = mean_density
hop.parameters.saddle_density_threshold = 0.99*mean_density
hop.parameters.outer_density_threshold = 0.01*mean_density
# print "Peak density threshold:",
hop.do_hop()
result = [x.get_intersecting_subset_in(particles) for x in hop.groups()]
hop.stop()
return result
def evolve_single_star(mass, tend):
star = Particles(1)
star.mass = mass
stellar_evolution = MESA()
stellar_evolution.particles.add_particles(star)
time = [] | units.Myr
mass = [] | units.MSun
radius = [] | units.RSun
temperature = [] | units.K
luminosity = [] | units.LSun
stellar_type = []
while stellar_evolution.model_time<tend:
stellar_evolution.evolve_model()
time.append(stellar_evolution.model_time)
mass.append(stellar_evolution.particles[0].mass)
radius.append(stellar_evolution.particles[0].radius)
temperature.append(stellar_evolution.particles[0].temperature)
luminosity.append(stellar_evolution.particles[0].luminosity)
stellar_type.append(stellar_evolution.particles[0].stellar_type)
print "Time=", time[-1], stellar_type[-1], mass[-1], radius[-1], \
temperature[-1].in_(units.K), luminosity[-1].in_(units.LSun)
if stellar_type[-1] >= 4 | units.stellar_type:
break
stellar_evolution.stop()
return time, stellar_type, mass, radius, temperature, luminosity
def merge_two_stars_sph_and_evolve(Mprim, Msec, tcoll, tend):
stars = Particles(2)
stars[0].mass = Mprim
stars[1].mass = Msec
stellar = EVtwin()
stellar.particles.add_particle(stars[0])
stellar.particles.add_particle(stars[1])
time = [] | units.Myr
mass = [] | units.MSun
radius = [] | units.RSun
temperature = [] | units.K
luminosity = [] | units.LSun
while stellar.model_time < tcoll:
stellar.evolve_model()
time.append(stellar.model_time)
mass.append(stellar.particles[0].mass)
radius.append(stellar.particles[0].radius)
temperature.append(stellar.particles[0].temperature)
luminosity.append(stellar.particles[0].luminosity)
print "Time=", time[-1], mass[-1], radius[-1], \
temperature[-1].in_(units.K), luminosity[-1].in_(units.LSun)
n_normal = len(time)
print stars
Nprim = int(100*stellar.particles[0].mass.value_in(units.MSun))
mgas = stellar.particles[0].mass/Nprim
Nsec = int(stellar.particles[1].mass/mgas)
print "N gas=", Nprim, Nsec
sph_primary = convert_stellar_model_to_SPH(
stellar.particles[0],
Nprim,
seed=12345
).gas_particles
sph_secondary = convert_stellar_model_to_SPH(
stellar.particles[0],
Nsec,
seed=12345
).gas_particles
stellar.stop()
distance = 1 | units.RSun
sph_secondary.x += distance
sph_secondary.vx -= 1.7*numpy.sqrt(2*constants.G*stars.mass.sum()/distance)
sph_particles = Particles()
sph_particles.add_particles(sph_primary)
#sph_particles.add_particles(sph_secondary)
sph_particles.move_to_center()
converter = nbody_system.nbody_to_si(1|units.hour, 1|units.RSun)
hydrodynamics = Gadget2(converter)
hydrodynamics.gas_particles.add_particles(sph_particles)
hydrodynamics.evolve_model(10.0|units.hour)
hydrodynamics.gas_particles.copy_values_of_attributes_to(["density", "u",
"pressure"],
sph_particles)
hydrodynamics.stop()
print "N all=", len(sph_particles)
clumps = find_clumps(sph_particles, converter)
#sph_particles = clumps[0]
print "N blob=", len(sph_particles)
#plot_clumps(clumps)
#sph_merger = sph_particles[0]
print "convert SPH to stellar model"
merged_star = convert_SPH_to_stellar_model(sph_particles)
print "initiate stellar evolution model"
#stellar_evolution = MESA()
stellar_evolution = EVtwin(redirect="none")
stellar_evolution.new_particle_from_model(merged_star, 0.0|units.Myr)
print "star:", stellar_evolution.particles
print "evolve star"
#stellar_evolution.evolve_model(tend)
while stellar_evolution.model_time<(tend-tcoll):
stellar_evolution.evolve_model()
time.append(stellar_evolution.model_time)
mass.append(stellar_evolution.particles[0].mass)
radius.append(stellar_evolution.particles[0].radius)
temperature.append(stellar_evolution.particles[0].temperature)
luminosity.append(stellar_evolution.particles[0].luminosity)
print "Time=", time[-1], mass[-1], radius[-1], \
temperature[-1].in_(units.K), luminosity[-1].in_(units.LSun)
print stellar_evolution.particles
stellar_evolution.stop()
return time, mass, radius, temperature, luminosity, n_normal
def new_option_parser():
from amuse.units.optparse import OptionParser
result = OptionParser()
result.add_option("-f",
dest="filename",
default = "hydro_triple_gas.hdf5",
help="input filename [%default]")
result.add_option("--tcoll", unit=units.Myr,
dest="tcoll", type="float",
default = 50|units.Myr,
help="evolution time scale [%default]")
result.add_option("--tend", unit=units.Myr,
dest="tend", type="float",
default = 423|units.Myr,
help="evolution time scale [%default]")
result.add_option("-M", unit=units.MSun,
dest="Mprim", type="float",
default = 3|units.MSun,
help="stellar mass [%default]")
result.add_option("-m", unit=units.MSun,
dest="Msec", type="float",
default = 1|units.MSun,
help="stellar mass [%default]")
return result
if __name__ == "__main__":
set_printing_strategy("custom", #nbody_converter = converter,
preferred_units = [units.MSun, units.RSun,
units.Myr],
precision = 11, prefix = "",
separator = " [", suffix = "]")
o, arguments = new_option_parser().parse_args()
Mprim = o.Mprim
Msec = o.Msec
tend = o.tend
tcoll = o.tcoll
x_label = "T [K]"
y_label = "L [$L_\odot$]"
figure = single_frame(x_label, y_label, logx=True, logy=True,
xsize=14, ysize=10)
color = get_distinct(4)
pyplot.xlim(5.e+4, 1.e+3)
print "Evolve single star of M=", (Mprim).in_(units.MSun)
time, stp, mass, radius, temperature, luminosity \
= evolve_single_star(Mprim, tend)
pyplot.plot(temperature.value_in(units.K),
luminosity.value_in(units.LSun),
c=color[0], lw=2)
pyplot.scatter(temperature[0].value_in(units.K),
luminosity[0].value_in(units.LSun),
c=color[0], s=150, marker="^")
print "Evolve single star of M=", (Mprim).in_(units.MSun)+(0.2|units.MSun)
time, stp, mass, radius, temperature, luminosity \
= evolve_single_star(Mprim+(0.2|units.MSun), tend)
pyplot.plot(temperature.value_in(units.K),
luminosity.value_in(units.LSun),
c=color[1], lw=2)
pyplot.scatter(temperature[0].value_in(units.K),
luminosity[0].value_in(units.LSun),
c=color[1], s=150, marker="^")
print "Evolve single star of M=", \
(Mprim+Msec).in_(units.MSun) + (0.4|units.MSun)
time, stp, mass, radius, temperature, luminosity \
= evolve_single_star(Mprim+(0.4|units.MSun), tend)
pyplot.plot(temperature.value_in(units.K),
luminosity.value_in(units.LSun),
c=color[3], lw=2)
pyplot.scatter(temperature[0].value_in(units.K),
luminosity[0].value_in(units.LSun),
c=color[3], s=150, marker="^")
tms = 0 |units.Myr
for i in range(len(stp)):
if stp[i]>=2 | units.stellar_type:
tms = time[i]
if tms <= 1|units.Myr:
tms = 10|units.Myr
print "Main-sequence age:", tms.in_(units.Myr)
tend = tms
print "Main sequence lifetime of star=", tms.in_(units.Myr)
#tcoll = 0.5*tms
time, mass, radius, temperature, luminosity, n \
= merge_two_stars_sph_and_evolve(o.Mprim, o.Msec, tcoll, o.tend)
pyplot.scatter(temperature[0].value_in(units.K),
luminosity[0].value_in(units.LSun),
c=color[2], s=150, marker="^")
pyplot.plot(temperature[:n].value_in(units.K),
luminosity[:n].value_in(units.LSun),
c=color[2])
pyplot.scatter(temperature[n-1].value_in(units.K),
luminosity[n-1].value_in(units.LSun),
c=color[2], s=150, marker="o")
pyplot.plot(temperature[n:].value_in(units.K),
luminosity[n:].value_in(units.LSun),
c=color[2])
pyplot.scatter(temperature[n+1].value_in(units.K),
luminosity[n+1].value_in(units.LSun),
c=color[2], s=150, marker="o")
pyplot.show()
```
#### File: examples/textbook/plot_M67withBSS_tracks.py
```python
from amuse.lab import *
from matplotlib import pyplot
import plot_M67Data
from prepare_figure import single_frame, figure_frame, set_tickmarks
from distinct_colours import get_distinct
TBSS = [6170, 6820, 6675, 7050, 6650]
LBSS = [12.259, 11.078, 12.127, 11.226, 12.892]
def single_star_evolution(M, z, model_time):
stellar = SeBa()
stellar.parameters.metallicity = z
stellar.particles.add_particle(Particle(mass=M))
stellar.commit_particles()
initial_luminosity = stellar.particles.luminosity
dt = 1 | units.Myr
time = 0 | units.Myr
L = [] | units.LSun
T = [] | units.K
while stellar.particles[0].age<model_time:
time += dt
stellar.evolve_model(time)
L.append(stellar.particles[0].luminosity)
T.append(stellar.particles[0].temperature)
final_luminosity = stellar.particles.luminosity
print "L(t=0)=", initial_luminosity, \
", L (t=", stellar.particles.age, ")=", \
final_luminosity, stellar.particles.radius
stellar.stop()
return L, T
def new_option_parser():
from amuse.units.optparse import OptionParser
result = OptionParser()
result.add_option("-M", unit= units.MSun,
dest="M", type="float",default = 1.7 | units.MSun,
help="stellar mass [1.0] %unit")
result.add_option("-t", unit = units.Myr,
dest="model_time", type="float",
default = 4000.0|units.Myr,
help="end time of the simulation [4.7] %unit")
result.add_option("-z", dest="z", type="float",
default = 0.02, help="metalicity [0.02]")
return result
if __name__ in ('__main__', '__plot__'):
o, arguments = new_option_parser().parse_args()
x_label = "T [$K$]"
y_label = "L [$L_\odot$]"
figure = single_frame(x_label, y_label, logx=False, logy=True,
xsize=14, ysize=10)
color = get_distinct(6)
L, T = single_star_evolution(M=1.4|units.MSun, z=0.04,
model_time=4|units.Gyr)
pyplot.plot(T.value_in(units.K),L.value_in(units.LSun), c=color[0])
L, T = single_star_evolution(**o.__dict__)
pyplot.plot(T.value_in(units.K),L.value_in(units.LSun), c=color[4])
m67 = plot_M67Data.Cluster()
m67.read()
pyplot.scatter(m67.Teff, m67.L, c=color[1], lw=0, s=100)
pyplot.scatter(TBSS, LBSS, c=color[3], lw=0, s=250)
pyplot.scatter(TBSS[3], LBSS[3], c=color[2], lw=0, s=250)
pyplot.xlim(9000, 4000)
pyplot.ylim(1, 50)
save_file = 'fig_M67withBSS_tracks.png'
pyplot.savefig(save_file)
print '\nSaved figure in file', save_file,'\n'
pyplot.show()
```
#### File: examples/textbook/plot_wind_disk_interaction.py
```python
import sys
import numpy
from matplotlib import pyplot
from amuse.plot import scatter, xlabel, ylabel
from amuse.lab import *
from amuse.io import store
#from optparse import OptionParser
from time import sleep
from amuse.units.optparse import OptionParser
from amuse.plot import sph_particles_plot
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
import numpy as np
from prepare_figure import single_frame
#from distinct_colours import get_distinct
def main(filename = "hydro.hdf5", lim=None, image_id=-1, nice_plot=1):
x_label = 'x'
y_label = 'y'
figure = single_frame(x_label, y_label, logy=False, xsize=14, ysize=10)
stars = read_set_from_file(filename, "hdf5")
print stars
snapshot_id = 0
isgas = True
snapshot_id = 0
sinks = Particles(0)
for si in stars.history:
if isgas:
gas = si
isgas = False
else:
sinks = si
isgas = True
if not isgas:
snapshot_id += 1
time = gas.get_timestamp()
if image_id<0 or image_id == snapshot_id:
pyplot.hist2d(gas.x.value_in(units.AU),
gas.y.value_in(units.AU),
(200, 200), cmap=plt.cm.jet)
pyplot.scatter(gas.x.value_in(units.AU), gas.y.value_in(units.AU), c='y', s=100)
if False:
for gi in gas:
pyplot.arrow(gi.x.value_in(units.AU), gi.y.value_in(units.AU),
gi.vx.value_in(units.AU/units.yr), gi.vy.value_in(units.AU/units.yr),
head_width=0.05, head_length=0.1)
# sph_particles_plot(gas, u_range=[min(gas.u), max(gas.u)], width=lim, alpha = 1)
# if len(sinks):
# scatter(sinks.x.value_in(units.AU), sinks.y.value_in(units.AU), c='y', s=100)
if image_id == snapshot_id:
break
pyplot.xlabel("X [pc]")
pyplot.ylabel("Y [pc]")
pyplot.show()
def new_option_parser():
result = OptionParser()
result.add_option("-f", dest="filename", default = "DiskWind.h5",
help="output filename [DiskWind.h5]")
result.add_option("-l", unit=units.parsec,
dest="lim", type="float", default = None,
help="boxsize [%default]")
result.add_option("-i",
dest="image_id", type="int", default = -1,
help="image id [%default]")
result.add_option("-p",
dest="nice_plot", type="int", default = 1,
help="nice plot [%default]")
return result
if __name__ in ('__main__', '__plot__'):
o, arguments = new_option_parser().parse_args()
filename = "hydro_GMC.h5"
gas = read_set_from_file(filename, "hdf5")
main(**o.__dict__)
```
#### File: examples/textbook/Sun_and_M67_in_the_Galaxy.py
```python
import numpy
import math
from amuse.lab import *
from amuse import datamodel
from amuse.units import quantities
from amuse.ext.rotating_bridge import Rotating_Bridge
from amuse.community.galaxia.interface import BarAndSpirals3D
from amuse.ext.composition_methods import *
from prepare_figure import *
from distinct_colours import get_distinct
class drift_without_gravity(object):
"""
This class evolves non-interacting particles (a pure drift), for use with bridge.
"""
def __init__(self, particles, time= 0 |units.Myr):
self.particles=particles
self.model_time= time
def evolve_model(self, t_end):
dt= t_end- self.model_time
self.particles.position += self.particles.velocity*dt
self.model_time= t_end
@property
def potential_energy(self):
return quantities.zero
@property
def kinetic_energy(self):
return (0.5*self.particles.mass*self.particles.velocity.lengths()**2).sum()
I = 0
class IntegrateOrbit(object):
"""
This class makes the integration of the Sun in the Milky Way
by using BarAndSpirals3D.
galaxy(): Function that sets the desired Galactic model. Any question on the parameters, contact me
creation_particles_noinertial(): creates a parti le set in a rotating frame
noinertial_to_inertial(): converts data from rotating to inertial frame
get_pos_vel_and_orbit(): Makes the evolution of the particle set
"""
def __init__(self, t_end= 10 |units.Myr, dt_bridge= 0.5 |units.Myr,
                 method= SPLIT_6TH_SS_M13, phase_bar= 0, phase_spiral= 0,
                 omega_spiral= -20 |(units.kms/units.kpc),
                 amplitude= 650 |(units.kms**2/units.kpc), m= 4,
                 omega_bar= -50 |(units.kms/units.kpc),
                 mass_bar= 1.1e10 |units.MSun):
# Simulation parameters
self.t_end= t_end
self.dt_bridge= dt_bridge
self.method= method
self.time= 0 |units.Myr
#galaxy parameters
self.omega= 0 | (units.kms/units.kpc)
self.initial_phase= 0
self.bar_phase= phase_bar
self.spiral_phase= phase_spiral
self.omega_spiral= omega_spiral
self.amplitude= amplitude
self.rsp= 3.12 |units.kpc
self.m= m
self.tan_pitch_angle= 0.227194425
self.omega_bar= omega_bar
self.mass_bar= mass_bar
self.aaxis_bar= 3.12 |units.kpc
self.axis_ratio_bar= 0.37
return
def galaxy(self):
global I
galaxy= BarAndSpirals3D(redirection='file', redirect_stdout_file="GAL{0}.log".format(I))
I = I + 1
galaxy.kinetic_energy=quantities.zero
galaxy.potential_energy=quantities.zero
galaxy.parameters.bar_contribution= True
galaxy.parameters.bar_phase= self.bar_phase
galaxy.parameters.omega_bar= self.omega_bar
galaxy.parameters.mass_bar= self.mass_bar
galaxy.parameters.aaxis_bar= self.aaxis_bar
galaxy.parameters.axis_ratio_bar= self.axis_ratio_bar
galaxy.parameters.spiral_contribution= False
galaxy.parameters.spiral_phase= self.spiral_phase
galaxy.parameters.omega_spiral= self.omega_spiral
galaxy.parameters.amplitude= self.amplitude
galaxy.parameters.rsp= self.rsp
galaxy.parameters.m= self.m
galaxy.parameters.tan_pitch_angle= self.tan_pitch_angle
galaxy.commit_parameters()
self.omega= galaxy.parameters.omega_system
self.initial_phase= galaxy.parameters.initial_phase
print "INITIAL_PHASE:", self.initial_phase
galaxy.kinetic_energy=quantities.zero
galaxy.potential_energy=quantities.zero
return galaxy
def creation_particles_noinertial(self, particles):
"""
Transforms a particle set to the counterclockwise rotating frame.
If the Galaxy has only a bar or only spiral arms, the frame corotates with
that component; if it has both bar and spiral arms, it corotates with the bar.
"""
no_inertial_system= particles.copy()
angle= self.initial_phase + self.omega*self.time
C1= particles.vx + self.omega*particles.y
C2= particles.vy - self.omega*particles.x
no_inertial_system.x = particles.x*numpy.cos(angle) + particles.y*numpy.sin(angle)
no_inertial_system.y = -particles.x*numpy.sin(angle) + particles.y*numpy.cos(angle)
no_inertial_system.z = particles.z
no_inertial_system.vx = C1*numpy.cos(angle) + C2*numpy.sin(angle)
no_inertial_system.vy = C2*numpy.cos(angle) - C1*numpy.sin(angle)
no_inertial_system.vz = particles.vz
return no_inertial_system
def noinertial_to_inertial(self, part_noin, part_in):
# Transform back from the rotating frame to the inertial frame.
angle= self.initial_phase + self.omega*self.time
C1= part_noin.vx - part_noin.y*self.omega
C2= part_noin.vy + part_noin.x*self.omega
part_in.x= part_noin.x*numpy.cos(angle)-part_noin.y*numpy.sin(angle)
part_in.y= part_noin.x*numpy.sin(angle)+part_noin.y*numpy.cos(angle)
part_in.z= part_noin.z
part_in.vx= C1*numpy.cos(angle) - C2*numpy.sin(angle)
part_in.vy= C1*numpy.sin(angle) + C2*numpy.cos(angle)
part_in.vz= part_noin.vz
return
def testing_potential_and_force(self, galaxy, x, y, z):
dx, dy, dz = 0.001 |units.kpc, 0.001 |units.kpc, 0.001 |units.kpc
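# Compare the analytic forces with central finite differences of the
# potential: f_x ~ -(phi(x+dx) - phi(x-dx))/(2*dx), and similarly for y, z.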
phi1x= galaxy.get_potential_at_point(0 |units.kpc, (x+dx), y, z)
phi2x= galaxy.get_potential_at_point(0 |units.kpc, (x-dx), y, z)
f1x= -(phi1x-phi2x)/(2*dx)
phi1y= galaxy.get_potential_at_point(0 |units.kpc, x, (y+dy), z)
phi2y= galaxy.get_potential_at_point(0 |units.kpc, x, (y-dy), z)
f1y= -(phi1y-phi2y)/(2*dy)
phi1z= galaxy.get_potential_at_point(0 |units.kpc, x, y, (z+dz))
phi2z= galaxy.get_potential_at_point(0 |units.kpc, x, y, (z-dz))
f1z= -(phi1z-phi2z)/(2*dz)
fx,fy,fz= galaxy.get_gravity_at_point(0 |units.kpc, x, y, z)
print "analytic", "numerical"
print fx.value_in(100*units.kms**2/units.kpc) , f1x.value_in(100*units.kms**2/units.kpc)
print fy.value_in(100*units.kms**2/units.kpc) , f1y.value_in(100*units.kms**2/units.kpc)
print fz.value_in(100*units.kms**2/units.kpc) , f1z.value_in(100*units.kms**2/units.kpc)
return
def get_pos_vel_and_orbit(self, particle_set):
#particle_set.velocity= (-1)*particle_set.velocity
filename="sunandM67.hdf5"
write_set_to_file(particle_set.savepoint(self.time),
filename, "hdf5", append_to_file=False)
MW= self.galaxy()
print "OMEGA:", self.omega.as_quantity_in(1/units.Gyr)
particle_rot= self.creation_particles_noinertial(particle_set)
gravless= drift_without_gravity(particle_rot)
system= Rotating_Bridge(self.omega, timestep= self.dt_bridge, verbose= False, method= self.method)
system.add_system(gravless, (MW,), False)
system.add_system(MW, (), False) # This is to update time inside the interface
Ei= system.potential_energy+ system.kinetic_energy+ system.jacobi_potential_energy
energy=[]
dmin = (particle_set[0].position-particle_set[1].position).length()
tmin = 0|units.Myr
d = [] | units.kpc
t = [] | units.Myr
d.append(dmin)
t.append(self.time)
while (self.time < self.t_end-self.dt_bridge/2):
self.time += self.dt_bridge
system.evolve_model(self.time)
self.noinertial_to_inertial(particle_rot, particle_set)
Ef= system.potential_energy+ system.kinetic_energy+ system.jacobi_potential_energy
dje= (Ef-Ei)/Ei
energy.append(dje)
d.append((particle_set[0].position-particle_set[1].position).length())
t.append(self.time)
if d[-1]<dmin:
dmin = d[-1]
tmin = self.time
x = particle_set.x
y = particle_set.y
write_set_to_file(particle_set.savepoint(self.time), filename, "hdf5")
print "minimum", tmin.in_(units.Myr), dmin.in_(units.parsec)
bar_angle= self.bar_phase + (self.omega_bar*self.time)
spiral_angle= self.spiral_phase + (self.omega_spiral*self.time)
return self.time, particle_set[0].x.value_in(units.kpc), particle_set[0].y.value_in(units.kpc),\
particle_set[0].z.value_in(units.kpc), particle_set[0].vx.value_in(units.kms), \
particle_set[0].vy.value_in(units.kms), particle_set[0].vz.value_in(units.kms), \
bar_angle , spiral_angle, t, d
def Sun_and_M67_in_the_Galaxy():
bodies = Particles(2)
Sun = bodies[0]
v_LSR = (-10, 5.2, 7.2) | units.kms
Sun.mass = 1|units.MSun
Sun.radius = 1|units.RSun
Sun.position = (8.4, 0.0, 0.017) | units.kpc
Sun.velocity = (-11.4, 232, 7.41) | units.kms # SPZ2009
M67 = bodies[1]
M67.mass = 50000 | units.MSun
M67.radius = 3 | units.parsec
M67.position = Sun.position + ((0.766, 0.0, 0.49) |units.kpc)
M67.velocity = Sun.velocity + ((31.92, -21.66, -8.87) |units.kms)
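# Reverse the velocities so that integrating forward in time traces the
# orbits backward (toward the birth place of the Sun).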
bodies.velocity *= -1
simulation_time= 4600. |units.Myr
dt_bridge= 5 | units.Myr
OS= 20 |(units.kms/units.kpc)
OB= 40 |(units.kms/units.kpc)
A= 1300 |(units.kms**2/units.kpc)
M= 1.4e10 |units.MSun
m=2
phi_bar, phi_sp= -0.34906, -0.34906
inte= IntegrateOrbit(
t_end= simulation_time,
dt_bridge= dt_bridge,
phase_bar= phi_bar, phase_spiral= phi_sp,
omega_spiral= OS, omega_bar= OB,
amplitude= A, m=m, mass_bar= M )
MW= inte.galaxy()
print MW.parameters
print MW.get_phi21()
print "Backwards integration"
time, xf, yf, zf, vxf, vyf, vzf, bar_angle, spiral_angle, t1, d1= inte.get_pos_vel_and_orbit(bodies)
print "Birth position of the Sun:", xf, yf, zf, vxf, vyf, vzf
print "---"
print 'time after backward integration:', time
colors = get_distinct(4)
figure = pyplot.figure(figsize=(16, 12))
ax = pyplot.gca()
ax.minorticks_on() # switch on the minor ticks
ax.locator_params(nbins=3)
x_label = "t [Gyr]"
y_label = "d [kpc]"
pyplot.xlabel(x_label)
pyplot.ylabel(y_label)
pyplot.plot(-t1.value_in(units.Gyr), d1.value_in(units.kpc), lw=3, c=colors[0])
pyplot.ylim(0, 6)
pyplot.xlim(-5, 0)
pyplot.savefig("sun_and_M67")
pyplot.show()
"""
from matplotlib import pyplot
from amuse.plot import scatter, plot
plot(-1*t1, d1, c = "k")
pyplot.semilogy()
pyplot.show()
"""
if __name__ in ('__main__', '__plot__'):
set_printing_strategy("custom", #nbody_converter = converter,
preferred_units = [units.MSun, units.kpc, units.Myr],
precision = 4, prefix = "",
separator = " [", suffix = "]")
Sun_and_M67_in_the_Galaxy()
```
#### File: examples/validation/particles_and_gas_in_cluster.py
```python
from amuse.couple import bridge
from amuse.community.bhtree.interface import BHTree
from amuse.community.hermite0.interface import Hermite
from amuse.community.fi.interface import Fi
from amuse.community.octgrav.interface import Octgrav
from amuse.community.gadget2.interface import Gadget2
from amuse.community.phiGRAPE.interface import PhiGRAPE
from amuse.ic import plummer
from amuse.ic import gasplummer
from amuse.units import units
from amuse.units import constants
from amuse.units import quantities
from amuse.units import nbody_system
from optparse import OptionParser
import numpy
# import time
try:
import pylab
except ImportError:
pylab = None
class GasPlummerModelExternalField(object):
"""
Skeleton gravity code for use in bridge; must provide
get_gravity_at_point and get_potential_at_point.
"""
def __init__(
self,
position=[0, 0, 0] | units.parsec,
radius=1000. | units.parsec,
total_mass=1.6e10 | units.MSun):
self.radius = radius
self.total_mass = total_mass
self.gravity_constant = constants.G
self.position = position
self.radius_squared = (self.radius**2)
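# Analytic Plummer-softened point mass:
#   a(r) = -G*M*(r - r0) / (|r - r0|**2 + radius**2)**1.5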
def get_gravity_at_point(self, eps, x, y, z):
dx = x - self.position.x
dy = y - self.position.y
dz = z - self.position.z
radii_squared = dx**2 + dy**2 + dz**2
# radii = radii_squared**0.5
plummer_radii_squared = radii_squared + self.radius_squared
plummer_radii_15 = plummer_radii_squared ** 1.5
fr = -self.gravity_constant*self.total_mass/plummer_radii_15
ax = fr*dx
ay = fr*dy
az = fr*dz
return ax, ay, az
def get_potential_at_point(self, eps, x, y, z):
dx = x - self.position.x
dy = y - self.position.y
dz = z - self.position.z
radii_squared = dx**2 + dy**2 + dz**2
# radii = radii_squared**0.5
plummer_radii = (radii_squared + self.radius_squared)**0.5
phi = self.gravity_constant*self.total_mass/plummer_radii
return -phi * 2
def stop(self):
pass
@property
def kinetic_energy(self):
return quantities.zero
@property
def potential_energy(self):
return quantities.zero
@property
def thermal_energy(self):
return quantities.zero
class AbstractStarAndGasPlummerCode(object):
def __init__(self,
nstars=10,
ngas=-1,
endtime=10,
total_mass=1000,
gas_fraction=0.9,
rscale=1.0,
star_smoothing_fraction=0.001,
gas_smoothing_fraction=0.05,
seed=-1,
ntimesteps=10,
must_do_plot=True
):
if seed >= 0:
numpy.random.seed(seed)
if ngas < 0:
ngas = nstars * 10
self.must_do_plot = must_do_plot
self.line = None
self.line2 = None
self.ntimesteps = ntimesteps
self.ngas = ngas
self.nstars = nstars
self.total_mass = total_mass | units.MSun
self.gas_fraction = gas_fraction
self.star_fraction = 1.0 - self.gas_fraction
self.rscale = rscale | units.parsec
self.star_epsilon = star_smoothing_fraction * self.rscale
self.gas_epsilon = gas_smoothing_fraction * self.rscale
self.star_mass = self.star_fraction * self.total_mass
self.gas_mass = self.gas_fraction * self.total_mass
self.converter = nbody_system.nbody_to_si(self.total_mass, self.rscale)
self.endtime = self.converter.to_si(endtime | nbody_system.time)
self.delta_t = self.endtime / self.ntimesteps
def update_plot(self, time, code):
time = self.converter.to_nbody(time).value_in(nbody_system.time),
sum_energy = (
code.kinetic_energy
+ code.potential_energy
+ code.thermal_energy
)
energy = self.converter.to_nbody(
sum_energy).value_in(nbody_system.energy)
coreradius = (
self.star_code.particles.virial_radius().value_in(
self.rscale.to_unit())
)
# kicke = self.converter.to_nbody(code.kick_energy).value_in(nbody_system.energy)
if self.line is None:
pylab.ion()
pylab.subplot(1, 2, 1)
self.line = pylab.plot([time], [energy])[0]
pylab.xlim(0, self.converter.to_nbody(
self.endtime).value_in(nbody_system.time))
pylab.ylim(energy * 0.8, energy * 1.2)
pylab.subplot(1, 2, 2)
self.line2 = pylab.plot([time], [coreradius])[0]
# self.line2 = pylab.plot([time], [kicke])[0]
pylab.xlim(0, self.converter.to_nbody(
self.endtime).value_in(nbody_system.time))
pylab.ylim(0, 3)
# pylab.ylim(-0.1, 0.1)
else:
xdata = self.line.get_xdata()
ydata = self.line.get_ydata()
xdata = numpy.concatenate((xdata, time))
ydata = numpy.concatenate((ydata, [energy]))
self.line.set_xdata(xdata)
self.line.set_ydata(ydata)
xdata = self.line2.get_xdata()
ydata = self.line2.get_ydata()
xdata = numpy.concatenate((xdata, time))
# ydata = numpy.concatenate( (ydata, [kicke]) )
ydata = numpy.concatenate((ydata, [coreradius]))
self.line2.set_xdata(xdata)
self.line2.set_ydata(ydata)
pylab.draw()
def new_particles_cluster(self):
particles = plummer.new_plummer_model(
self.nstars, convert_nbody=self.converter)
particles.radius = self.star_epsilon
particles.mass = (1.0/self.nstars) * self.star_mass
return particles
def new_gas_cluster(self):
particles = gasplummer.new_plummer_gas_model(
self.ngas, convert_nbody=self.converter)
particles.h_smooth = self.gas_epsilon
particles.mass = (1.0/self.ngas) * self.gas_mass
return particles
def new_particles_cluster_as_gas(self):
particles = plummer.new_plummer_model(
self.ngas, convert_nbody=self.converter)
particles.radius = self.gas_epsilon
particles.mass = (1.0/self.ngas) * self.gas_mass
return particles
def stop(self):
pass
def evolve_model(self):
if self.must_do_plot:
self.update_plot(time=0 * self.delta_t, code=self.code)
for time in self.delta_t * range(1, self.ntimesteps+1):
self.code.evolve_model(time)
print self.converter.to_nbody(self.code.time)
if self.must_do_plot:
self.update_plot(time=self.code.time, code=self.code)
class BridgeStarAndGasPlummerCode(AbstractStarAndGasPlummerCode):
def __init__(self,
nstars=10,
ngas=-1,
endtime=10,
total_mass=1000,
gas_fraction=0.9,
rscale=1.0,
star_code='hermite',
gas_code='field',
star_smoothing_fraction=0.001,
gas_smoothing_fraction=0.05,
seed=-1,
ntimesteps=10,
interaction_timestep=0.01,
must_do_plot=True,
gas_to_star_interaction_code='none',
star_to_gas_interaction_code='none',
**ignored_options
):
AbstractStarAndGasPlummerCode.__init__(
self,
nstars,
ngas,
endtime,
total_mass,
gas_fraction,
rscale,
star_smoothing_fraction,
gas_smoothing_fraction,
seed,
ntimesteps,
must_do_plot
)
self.interaction_timestep = self.converter.to_si(
interaction_timestep | nbody_system.time)
self.create_codes(
gas_code,
star_code,
gas_to_star_interaction_code,
star_to_gas_interaction_code,
)
self.create_bridge()
self.code = self.bridge_system
time = 0
sum_energy = self.code.kinetic_energy + \
self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(
sum_energy).value_in(nbody_system.energy)
coreradius = self.star_code.particles.virial_radius().value_in(
self.rscale.to_unit())
print "Time :", time
print "Energy :", energy
print "Virial radius :", coreradius
self.evolve_model()
if must_do_plot:
pylab.show()
pylab.savefig(
"{0}-{1}-{2}-{3}.png".format(
star_code,
gas_code,
nstars,
ngas
)
)
time = self.converter.to_nbody(
self.code.time).value_in(nbody_system.time)
sum_energy = self.code.kinetic_energy + \
self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(
sum_energy).value_in(nbody_system.energy)
coreradius = self.star_code.particles.virial_radius().value_in(
self.rscale.to_unit())
print "Time :", time
print "Energy :", energy
print "Virial radius :", coreradius
self.stop()
if must_do_plot:
raw_input('Press enter...')
def create_codes(
self,
gas_code,
star_code,
gas_to_star_interaction_code,
star_to_gas_interaction_code):
self.star_code = getattr(self, 'new_star_code_'+star_code)()
self.gas_code = getattr(self, 'new_gas_code_'+gas_code)()
self.gas_to_star_codes = getattr(
self,
'new_gas_to_star_interaction_codes_'+gas_to_star_interaction_code
)(self.gas_code)
self.star_to_gas_codes = getattr(
self,
'new_star_to_gas_interaction_codes_'+star_to_gas_interaction_code
)(self.star_code)
def create_bridge(self):
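# Wrap each code in GravityCodeInField so it is kicked by the field of its
# partner codes; the Bridge then advances both at the interaction timestep.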
bridge_code1 = bridge.GravityCodeInField(
self.gas_code, self.star_to_gas_codes
)
bridge_code2 = bridge.GravityCodeInField(
self.star_code, self.gas_to_star_codes
)
self.bridge_system = bridge.Bridge(
timestep=self.interaction_timestep,
use_threading=False
)
self.bridge_system.add_code(bridge_code2)
self.bridge_system.add_code(bridge_code1)
def stop(self):
self.star_code.stop()
self.gas_code.stop()
def new_gas_to_star_interaction_codes_self(self, gas_code):
return [gas_code]
def new_star_to_gas_interaction_codes_self(self, star_code):
return [star_code]
def new_gas_to_star_interaction_codes_none(self, gas_code):
return []
def new_star_to_gas_interaction_codes_none(self, gas_code):
return []
def new_gas_to_star_interaction_codes_octgrav(self, gas_code):
def new_octgrav():
result = Octgrav(self.converter)
result.parameters.epsilon_squared = self.gas_epsilon ** 2
return result
return [bridge.CalculateFieldForCodes(new_octgrav, [gas_code])]
def new_gas_to_star_interaction_codes_bhtree(self, gas_code):
def new_bhtree():
result = BHTree(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
return result
return [bridge.CalculateFieldForCodes(new_bhtree, [gas_code])]
def new_gas_code_fi(self):
result = Fi(self.converter)
result.parameters.self_gravity_flag = True
result.parameters.use_hydro_flag = True
result.parameters.radiation_flag = False
result.parameters.periodic_box_size = 500 | units.parsec
result.parameters.timestep = 0.125 * self.interaction_timestep
# result.parameters.adaptive_smoothing_flag = True
# result.parameters.epsilon_squared = self.gas_epsilon ** 2
# result.parameters.eps_is_h_flag = False
result.parameters.integrate_entropy_flag = False
# result.parameters.self_gravity_flag = False
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_star_code_fi(self):
result = Fi(self.converter)
result.parameters.self_gravity_flag = True
result.parameters.use_hydro_flag = False
result.parameters.radiation_flag = False
result.parameters.periodic_box_size = 500 | units.parsec
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_gas_code_gadget(self):
result = Gadget2(self.converter)
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_gas_code_field(self):
result = GasPlummerModelExternalField(
radius=self.rscale,
total_mass=self.gas_mass
)
return result
def new_gas_code_hermite(self):
result = Hermite(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.particles.add_particles(self.new_particles_cluster_as_gas())
result.commit_particles()
return result
def new_star_code_hermite(self):
result = Hermite(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_star_code_phigrape(self):
result = PhiGRAPE(self.converter, mode="gpu")
result.parameters.initialize_gpu_once = 1
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_star_code_bhtree(self):
result = BHTree(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_star_code_octgrav(self):
result = Octgrav(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_gas_code_bhtree(self):
result = BHTree(self.converter)
result.parameters.epsilon_squared = self.gas_epsilon ** 2
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster_as_gas())
result.commit_particles()
return result
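# The class below is the all-in-one alternative to the bridged setup above:
# a single SPH code (Fi or Gadget2) models both components, with the stars
# added as dm_particles and the gas as gas_particles, so no bridge coupling
# between separate codes is needed.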
class AllInOneStarAndGasPlummerCode(AbstractStarAndGasPlummerCode):
def __init__(self,
nstars=10,
ngas=-1,
endtime=10,
total_mass=1000,
gas_fraction=0.9,
rscale=1.0,
sph_code='fi',
star_smoothing_fraction=0.001,
gas_smoothing_fraction=0.05,
seed=-1,
ntimesteps=10,
must_do_plot=True,
interaction_timestep=0.01,
**ignored_options
):
AbstractStarAndGasPlummerCode.__init__(
self,
nstars,
ngas,
endtime,
total_mass,
gas_fraction,
rscale,
star_smoothing_fraction,
gas_smoothing_fraction,
seed,
ntimesteps,
must_do_plot
)
self.interaction_timestep = self.converter.to_si(
interaction_timestep | nbody_system.time)
self.create_code(sph_code)
sum_energy = self.code.kinetic_energy + \
self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(
sum_energy).value_in(nbody_system.energy)
coreradius = self.code.dm_particles.virial_radius().value_in(
self.rscale.to_unit())
print "Time:", 0
print "Energy:", energy
print "Virial radius:", coreradius
self.evolve_model()
if must_do_plot:
pylab.show()
pylab.savefig(
"{0}-{1}-{2}.png".format(
sph_code,
nstars,
ngas
)
)
time = self.converter.to_nbody(
self.code.model_time).value_in(nbody_system.time)
sum_energy = self.code.kinetic_energy + \
self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(
sum_energy).value_in(nbody_system.energy)
coreradius = self.code.dm_particles.virial_radius().value_in(
self.rscale.to_unit())
print "Time:", time
print "Energy:", energy
print "Virial radius:", coreradius
self.stop()
if must_do_plot:
input('Press enter...')
def evolve_model(self):
if self.must_do_plot:
self.update_plot(time=0 * self.delta_t, code=self.code)
for time in self.delta_t * range(1, self.ntimesteps+1):
self.code.evolve_model(time)
print(self.converter.to_nbody(self.code.model_time))
if self.must_do_plot:
self.update_plot(time=self.code.time, code=self.code)
def create_code(self, name):
self.code = getattr(self, 'new_sph_code_'+name)()
def stop(self):
self.code.stop()
def new_sph_code_fi(self):
result = Fi(self.converter)
result.parameters.self_gravity_flag = True
result.parameters.use_hydro_flag = True
result.parameters.radiation_flag = False
result.parameters.periodic_box_size = 500 | units.parsec
result.parameters.timestep = 0.125 * self.interaction_timestep
# result.parameters.adaptive_smoothing_flag = True
# result.parameters.epsilon_squared = self.gas_epsilon ** 2
# result.parameters.eps_is_h_flag = False
result.parameters.integrate_entropy_flag = False
result.dm_particles.add_particles(self.new_particles_cluster())
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_sph_code_gadget(self):
result = Gadget2(self.converter)
result.dm_particles.add_particles(self.new_particles_cluster())
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_option_parser():
result = OptionParser()
result.add_option(
"-n", "--nstar",
default=10,
dest="nstars",
help="number of star particles",
type="int"
)
result.add_option(
"-g", "--ngas",
default=-1,
dest="ngas",
help="number of gas particles (if -1, 10 times the number of stars)",
type="int"
)
result.add_option(
"--gas-code",
default="field",
dest="gas_code",
help="the code modelling the gas ('fi', 'gadget', 'field')",
type="string"
)
result.add_option(
"--star-code",
default="hermite",
dest="star_code",
help="the code modelling the particles ('hermite', 'bhtree', 'octgrav', 'phigrape')",
type="string"
)
result.add_option(
"--sph-code",
default="fi",
dest="sph_code",
help="the code modelling the particles and the gas simultaniously",
type="string"
)
result.add_option(
"--gas-star-code",
default="self",
dest="gas_to_star_interaction_code",
help="the code calculating the gravity field of the gas code for the star code (default is self, gas code will calculate field for star code)",
type="string"
)
result.add_option(
"--star-gas-code",
default="self",
dest="star_to_gas_interaction_code",
help="the code calculating the gravity field of the star code for the gas code (default is self, star code will calculate field for gas code)",
type="string"
)
result.add_option(
"-m", "--total-mass",
default=1000.0,
dest="total_mass",
help="the total mass in solar masses",
type="float"
)
result.add_option(
"--gas-fraction",
default=0.9,
dest="gas_fraction",
help="the gas fraction between 0.0 and 1.0 (default 0.9)",
type="float"
)
result.add_option(
"-r", "--rscale",
default=1.0,
dest="rscale",
help="length scale of the problem in parsec (default 1) ",
type="float"
)
result.add_option(
"--star_smoothing_fraction",
default=0.001,
dest="star_smoothing_fraction",
help="smoothing length of the stars as a fraction of the length scale",
type="float"
)
result.add_option(
"--gas_smoothing_fraction",
default=0.05,
dest="gas_smoothing_fraction",
help="smoothing length of the gas particles as a fraction of the length scale",
type="float"
)
result.add_option(
"-s", "--seed",
default=0,
dest="seed",
help="random number seed (-1, no seed)",
type="int"
)
result.add_option(
"--interaction-timestep",
default=0.01,
dest="interaction_timestep",
help="time between bridge interactions (0.01 nbody time)",
type="float"
)
result.add_option(
"-t", "--end-time",
default=1,
dest="endtime",
help="end time of the simulation (in nbody time, default 1)",
type="float"
)
result.add_option(
"--ntimesteps",
default=10,
dest="ntimesteps",
help="number of times to do reporting",
type="int"
)
result.add_option(
"--noplot",
dest="must_do_plot",
default=True,
help="do not show a plot and end as soon as possible",
action="store_false"
)
result.add_option(
"--allinoone",
dest="must_do_bridge",
default=True,
help="simulate the stars and gas with one sph code",
action="store_false"
)
return result
if __name__ == "__main__":
options, arguments = new_option_parser().parse_args()
if options.must_do_bridge:
options = options.__dict__
BridgeStarAndGasPlummerCode(**options)
else:
options = options.__dict__
AllInOneStarAndGasPlummerCode(**options)
```
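The bridged setup above can also be driven directly from Python instead of through the option parser. The snippet below is a minimal sketch, not part of the original script: the keyword names mirror the `BridgeStarAndGasPlummerCode` constructor arguments shown above, and the values are illustrative only (masses in solar masses, lengths in parsec, times in N-body units, as documented by the option parser).
```python
# Minimal sketch, assuming the classes above have been imported; all values
# are illustrative and not taken from the original script.
options = dict(
    nstars=100,              # number of star particles
    ngas=1000,               # number of gas particles
    endtime=1,               # end time in N-body units
    total_mass=1000,         # total mass in solar masses
    gas_fraction=0.9,
    rscale=1.0,              # length scale in parsec
    star_code='hermite',
    gas_code='field',
    seed=0,
    ntimesteps=10,
    interaction_timestep=0.01,
    must_do_plot=False,      # skip the pylab window
)
BridgeStarAndGasPlummerCode(**options)
```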
#### File: community/adaptb/interface.py
```python
import warnings
from amuse.community import *
from amuse.community.interface.gd import GravitationalDynamicsInterface, GravitationalDynamics
class AdaptbInterface(CodeInterface, GravitationalDynamicsInterface, LiteratureReferencesMixIn,
StoppingConditionInterface, CodeWithDataDirectories):
"""
Adaptb (Accurate Dynamics with Arbitrary Precision by <NAME>)
"""
include_headers = ['worker_code.h', 'stopcond.h']
def __init__(self, **options):
CodeInterface.__init__(self, name_of_the_worker="adaptb_worker", **options)
LiteratureReferencesMixIn.__init__(self)
CodeWithDataDirectories.__init__(self)
warnings.warn("Adaptb is superseded by Brutus")
@legacy_function
def get_adaptb_output_directory():
function = LegacyFunctionSpecification()
function.addParameter('adaptb_output_directory', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_adaptb_output_directory():
function = LegacyFunctionSpecification()
function.addParameter('adaptb_output_directory', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def new_particle_float64():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('identity_of_the_particle', dtype='int32', direction=function.OUT)
function.addParameter('mass', dtype='float64', direction=function.IN, description = "The mass of the particle")
function.addParameter('x', dtype='float64', direction=function.IN, description = "The initial position vector of the particle")
function.addParameter('y', dtype='float64', direction=function.IN, description = "The initial position vector of the particle")
function.addParameter('z', dtype='float64', direction=function.IN, description = "The initial position vector of the particle")
function.addParameter('vx', dtype='float64', direction=function.IN, description = "The initial velocity vector of the particle")
function.addParameter('vy', dtype='float64', direction=function.IN, description = "The initial velocity vector of the particle")
function.addParameter('vz', dtype='float64', direction=function.IN, description = "The initial velocity vector of the particle")
function.addParameter('radius', dtype='float64', direction=function.IN, description = "The radius of the particle", default = 0)
function.result_type = 'int32'
return function
@legacy_function
def new_particle_string():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('identity_of_the_particle', dtype='int32', direction=function.OUT)
function.addParameter('mass', dtype='string', direction=function.IN, description = "The mass of the particle")
function.addParameter('x', dtype='string', direction=function.IN, description = "The initial position vector of the particle")
function.addParameter('y', dtype='string', direction=function.IN, description = "The initial position vector of the particle")
function.addParameter('z', dtype='string', direction=function.IN, description = "The initial position vector of the particle")
function.addParameter('vx', dtype='string', direction=function.IN, description = "The initial velocity vector of the particle")
function.addParameter('vy', dtype='string', direction=function.IN, description = "The initial velocity vector of the particle")
function.addParameter('vz', dtype='string', direction=function.IN, description = "The initial velocity vector of the particle")
function.addParameter('radius', dtype='string', direction=function.IN, description = "The radius of the particle", default='0')
function.result_type = 'int32'
return function
def new_particle(self, mass, x,y,z, vx,vy,vz, radius = 0):
if isinstance(mass, str):
return self.new_particle_string(mass, x,y,z, vx,vy,vz, radius = str(radius))
else:
return self.new_particle_float64(mass, x,y,z, vx,vy,vz, radius = radius)
@legacy_function
def get_bs_tolerance_string():
function = LegacyFunctionSpecification()
function.addParameter('bs_tolerance', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_bs_tolerance_string():
function = LegacyFunctionSpecification()
function.addParameter('bs_tolerance', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_bs_tolerance_float64():
function = LegacyFunctionSpecification()
function.addParameter('bs_tolerance', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_bs_tolerance_float64():
function = LegacyFunctionSpecification()
function.addParameter('bs_tolerance', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_word_length():
function = LegacyFunctionSpecification()
function.addParameter('word_length', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_word_length():
function = LegacyFunctionSpecification()
function.addParameter('word_length', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dt_print():
function = LegacyFunctionSpecification()
function.addParameter('dt_print', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dt_print():
function = LegacyFunctionSpecification()
function.addParameter('dt_print', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_max_cpu_time():
function = LegacyFunctionSpecification()
function.addParameter('max_cpu_time', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_max_cpu_time():
function = LegacyFunctionSpecification()
function.addParameter('max_cpu_time', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
class Adaptb(GravitationalDynamics):
def __init__(self, convert_nbody = None, **options):
self.stopping_conditions = StoppingConditions(self)
legacy_interface = AdaptbInterface(**options)
self.legacy_doc = legacy_interface.__doc__
GravitationalDynamics.__init__(
self,
legacy_interface,
convert_nbody,
**options
)
def initialize_code(self):
result = self.overridden().initialize_code()
self.parameters.adaptb_output_directory = self.output_directory
return result
def define_parameters(self, object):
GravitationalDynamics.define_parameters(self, object)
self.stopping_conditions.define_parameters(object)
object.add_method_parameter(
"get_bs_tolerance_float64",
"set_bs_tolerance_float64",
"bs_tolerance",
"Error tolerance of the Bulirsch-Stoer integrator",
default_value = 1.0e-6
)
object.add_method_parameter(
"get_eps2",
"set_eps2",
"epsilon_squared",
"smoothing parameter for gravity calculations, usage is not recommended for Adaptb",
default_value = 0.0 | nbody_system.length**2
)
object.add_method_parameter(
"get_dt_print",
"set_dt_print",
"dt_print",
"dt_print, regular print interval to show status (% complete) of evolve_model",
default_value = 0.1 | nbody_system.time
)
object.add_method_parameter(
"get_word_length",
"set_word_length",
"word_length",
"The word length, or number of bits, used for the arbitrary precision calculations",
default_value = 64
)
object.add_method_parameter(
"get_adaptb_output_directory",
"set_adaptb_output_directory",
"adaptb_output_directory",
"Path to the directory where Adaptb stores its output",
default_value = "./"
)
object.add_method_parameter(
"get_max_cpu_time",
"set_max_cpu_time",
"time_limit_cpu",
"The cpu-time limit, the maximum amount of time Adaptb is allowed to run for.",
default_value = 3600.0 | units.s
)
def define_methods(self, object):
GravitationalDynamics.define_methods(self, object)
self.stopping_conditions.define_methods(object)
object.add_method("get_bs_tolerance_float64", (), (object.NO_UNIT, object.ERROR_CODE,))
object.add_method("set_bs_tolerance_float64", (object.NO_UNIT, ), (object.ERROR_CODE,))
object.add_method("get_eps2", (), (nbody_system.length**2, object.ERROR_CODE,))
object.add_method("set_eps2", (nbody_system.length**2, ), (object.ERROR_CODE,))
object.add_method("get_dt_print", (), (nbody_system.time, object.ERROR_CODE,))
object.add_method("set_dt_print", (nbody_system.time, ), (object.ERROR_CODE,))
object.add_method("get_word_length", (), (object.NO_UNIT, object.ERROR_CODE,))
object.add_method("set_word_length", (object.NO_UNIT, ), (object.ERROR_CODE,))
object.add_method("get_adaptb_output_directory", (), (object.NO_UNIT, object.ERROR_CODE,))
object.add_method("set_adaptb_output_directory", (object.NO_UNIT, ), (object.ERROR_CODE,))
object.add_method("get_max_cpu_time", (), (units.s, object.ERROR_CODE,))
object.add_method("set_max_cpu_time", (units.s, ), (object.ERROR_CODE,))
def define_particle_sets(self, object):
GravitationalDynamics.define_particle_sets(self, object)
self.stopping_conditions.define_particle_set(object)
def define_state(self, object):
GravitationalDynamics.define_state(self, object)
self.stopping_conditions.define_state(object)
```
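A hedged usage sketch for the interface above: it touches only the parameters defined in `define_parameters` (`bs_tolerance`, `word_length`). The import path is inferred from the file location, and it assumes a compiled adaptb worker is available.
```python
# Illustrative only; requires the compiled adaptb worker binary.
from amuse.community.adaptb.interface import Adaptb

gravity = Adaptb()                          # N-body units; pass convert_nbody for SI
gravity.parameters.bs_tolerance = 1.0e-8    # Bulirsch-Stoer error tolerance
gravity.parameters.word_length = 128        # bits of arbitrary-precision arithmetic
print(gravity.parameters)                   # inspect the resulting parameter set
gravity.stop()
```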
#### File: community/interface/stopping_conditions.py
```python
from amuse.units import units, generic_unit_system
from amuse.units import nbody_system as nbody
from amuse.support.exceptions import AmuseException
from amuse.rfi.core import legacy_function
from amuse.rfi.core import LegacyFunctionSpecification
class StoppingConditionInterface(object):
@legacy_function
def has_stopping_condition():
"""
Return 1 if the stopping condition with
the given index is supported by the code,
0 otherwise.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('type', dtype='int32', direction=function.IN, description = "The type index of the stopping condition")
function.addParameter('result', dtype='int32', direction=function.OUT, description = "1 if the stopping condition is supported")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def enable_stopping_condition():
"""
Will enable the stopping condition if it is supported
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('type', dtype='int32', direction=function.IN, description = "The type index of the stopping condition")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def disable_stopping_condition():
"""
Will disable the stopping condition if it is supported
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('type', dtype='int32', direction=function.IN, description = "The index of the stopping condition")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def is_stopping_condition_enabled():
"""
Return 1 if the stopping condition with
the given index is enabled, 0 otherwise.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('type', dtype='int32', direction=function.IN, description = "The index of the stopping condition")
function.addParameter('result', dtype='int32', direction=function.OUT, description = "1 if the stopping condition is enabled")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def is_stopping_condition_set():
"""
Return 1 if the stopping condition with
the given index is set, 0 otherwise.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('type', dtype='int32', direction=function.IN, description = "The index of the stopping condition")
function.addParameter('result', dtype='int32', direction=function.OUT, description = "1 if the stopping condition is enabled")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_number_of_stopping_conditions_set():
"""
Return the number of stopping conditions set, one
condition can be set multiple times.
Stopping conditions are set when the code determines
that the conditions are met. The objects or information
about the condition can be retrieved with
the get_stopping_condition_info method.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('result', dtype='int32', direction=function.OUT, description = "> 1 if any stopping condition is set")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_stopping_condition_info():
"""
Generic function for getting the information connected to
a stopping condition. Index can be between 0 and
the result of the :method:`get_number_of_stopping_conditions_set`
method.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index', dtype='int32', direction=function.IN, description = "Index in the array[0,number_of_stopping_conditions_set>")
function.addParameter('type', dtype='int32', direction=function.OUT, description = "Kind of the condition, can be used to retrieve specific information")
function.addParameter('number_of_particles', dtype='int32', direction=function.OUT, description = "Number of particles that met this condition")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_stopping_condition_particle_index():
"""
For collision detection
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index', dtype='int32', direction=function.IN, description = "Index in the array[0,number_of_stopping_conditions_set>")
function.addParameter('index_of_the_column', dtype='int32', direction=function.IN, description = "Column index involved in the condition (for pair collisions 0 and 1 are possible)")
function.addParameter('index_of_particle', dtype='int32', direction=function.OUT, description = "Set to the identifier of particle[index_of_the_column][index]")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_stopping_condition_timeout_parameter():
"""
Set max computer time available (in seconds).
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('value', dtype='float64', direction=function.IN, description = "Available wallclock time in seconds")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - Value out of range
"""
return function
@legacy_function
def get_stopping_condition_timeout_parameter():
"""
Retrieve max computer time available (in seconds).
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('value', dtype='float64', direction=function.OUT, description = "Current value of avaible wallclock time in seconds")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_stopping_condition_number_of_steps_parameter():
"""
Set max inner loop evaluations.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('value', dtype='int32', direction=function.IN, description = "Available inner loop evaluations")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - Value out of range
"""
return function
@legacy_function
def get_stopping_condition_number_of_steps_parameter():
"""
Retrieve max inner loop evaluations.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('value', dtype='int32', direction=function.OUT, description = "Current number of avaible inner loop evaluations")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_stopping_condition_out_of_box_parameter():
"""
Set size of box.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('value', dtype='float64', direction=function.IN, description = "Size of box")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - Value out of range
"""
return function
@legacy_function
def get_stopping_condition_out_of_box_parameter():
"""
Get size of box
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('value', dtype='float64', direction=function.OUT, description = "Size of box")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - Value out of range
"""
return function
@legacy_function
def set_stopping_condition_minimum_density_parameter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_stopping_condition_minimum_density_parameter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_stopping_condition_maximum_density_parameter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_stopping_condition_maximum_density_parameter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_stopping_condition_minimum_internal_energy_parameter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_stopping_condition_minimum_internal_energy_parameter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_stopping_condition_maximum_internal_energy_parameter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_stopping_condition_maximum_internal_energy_parameter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def get_stopping_condition_out_of_box_use_center_of_mass_parameter():
"""
If True use the center of mass to determine the location of the box, if False use (0,0,0)
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('value', dtype='bool', direction=function.OUT, description = "True if detection should use center of mass")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - Value out of range
"""
return function
@legacy_function
def set_stopping_condition_out_of_box_use_center_of_mass_parameter():
"""
If True use the center of mass to determine the location of the box, if False use (0,0,0)
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('value', dtype='bool', direction=function.IN, description = "True if detection should use center of mass")
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - Value out of range
"""
return function
class StoppingCondition(object):
def __init__(self, conditions, type, description):
self.conditions = conditions
self.type = type
self.description = description
self.__doc__ = description
def enable(self):
if self.is_supported():
self.conditions.code.enable_stopping_condition(self.type)
else:
name = [name for name, value in self.conditions.all_conditions() if value is self][0]
raise AmuseException("Can't enable stopping condition '{0}', since '{1}' does not support this condition.".format(name, type(self.conditions.code).__name__))
def disable(self):
if self.is_supported():
self.conditions.code.disable_stopping_condition(self.type)
else:
name = [name for name, value in self.conditions.all_conditions() if value is self][0]
raise AmuseException("Can't disable stopping condition '{0}', since '{1}' does not support this condition.".format(name, type(self.conditions.code).__name__))
def is_enabled(self):
return self.conditions.code.is_stopping_condition_enabled(self.type) == 1
def is_supported(self):
return self.conditions.code.has_stopping_condition(self.type) == 1
def is_set(self):
return self.conditions.code.is_stopping_condition_set(self.type) == 1
def get_set_condition_indices(self, index_in_condition):
indices = range(self.conditions.code.get_number_of_stopping_conditions_set())
if len(indices) == 0:
return []
types, number_of_particles = self.conditions.code.get_stopping_condition_info(indices)
result = []
for index, type, number_of_particles_in_condition in zip(indices, types, number_of_particles):
if type == self.type and index_in_condition < number_of_particles_in_condition:
result.append(index)
return result
def particles(self, index_in_the_condition=0, particles_set_name="particles"):
selected = self.get_set_condition_indices(index_in_the_condition)
particles = getattr(self.conditions.code,particles_set_name)
if len(selected) == 0:
return particles[0:0]
else:
return particles.get_stopping_condition_particle_index(
selected,
[index_in_the_condition]*len(selected)
)
class StoppingConditions(object):
def __init__(self, code):
self.code = code
self.collision_detection = StoppingCondition(
self,
0,
"If enabled, the code will stop at the end of the inner loop when two stars connect"
)
self.pair_detection = StoppingCondition(
self,
1,
"If enabled, the code will stop at the end of the inner loop when two stars are bound"
)
self.escaper_detection = StoppingCondition(
self,
2,
"If enabled, the code will stop at the end of the inner loop when a star escapes"
)
self.timeout_detection = StoppingCondition(
self,
3,
"If enabled, the code will stop at the end of the inner loop when the computer time is above a set timeout"
)
self.number_of_steps_detection = StoppingCondition(
self,
4,
"If enabled, the code will stop at the end of the inner loop when the number of evaluations reached the set max number"
)
self.out_of_box_detection = StoppingCondition(
self,
5,
"If enabled, the code will stop if a particle escapes the box of size out_of_box_size"
)
self.density_limit_detection = StoppingCondition(
self,
6,
"If enabled, the code will stop if a gas particle has a density out of the range "
"[stopping_condition_minimum_density, stopping_condition_maximum_density]"
)
self.internal_energy_limit_detection = StoppingCondition(
self,
7,
"If enabled, the code will stop if a gas particle has an internal energy out of the range "
"[stopping_condition_minimum_internal_energy, stopping_condition_maximum_internal_energy]"
)
self.interaction_over_detection = StoppingCondition(
self,
8,
"If enabled, the code will stop if the interaction between particles is over"
)
self.supernova_detection = StoppingCondition(
self,
9,
"If enabled, the code will stop at the end of the inner loop when two a star goes supernova"
)
def all_conditions(self):
for name in dir(self):
if name.startswith("_"):
continue
else:
value = getattr(self, name)
if isinstance(value, StoppingCondition):
yield name, value
def __str__(self):
parts = []
parts.append("Stopping conditions of a '{0}' object\n".format(type(self.code).__name__))
supported = self.supported_conditions()
enabled = [name for name, condition in self.all_conditions() if condition.is_enabled()]
hit = [name for name, condition in self.all_conditions() if condition.is_set()]
parts.append('* supported conditions: ')
parts.append(', '.join(supported))
parts.append('\n')
parts.append('* enabled conditions: ')
if enabled:
parts.append(', '.join(enabled))
else:
parts.append('none')
parts.append('\n')
parts.append('* set conditions: ')
if hit:
parts.append(', '.join(hit))
else:
parts.append('none')
parts.append('\n')
return ''.join(parts)
def supported_conditions(self):
return [name for name, condition in self.all_conditions() if condition.is_supported()]
def define_parameters(self, object):
object.add_method_parameter(
"get_stopping_condition_timeout_parameter",
"set_stopping_condition_timeout_parameter",
"stopping_conditions_timeout",
"max wallclock time available for the evolve step",
default_value = 4.0 | units.s
)
object.add_method_parameter(
"get_stopping_condition_number_of_steps_parameter",
"set_stopping_condition_number_of_steps_parameter",
"stopping_conditions_number_of_steps",
"max inner loop evals",
default_value = 1.0
)
object.add_method_parameter(
"get_stopping_condition_out_of_box_parameter",
"set_stopping_condition_out_of_box_parameter",
"stopping_conditions_out_of_box_size",
"size of cube",
default_value = 0.0 | nbody.length
)
object.add_method_parameter(
"get_stopping_condition_minimum_density_parameter",
"set_stopping_condition_minimum_density_parameter",
"stopping_condition_minimum_density",
"minimum density of a gas particle",
default_value = -1.0 | generic_unit_system.density
)
object.add_method_parameter(
"get_stopping_condition_maximum_density_parameter",
"set_stopping_condition_maximum_density_parameter",
"stopping_condition_maximum_density",
"maximum density of a gas particle",
default_value = -1.0 | generic_unit_system.density
)
object.add_method_parameter(
"get_stopping_condition_minimum_internal_energy_parameter",
"set_stopping_condition_minimum_internal_energy_parameter",
"stopping_condition_minimum_internal_energy",
"minimum internal energy of a gas particle",
default_value = -1.0 | generic_unit_system.specific_energy
)
object.add_method_parameter(
"get_stopping_condition_maximum_internal_energy_parameter",
"set_stopping_condition_maximum_internal_energy_parameter",
"stopping_condition_maximum_internal_energy",
"maximum internal energy of a gas particle",
default_value = -1.0 | generic_unit_system.specific_energy
)
object.add_method_parameter(
"get_stopping_condition_out_of_box_use_center_of_mass_parameter",
"set_stopping_condition_out_of_box_use_center_of_mass_parameter",
"stopping_conditions_out_of_box_use_center_of_mass",
"if True use the center of mass to determine the location of the box, if False use (0,0,0), is not used by all codes",
default_value = False
)
def define_methods(self, object):
object.add_method(
'get_stopping_condition_particle_index',
(
object.NO_UNIT,
object.NO_UNIT,
),
(
object.INDEX,
object.ERROR_CODE,
)
)
object.add_method(
'has_stopping_condition',
(
object.NO_UNIT,
),
(
object.NO_UNIT,
object.ERROR_CODE,
)
)
object.add_method(
'is_stopping_condition_enabled',
(
object.NO_UNIT,
),
(
object.NO_UNIT,
object.ERROR_CODE,
)
)
object.add_method(
'is_stopping_condition_set',
(
object.NO_UNIT,
),
(
object.NO_UNIT,
object.ERROR_CODE,
)
)
object.add_method(
'get_stopping_condition_info',
(
object.NO_UNIT,
),
(
object.NO_UNIT,
object.NO_UNIT,
object.ERROR_CODE,
)
)
object.add_method(
'get_number_of_stopping_conditions_set',
(
),
(
object.NO_UNIT,
object.ERROR_CODE,
)
)
object.add_method(
'enable_stopping_condition',
( object.NO_UNIT,),
(
object.ERROR_CODE
)
)
object.add_method(
'disable_stopping_condition',
( object.NO_UNIT,),
(
object.ERROR_CODE
)
)
object.add_method(
"get_stopping_condition_timeout_parameter",
(),
(units.s, object.ERROR_CODE,)
)
object.add_method(
"set_stopping_condition_timeout_parameter",
(units.s, ),
(object.ERROR_CODE,)
)
object.add_method(
"get_stopping_condition_number_of_steps_parameter",
(),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
"set_stopping_condition_number_of_steps_parameter",
(object.NO_UNIT, ),
(object.ERROR_CODE,)
)
object.add_method(
"get_stopping_condition_out_of_box_parameter",
(),
(nbody.length, object.ERROR_CODE,)
)
object.add_method(
"set_stopping_condition_out_of_box_parameter",
(nbody.length, ),
(object.ERROR_CODE,)
)
object.add_method("get_stopping_condition_minimum_density_parameter",
(), (generic_unit_system.density, object.ERROR_CODE,))
object.add_method("set_stopping_condition_minimum_density_parameter",
(generic_unit_system.density, ), (object.ERROR_CODE,))
object.add_method("get_stopping_condition_maximum_density_parameter",
(), (generic_unit_system.density, object.ERROR_CODE,))
object.add_method("set_stopping_condition_maximum_density_parameter",
(generic_unit_system.density, ), (object.ERROR_CODE,))
object.add_method("get_stopping_condition_minimum_internal_energy_parameter",
(), (generic_unit_system.specific_energy, object.ERROR_CODE,))
object.add_method("set_stopping_condition_minimum_internal_energy_parameter",
(generic_unit_system.specific_energy, ), (object.ERROR_CODE,))
object.add_method("get_stopping_condition_maximum_internal_energy_parameter",
(), (generic_unit_system.specific_energy, object.ERROR_CODE,))
object.add_method("set_stopping_condition_maximum_internal_energy_parameter",
(generic_unit_system.specific_energy, ), (object.ERROR_CODE,))
def define_particle_set(self, object, name_of_the_set = 'particles'):
object.add_query(name_of_the_set, 'get_stopping_condition_particle_index')
def define_state(self, object):
for method_name in [
'get_stopping_condition_particle_index',
'has_stopping_condition',
'is_stopping_condition_enabled',
'is_stopping_condition_set',
'get_stopping_condition_info',
'get_number_of_stopping_conditions_set',
'enable_stopping_condition',
'disable_stopping_condition']:
object.add_method('!UNINITIALIZED!END', method_name)
```
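The `StoppingConditions` helper above is typically attached to a code instance as `code.stopping_conditions` (as Adaptb does in its constructor). The sketch below shows the intended call pattern; the choice of `Hermite`, its import path, and the particle setup are assumptions made for illustration.
```python
# Illustrative only; any code that wires in StoppingConditions works the same way.
from amuse.units import nbody_system
from amuse.ic.plummer import new_plummer_model
from amuse.community.hermite0.interface import Hermite  # path may differ per AMUSE version

gravity = Hermite()
gravity.particles.add_particles(new_plummer_model(100))
detector = gravity.stopping_conditions.collision_detection
detector.enable()                        # raises AmuseException if unsupported
gravity.evolve_model(1 | nbody_system.time)
if detector.is_set():
    colliders = detector.particles(0)    # first particle of each flagged pair
print(gravity.stopping_conditions)       # supported / enabled / set summary
gravity.stop()
```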
#### File: community/kepler/interface.py
```python
from amuse.community import *
from amuse.community.interface.common import CommonCodeInterface, CommonCode
from amuse.support.options import option
from amuse.units import units
import os.path
class KeplerInterface(CodeInterface,
CommonCodeInterface):
"""
Kepler orbit manipulation functions, imported from Starlab.
Initialize an orbit from mass, pos, and vel, or mass, semi-major
axis and eccentricity, and allow the user to manipulate the
resulting structure. Most Starlab functionality is currently
exposed.
"""
# Interface specification.
include_headers = ['interface.h']
__so_module__ = 'kepler_cython'
def __init__(self, **options):
CodeInterface.__init__(self,
name_of_the_worker = "kepler_worker",
**options)
@legacy_function
def initialize_from_dyn():
"""
Initialize a new kepler system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('mass', dtype='float64', direction=function.IN,
unit = nbody_system.mass)
function.addParameter('x', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.addParameter('y', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.addParameter('z', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.addParameter('vx', dtype='float64', direction=function.IN,
unit = nbody_system.speed)
function.addParameter('vy', dtype='float64', direction=function.IN,
unit = nbody_system.speed)
function.addParameter('vz', dtype='float64', direction=function.IN,
unit = nbody_system.speed)
function.addParameter('time', dtype='float64', direction=function.IN,
default = 0, unit = nbody_system.time)
function.result_type = 'int32'
function.result_doc = """
0 - OK
new kepler was created
-1 - ERROR
kepler could not be created"""
return function
@legacy_function
def initialize_from_elements():
"""
Initialize a new kepler system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('mass', dtype='float64', direction=function.IN,
unit = nbody_system.mass)
function.addParameter('semi', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.addParameter('ecc', dtype='float64', direction=function.IN,
unit = NO_UNIT)
function.addParameter('mean_anomaly',
dtype='float64', direction=function.IN,
default = 0, unit = NO_UNIT)
function.addParameter('time', dtype='float64', direction=function.IN,
default = 0, unit = nbody_system.time)
function.addParameter('periastron',
dtype='float64', direction=function.IN,
default = 0, unit = nbody_system.length)
function.addParameter('random_orientation',
dtype='int32', direction=function.IN,
default = 0, unit = NO_UNIT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
new kepler was created
-1 - ERROR
kepler could not be created"""
return function
@legacy_function
def transform_to_time():
"""
Transform the kepler system to the specified time.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('time', dtype='float64', direction=function.IN,
unit = nbody_system.time)
function.result_type = 'int32'
function.result_doc = """
0 - OK
transform to time OK
-1 - ERROR
could not transform to time"""
return function
@legacy_function
def advance_to_radius():
"""
Evolve the kepler system forward in time to the specified radius.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('radius', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
advance to radius OK
-1 - ERROR
could not advance to radius"""
return function
@legacy_function
def return_to_radius():
"""
Evolve the kepler system backward in time to the specified radius.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('radius', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
return to radius OK
-1 - ERROR
could not return to radius"""
return function
@legacy_function
def advance_to_periastron():
"""
Evolve the kepler system forward in time to the next periastron.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.result_type = 'int32'
function.result_doc = """
0 - OK
advance to periastron OK
-1 - ERROR
could not advance to periastron"""
return function
@legacy_function
def advance_to_apastron():
"""
Evolve the kepler system forward in time to the next apastron.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.result_type = 'int32'
function.result_doc = """
0 - OK
advance to apastron OK
-1 - ERROR
could not advance to apastron"""
return function
@legacy_function
def return_to_periastron():
"""
Evolve the kepler system backward in time to the previous periastron.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.result_type = 'int32'
function.result_doc = """
0 - OK
return to periastron OK
-1 - ERROR
could not return to periastron"""
return function
@legacy_function
def return_to_apastron():
"""
Evolve the kepler system backward in time to the previous apastron.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.result_type = 'int32'
function.result_doc = """
0 - OK
return to apastron OK
-1 - ERROR
could not return to apastron"""
return function
@legacy_function
def get_total_mass():
"""
Return the total mass of the system (as a reminder to the user).
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('mass', dtype='float64', direction=function.OUT,
unit = nbody_system.mass)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get mass OK
-1 - ERROR
could not get mass"""
return function
@legacy_function
def get_time():
"""
Return the current time of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('time', dtype='float64', direction=function.OUT,
unit = nbody_system.time)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get time OK
-1 - ERROR
could not get time"""
return function
@legacy_function
def get_period():
"""
Return the periodof the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('period', dtype='float64', direction=function.OUT,
unit = nbody_system.time)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get period OK
-1 - ERROR
could not get period"""
return function
@legacy_function
def get_elements():
"""
Return the orbital elements (a,e) of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('semi', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('ecc', dtype='float64', direction=function.OUT,
unit = NO_UNIT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get elements OK
-1 - ERROR
could not get elements"""
return function
@legacy_function
def get_integrals():
"""
Return the total energy and angular momentum of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('energy', dtype='float64', direction=function.OUT,
unit = nbody_system.speed*nbody_system.speed)
function.addParameter('angular_momentum',
dtype='float64', direction=function.OUT,
unit = nbody_system.length*nbody_system.speed)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get integrals OK
-1 - ERROR
could not get integrals"""
return function
@legacy_function
def get_separation_vector():
"""
Return the current separation vector (x,y,z) of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('x', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('y', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('z', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get separation vector OK
-1 - ERROR
could not get separation vector"""
return function
@legacy_function
def get_separation():
"""
Return the current separation r of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('r', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get separation OK
-1 - ERROR
could not get separation"""
return function
@legacy_function
def set_periastron():
"""
Set the current periastron of the system (initialization only).
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('peri', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
set periastron OK
-1 - ERROR
could not set periastron"""
return function
@legacy_function
def get_periastron():
"""
Return the current periastron of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('peri', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get periastron OK
-1 - ERROR
could not get periastron"""
return function
@legacy_function
def get_apastron():
"""
Return the current apastron of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('apo', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get apastron OK
-1 - ERROR
could not get apastron"""
return function
@legacy_function
def get_velocity_vector():
"""
Return the current relative velocity vector (x,y,z) of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vy', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vz', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get velocity vector OK
-1 - ERROR
could not get velocity vector"""
return function
@legacy_function
def get_angles():
"""
Return the current mean and true anomalies of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('true_anomaly',
dtype='float64', direction=function.OUT)
function.addParameter('mean_anomaly',
dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get angles OK
-1 - ERROR
could not get angles"""
return function
@legacy_function
def set_longitudinal_unit_vector():
"""
Set the longitudinal unit vector of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.IN)
function.addParameter('vy', dtype='float64', direction=function.IN)
function.addParameter('vz', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
set vector OK
-1 - ERROR
could not set vector"""
return function
@legacy_function
def set_normal_unit_vector():
"""
Set the normal unit vector of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.IN)
function.addParameter('vy', dtype='float64', direction=function.IN)
function.addParameter('vz', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
set vector OK
-1 - ERROR
could not set vector"""
return function
@legacy_function
def get_longitudinal_unit_vector():
"""
Return the longitudinal unit vector of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.OUT)
function.addParameter('vy', dtype='float64', direction=function.OUT)
function.addParameter('vz', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get vector OK
-1 - ERROR
could not get vector"""
return function
@legacy_function
def get_transverse_unit_vector():
"""
Return the transverse unit vector of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.OUT)
function.addParameter('vy', dtype='float64', direction=function.OUT)
function.addParameter('vz', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get vector OK
-1 - ERROR
could not get vector"""
return function
@legacy_function
def set_transverse_unit_vector():
"""
Set the transverse unit vector of the system (tangent to the longitudinal unit vector).
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.IN)
function.addParameter('vy', dtype='float64', direction=function.IN)
function.addParameter('vz', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
set vector OK
-1 - ERROR
could not set vector"""
return function
@legacy_function
def get_normal_unit_vector():
"""
Return the normal unit vector of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.OUT)
function.addParameter('vy', dtype='float64', direction=function.OUT)
function.addParameter('vz', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get vector OK
-1 - ERROR
could not get vector"""
return function
@legacy_function
def print_all():
"""
Print a kepler system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.result_type = 'int32'
function.result_doc = """
0 - OK
kepler was printed
-1 - ERROR
kepler could not be printed"""
return function
@legacy_function
def set_random():
"""
Set the random seed for kepler functions.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('seed', dtype='int32', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
seed was initialized
-1 - ERROR
error occurred"""
return function
@legacy_function
def get_random():
"""
Return the random seed for kepler functions.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('seed', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
seed was returned
-1 - ERROR
error occurred"""
return function
@legacy_function
def make_binary_scattering():
"""
Return a three-body scattering configuration (much faster than python).
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('m', dtype='float64', direction=function.IN,
unit = nbody_system.mass)
function.addParameter('ecc', dtype='float64', direction=function.IN,
unit = NO_UNIT)
function.addParameter('M', dtype='float64', direction=function.IN,
unit = nbody_system.mass)
function.addParameter('v_inf', dtype='float64', direction=function.IN,
unit = nbody_system.speed)
function.addParameter('impact_parameter', dtype='float64',
direction=function.IN,
unit = nbody_system.length)
function.addParameter('gamma', dtype='float64', direction=function.IN,
unit = NO_UNIT)
function.addParameter('planar', dtype='int32', direction=function.IN,
unit = NO_UNIT)
function.addParameter('time', dtype='float64', direction=function.OUT,
unit = nbody_system.time)
function.addParameter('m1', dtype='float64', direction=function.OUT,
unit = nbody_system.mass)
function.addParameter('m2', dtype='float64', direction=function.OUT,
unit = nbody_system.mass)
function.addParameter('m3', dtype='float64', direction=function.OUT,
unit = nbody_system.mass)
function.addParameter('x1', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('x2', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('x3', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('y1', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('y2', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('y3', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('z1', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('z2', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('z3', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('vx1', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vx2', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vx3', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vy1', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vy2', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vy3', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vz1', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vz2', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vz3', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.result_type = 'int32'
function.result_doc = """
0 - OK
legal scattering configuration
-1 - ERROR
could not generate a valid scattering configuration"""
return function
class Kepler(CommonCode):
__interface__ = KeplerInterface
def __init__(self, unit_converter = None, **options):
self.unit_converter = unit_converter
CommonCode.__init__(self,
self.__interface__(**options),
**options)
def define_converter(self, object):
if not self.unit_converter is None:
object.set_converter(self.unit_converter.as_converter_from_si_to_generic())
def define_methods(self, object):
CommonCode.define_methods(self, object)
def initialize_from_particles(self, particles):
if not len(particles) == 2:
raise Exception('The kepler code can only be initialized from a particle set with 2 particles')
total_mass = particles[0].mass + particles[1].mass
rel_position = particles[0].position - particles[1].position
rel_velocity = particles[0].velocity - particles[1].velocity
self.center_of_mass_position = particles.center_of_mass()
self.center_of_mass_velocity = particles.center_of_mass_velocity()
self.initialize_from_dyn(
total_mass,
rel_position[0],rel_position[1],rel_position[2],
rel_velocity[0],rel_velocity[1],rel_velocity[2]
)
self.particles = particles.copy()
def define_state(self, object):
CommonCode.define_state(self, object)
for method_name in [
'initialize_from_dyn',
'initialize_from_elements',
'transform_to_time',
'advance_to_radius',
'return_to_radius',
'advance_to_periastron',
'advance_to_apastron',
'return_to_periastron',
'return_to_apastron',
'get_total_mass',
'get_time',
'get_period',
'get_elements',
'get_integrals',
'get_separation_vector',
'get_separation',
'set_periastron',
'get_periastron',
'get_apastron',
'get_velocity_vector',
'get_angles',
'set_longitudinal_unit_vector',
'set_normal_unit_vector',
'get_longitudinal_unit_vector',
'get_transverse_unit_vector',
'set_transverse_unit_vector',
'get_normal_unit_vector',
'print_all',
'set_random',
'get_random',
'make_binary_scattering']:
object.add_method('!UNINITIALIZED!END', method_name)
```
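A minimal usage sketch of the high-level `Kepler` wrapper defined above. The import path and the final `stop()` call are assumptions based on common AMUSE conventions; the remaining calls only use methods that appear in `define_state`:

```python
from amuse.units import nbody_system
from amuse.ext.orbital_elements import new_binary_from_orbital_elements
from amuse.community.kepler.interface import Kepler  # assumed import path

# Build a two-body system in N-body units: masses 1 and 0.5, a = 1, e = 0.6.
binary = new_binary_from_orbital_elements(
    1.0 | nbody_system.mass,
    0.5 | nbody_system.mass,
    1.0 | nbody_system.length,
    eccentricity=0.6,
)

kepler = Kepler()                          # no converter: plain N-body units
kepler.initialize_from_particles(binary)   # requires exactly two particles
kepler.advance_to_periastron()             # one of the state-changing methods above
print(kepler.get_separation())             # separation at periastron
kepler.stop()                              # assumed standard AMUSE cleanup call
```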
#### File: community/kepler_orbiters/test_kepler.py
```python
import numpy
import struct
from interface import Kepler
from amuse.units import nbody_system
from amuse.units import units,constants
from amuse.ic.plummer import new_plummer_model
from amuse.datamodel import Particle
#from matplotlib import pyplot
import time
from amuse.ext.orbital_elements import orbital_elements_from_binary,new_binary_from_orbital_elements
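# The helper below recovers (a, e) from Cartesian state vectors: the specific
# orbital energy E = v^2/2 - mu/r gives a = -mu/(2E), and the specific angular
# momentum h gives the eccentricity via e = sqrt(1 - h^2/(mu*a)). The ex, ey, ez
# components of the eccentricity (Laplace-Runge-Lenz) vector are computed as
# well but not returned.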
def elements(starmass,x,y,z,vx,vy,vz,G=constants.G):
mu=G*starmass
r=(x**2+y**2+z**2)**0.5
v2=(vx**2+vy**2+vz**2)
e=v2/2-mu/r
a=-mu/2/e
hx=y*vz-z*vy
hy=z*vx-x*vz
hz=x*vy-y*vx
rdotv=x*vx+y*vy+z*vz
ex=v2*x/mu-rdotv*vx/mu-x/r
ey=v2*y/mu-rdotv*vy/mu-y/r
ez=v2*z/mu-rdotv*vz/mu-z/r
h2=(hx**2+hy**2+hz**2)
eps=(1-h2/mu/a)**0.5
return a,eps
def test_kepler(N=10000, tend=1.| units.yr,method=0):
numpy.random.seed(12345)
conv=nbody_system.nbody_to_si(2.| units.MSun, 5.|units.AU)
comets=new_plummer_model(N,conv)
sun=Particle(mass=1.|units.MSun)
sun.position=[0,0,0]|units.AU
sun.velocity=[0,0,0]|units.kms
comets.mass*=0.
code=Kepler(conv,redirection="none")
code.set_method(method)
code.central_particle.add_particle(sun)
code.orbiters.add_particles(comets)
a0,eps0=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz)
# print code.orbiters.x[0]
print orbital_elements_from_binary(code.particles[0:2],constants.G)
t1=time.time()
code.evolve_model(tend)
t2=time.time()
print orbital_elements_from_binary(code.particles[0:2],constants.G)
# print code.orbiters.x[0]
a,eps=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz)
da=abs((a-a0)/a0)
deps=abs(eps-eps0)/eps0
dev=numpy.where(da > 0.00001)[0]
print len(dev)
print a0[dev].value_in(units.AU)
print eps0[dev]
# pyplot.plot(a0[dev].value_in(units.AU),eps0[dev],"ro")
# pyplot.plot(a[dev].value_in(units.AU),eps[dev],"g+")
print "max da,deps:",da.max(), deps.max()
print "time:",t2-t1
# pyplot.show()
return t2-t1,da.max(),deps.max()
def test_kepler_almost_parabolic( tend=1,method=0):
code=Kepler(redirection="none")
code.set_method(method)
mass1=1.| nbody_system.mass
mass2=0| nbody_system.mass
semimajor_axis=1.|nbody_system.length
eccentricity=0.9999999
p=2*numpy.pi*(semimajor_axis**3/nbody_system.G/mass1)**0.5
tend=tend*p
print tend
parts=new_binary_from_orbital_elements(
mass1,
mass2,
semimajor_axis,
eccentricity = eccentricity,
true_anomaly = 0.0102121
)
code.central_particle.add_particle(parts[0])
code.orbiters.add_particle(parts[1])
a0,eps0=elements(mass1,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
print orbital_elements_from_binary(code.particles[0:2])
t1=time.time()
code.evolve_model(tend)
t2=time.time()
print orbital_elements_from_binary(code.particles[0:2])
print code.orbiters.position
a,eps=elements(mass1,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
da=abs((a-a0)/a0)
deps=abs(eps-eps0)/eps0
print da,deps
print "time:",t2-t1
def test_kepler_parabolic( tend=1,method=0, sign=+1):
code=Kepler(redirection="none")
code.set_method(method)
sun=Particle()
sun.mass=1. | nbody_system.mass
sun.x=0. | nbody_system.length
sun.y=0. | nbody_system.length
sun.z=0. | nbody_system.length
sun.vx=0. | nbody_system.speed
sun.vy=0. | nbody_system.speed
sun.vz=0. | nbody_system.speed
comet=Particle()
comet.mass= 0 | nbody_system.mass
comet.x=1. | nbody_system.length
comet.y=0. | nbody_system.length
comet.z=0. | nbody_system.length
comet.vx=0. | nbody_system.speed
comet.vy=(1.0 + sign * 1.0e-10)*(2*nbody_system.G*sun.mass/comet.x)**0.5
comet.vz=0. | nbody_system.speed
tend=tend | nbody_system.time
print tend
code.central_particle.add_particle(sun)
code.orbiters.add_particle(comet)
a0,eps0=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
print orbital_elements_from_binary(code.particles[0:2])
t1=time.time()
code.evolve_model(tend)
t2=time.time()
print orbital_elements_from_binary(code.particles[0:2])
print code.orbiters.position
a,eps=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
da=abs((a-a0)/a0)
deps=abs(eps-eps0)/eps0
print da,deps
print "time:",t2-t1
def crash_test(method=1):
code=Kepler(redirection="none")
code.set_method(method)
smu=1.224744871391589
mu=smu**2
r0=2.787802728537455
rv0=-0.9899959571994231
alpha=0.01380749549277993
smudt=2.809925892593303
v02=(mu*(2/r0-alpha))
vx=rv0
vy=(v02-vx**2)**0.5
sun=Particle()
sun.mass=mu | nbody_system.mass
sun.x=0. | nbody_system.length
sun.y=0. | nbody_system.length
sun.z=0. | nbody_system.length
sun.vx=0. | nbody_system.speed
sun.vy=0. | nbody_system.speed
sun.vz=0. | nbody_system.speed
comet=Particle()
comet.mass= 0 | nbody_system.mass
comet.x=r0| nbody_system.length
comet.y=0. | nbody_system.length
comet.z=0. | nbody_system.length
comet.vx=vx | nbody_system.speed
comet.vy=vy | nbody_system.speed
comet.vz=0. | nbody_system.speed
tend=(smudt/smu) | nbody_system.time
print tend
code.central_particle.add_particle(sun)
code.orbiters.add_particle(comet)
a0,eps0=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
print orbital_elements_from_binary(code.particles[0:2])
t1=time.time()
code.evolve_model(tend)
t2=time.time()
print orbital_elements_from_binary(code.particles[0:2])
print code.orbiters.position
a,eps=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
da=abs((a-a0)/a0)
deps=abs(eps-eps0)/eps0
print da,deps
print "time:",t2-t1
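# crash_test2 replays, bit-for-bit, an initial state that (judging by the name)
# once made the solver fail: the float.fromhex literals pin down the exact
# IEEE-754 values, and the disabled struct.unpack block documents essentially
# the same state as raw hex strings.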
def crash_test2(method=1):
code=Kepler(redirection="none")
code.set_method(method)
"""
mu=struct.unpack('!d','3ff7ffffffffffff'.decode('hex'))[0]
dt=struct.unpack('!d','40025ab746b00001'.decode('hex'))[0]
pos1=struct.unpack('!d','bfed36dc82998ed4'.decode('hex'))[0]
pos2=struct.unpack('!d','40051297fc6e5256'.decode('hex'))[0]
pos3=struct.unpack('!d','0000000000000000'.decode('hex'))[0]
vel1=struct.unpack('!d','3fb09d8008ba33b9'.decode('hex'))[0]
vel2=struct.unpack('!d','bff06788b551b81d'.decode('hex'))[0]
vel3=struct.unpack('!d','0000000000000000'.decode('hex'))[0]
"""
mu=float.fromhex("0x1.8p+0")
dt=float.fromhex("0x1.25ab746bp+1")
pos1=float.fromhex("-0x1.d36dc82998ed4p-1")
pos2=float.fromhex("0x1.51297fc6e5256p+1")
pos3=float.fromhex("0x0p+0")
vel1=float.fromhex("0x1.09d8008ba33b9p-4")
vel2=float.fromhex("-0x1.06788b551b81ep+0")
vel3=float.fromhex("0x0p+0")
sun=Particle()
sun.mass=mu | nbody_system.mass
sun.x=0. | nbody_system.length
sun.y=0. | nbody_system.length
sun.z=0. | nbody_system.length
sun.vx=0. | nbody_system.speed
sun.vy=0. | nbody_system.speed
sun.vz=0. | nbody_system.speed
comet=Particle()
comet.mass= 0 | nbody_system.mass
comet.x=pos1 | nbody_system.length
comet.y=pos2 | nbody_system.length
comet.z=pos3 | nbody_system.length
comet.vx=vel1 | nbody_system.speed
comet.vy=vel2 | nbody_system.speed
comet.vz=vel3 | nbody_system.speed
tend=dt | nbody_system.time
print tend,mu
code.central_particle.add_particle(sun)
code.orbiters.add_particle(comet)
a0,eps0=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
print orbital_elements_from_binary(code.particles[0:2])
t1=time.time()
code.evolve_model(tend)
t2=time.time()
print orbital_elements_from_binary(code.particles[0:2])
print code.orbiters.position
a,eps=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
da=abs((a-a0)/a0)
deps=abs(eps-eps0)/eps0
print da,deps
print "time:",t2-t1
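# test_softening runs the same kind of bit-exact reproduction as above, but with
# a nonzero epsilon_squared parameter (presumably a Plummer-type softening
# length squared) set on the code before evolving.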
def test_softening(method=1):
code=Kepler(redirection="none")
code.set_method(method)
dt=float.fromhex("0x1.67b39e372f04dp+4")
mu=float.fromhex("0x1.fffffffffffdfp-3")
e2=float.fromhex("0x1.0000000000003p+0")
pos1=float.fromhex("0x1.1b76542265052p-1")
pos2=float.fromhex("0x1.0c4dbda42097cp-6")
pos3=float.fromhex("0x1.54fd66cd1e212p-3")
vel1=float.fromhex("0x1.d6ef43d58ca7ep-2")
vel2=float.fromhex("0x1.7a85379e59794p-2")
vel3=float.fromhex("-0x1.5421044d1acffp-1")
sun=Particle()
sun.mass=mu | nbody_system.mass
sun.x=0. | nbody_system.length
sun.y=0. | nbody_system.length
sun.z=0. | nbody_system.length
sun.vx=0. | nbody_system.speed
sun.vy=0. | nbody_system.speed
sun.vz=0. | nbody_system.speed
comet=Particle()
comet.mass= 0 | nbody_system.mass
comet.x=pos1 | nbody_system.length
comet.y=pos2 | nbody_system.length
comet.z=pos3 | nbody_system.length
comet.vx=vel1 | nbody_system.speed
comet.vy=vel2 | nbody_system.speed
comet.vz=vel3 | nbody_system.speed
tend=dt | nbody_system.time
print tend,mu
code.central_particle.add_particle(sun)
code.orbiters.add_particle(comet)
code.parameters.epsilon_squared = e2 | nbody_system.length**2
a0,eps0=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
print orbital_elements_from_binary(code.particles[0:2])
t1=time.time()
code.evolve_model(tend)
t2=time.time()
print orbital_elements_from_binary(code.particles[0:2])
print code.orbiters.position
a,eps=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
da=abs((a-a0)/a0)
deps=abs(eps-eps0)/eps0
print da,deps
print "time:",t2-t1
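# t_linear follows a purely radial orbit (zero tangential velocity, hence zero
# angular momentum): the test body starts at rest at x = 1 and falls straight
# toward the central mass, and the trajectory is sampled N times per analytic
# period p = 2*pi*sqrt(a^3/(G*M)).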
def t_linear(tend=1,N=100,method=0):
code=Kepler(redirection="none")
code.set_method(method)
mass=1. | nbody_system.mass
x=1. | nbody_system.length
vx=0 | nbody_system.speed
e=0.5*vx**2-nbody_system.G*mass/x
semimajor_axis=-nbody_system.G*mass/2/e
p=2*numpy.pi*(semimajor_axis**3/nbody_system.G/mass)**0.5
print semimajor_axis
print p
tend=tend*p
dt=p/N
sun=Particle()
sun.mass=mass
sun.x=0. | nbody_system.length
sun.y=0. | nbody_system.length
sun.z=0. | nbody_system.length
sun.vx=0. | nbody_system.speed
sun.vy=0. | nbody_system.speed
sun.vz=0. | nbody_system.speed
comet=Particle()
comet.mass= 0 | nbody_system.mass
comet.x=x
comet.y=0. | nbody_system.length
comet.z=0. | nbody_system.length
comet.vx=vx
comet.vy=0. | nbody_system.speed
comet.vz=0. | nbody_system.speed
code.central_particle.add_particle(sun)
code.orbiters.add_particle(comet)
a0,eps0=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
print orbital_elements_from_binary(code.particles[0:2])
#pyplot.ion()
#f=pyplot.figure(figsize=(8,6))
#pyplot.show()
tnow=0*tend
time=[]
xs=[]
while tnow<tend:
tnow+=dt
print tnow,int(tnow/dt)
code.evolve_model(tnow)
#f.clf()
time.append(tnow/tend)
xs.append(code.orbiters.x[0].number)
#pyplot.plot(time,xs,"r+")
#pyplot.xlim(-0.1,1.1)
#pyplot.ylim(-1.1,3.1)
#pyplot.draw()
print orbital_elements_from_binary(code.particles[0:2])
print code.orbiters.position
a,eps=elements(sun.mass,code.orbiters.x,code.orbiters.y,code.orbiters.z,
code.orbiters.vx,code.orbiters.vy,code.orbiters.vz,G=nbody_system.G)
da=abs((a-a0)/a0)
deps=abs(eps-eps0)/eps0
print da,deps
raw_input()
if __name__=="__main__":
for method in [1,0]:
t_linear(N=100,method=method)
print
print "-"*10
print
tend = 1.0
for method in [1,0]:
crash_test(method=method)
print
print "-"*10
print
for method in [1,0]:
crash_test2(method=method)
print
print "-"*10
print
for method in [1,0]:
test_kepler_parabolic(tend=tend,method=method, sign=+1)
print
print "-"*10
print
for method in [1,0]:
test_kepler_parabolic(tend=tend,method=method, sign=-1)
print
print "-"*10
print
for method in [1,0]:
test_kepler_almost_parabolic(tend=tend,method=method)
print
print "-"*10
print
for method in [1,0]:
test_kepler(N=10000,tend=tend | units.yr,method=method)
print
print "-"*10
print
for method in [0,]:
test_softening(method=method)
print
```
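The tests above lean on the element-to-state-vector helpers from `amuse.ext.orbital_elements`; a self-contained round trip of those two functions (a sketch assuming only a standard AMUSE installation, no community code) looks roughly like this:

```python
from amuse.units import nbody_system
from amuse.ext.orbital_elements import (
    new_binary_from_orbital_elements,
    orbital_elements_from_binary,
)

# Construct a bound pair from elements, then recover the elements again.
binary = new_binary_from_orbital_elements(
    1.0 | nbody_system.mass,
    0.1 | nbody_system.mass,
    2.0 | nbody_system.length,
    eccentricity=0.3,
    true_anomaly=0.0,
)

# In N-body units no explicit G is needed (the tests above do the same); the
# returned tuple contains the masses and the recovered orbital elements.
print(orbital_elements_from_binary(binary))
```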
#### File: community/mpiamrvac/interface.py
```python
from amuse.community import *
from amuse.community.interface.hydro import HydrodynamicsInterface
from amuse.support.options import OptionalAttributes, option
from amuse.units import generic_unit_system
from amuse.community.interface.common import CommonCode
import os
class MpiAmrVacInterface(CodeInterface, HydrodynamicsInterface, StoppingConditionInterface,
CodeWithDataDirectories):
use_modules = ['mpiamrvac_interface', 'StoppingConditions']
MODE_NORMAL = 'normal'
MODE_3D = '3d'
MODE_3D_ACC = '3d-acc'
MODE_2D = '2d'
MODE_2D_ACC = '2d-acc'
MODE_1D = '1d'
MODE_1D_ACC = '1d-acc'
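    # The MODE_* constants select which precompiled worker binary is launched
    # (1D/2D/3D, each optionally in an "acc" variant); the mapping is done in
    # name_of_the_worker() below.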
def __init__(self, mode = MODE_NORMAL, **options):
CodeInterface.__init__(self, name_of_the_worker=self.name_of_the_worker(mode), **options)
CodeWithDataDirectories.__init__(self)
self._mode = mode
def name_of_the_worker(self, mode):
if mode == self.MODE_NORMAL or mode == self.MODE_3D:
return 'mpiamrvac_worker'
elif mode == self.MODE_3D_ACC:
return 'mpiamrvac_worker_acc'
elif mode == self.MODE_2D:
return 'mpiamrvac_worker_2d'
elif mode == self.MODE_2D_ACC:
return 'mpiamrvac_worker_2dacc'
elif mode == self.MODE_1D:
return 'mpiamrvac_worker_1d'
elif mode == self.MODE_1D_ACC:
return 'mpiamrvac_worker_1dacc'
else:
return 'mpiamrvac_worker'
#
# options
#
@option(type="string")
def default_parameters_filename(self):
"""
Default parameter file for amrvac; it has empty lists for
all parameters.
"""
if self._mode == self.MODE_2D:
return os.path.join(self.data_directory, 'amrvac_2d.par')
elif self._mode == self.MODE_2D_ACC:
return os.path.join(self.data_directory, 'amrvac_2d-acc.par')
elif self._mode == self.MODE_1D:
return os.path.join(self.data_directory, 'amrvac_1d.par')
elif self._mode == self.MODE_1D_ACC:
return os.path.join(self.data_directory, 'amrvac_1d-acc.par')
elif self._mode == self.MODE_3D_ACC:
return os.path.join(self.data_directory, 'amrvac-acc.par')
else:
return os.path.join(self.data_directory, 'amrvac.par')
#
# parameters
#
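    # The get_/set_ pairs below expose individual MPI-AMRVAC parameters (the
    # entries that would otherwise live in the .par files referenced above) one
    # at a time; each call transfers a single value of the indicated dtype and
    # returns an int32 error code.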
@legacy_function
def set_typeentropy():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typeentropy():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typefull1():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typefull1():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typepred1():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typepred1():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_gamma():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_gamma():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dt():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dt():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_nbufferx1():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_nbufferx1():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_nbufferx2():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_nbufferx2():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_nbufferx3():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_nbufferx3():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_mxnest():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_mxnest():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dixb():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dixb():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_levmin():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_levmin():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_levmax():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_levmax():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_skipfinestep():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_skipfinestep():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_time_advance():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_time_advance():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_courantpar():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_courantpar():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dtpar():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dtpar():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dtdiffpar():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dtdiffpar():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_t():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_t():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_tmax():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_tmax():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dtmin():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dtmin():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_residmin():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_residmin():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_residmax():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_residmax():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_residual():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_residual():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_tfixgrid():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_tfixgrid():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_tvdlfeps():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_tvdlfeps():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_mcbeta():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_mcbeta():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_divbdiff():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_divbdiff():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_smallp():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_smallp():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_smallrho():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_smallrho():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dmaxvel():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dmaxvel():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_tolernr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_tolernr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_absaccnr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_absaccnr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_cfrac():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_cfrac():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_x1ptms():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_x1ptms():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_x2ptms():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_x2ptms():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_x3ptms():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_x3ptms():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_ptmass():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_ptmass():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_ratebdflux():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_ratebdflux():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_normt():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_normt():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_time_bc():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_time_bc():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_it():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_it():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_itmax():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_itmax():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_itmin():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_itmin():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_slowsteps():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_slowsteps():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typepario():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typepario():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_itfixgrid():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_itfixgrid():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_nwauxio():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_nwauxio():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_istep():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_istep():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_nstep():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_nstep():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_errorestimate():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_errorestimate():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_nxdiffusehllc():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_nxdiffusehllc():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typespherical():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typespherical():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_maxitnr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_maxitnr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_nflatgetaux():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_nflatgetaux():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_level_io():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_level_io():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_ncool():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_ncool():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_cmulti():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_cmulti():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_snapshotini():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_snapshotini():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_ixtest1():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_ixtest1():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_ixtest2():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_ixtest2():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_ixtest3():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_ixtest3():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_iwtest():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_iwtest():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_idimtest():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_idimtest():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_saveigrid():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_saveigrid():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typecourant():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typecourant():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typeresid():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typeresid():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typeadvance():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typeadvance():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typelimited():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typelimited():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typesourcesplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typesourcesplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typelimiter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typelimiter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typegradlimiter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typegradlimiter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typeprolonglimit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typeprolonglimit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typetvd():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typetvd():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typetvdlf():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typetvdlf():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typeaverage():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typeaverage():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typedimsplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typedimsplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typeaxial():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typeaxial():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typepoly():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typepoly():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typedivbdiff():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typedivbdiff():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typedivbfix():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typedivbfix():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typediv():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typediv():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typegrad():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typegrad():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typeglm():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typeglm():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_coolcurve():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_coolcurve():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_coolmethod():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_coolmethod():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typeghostfill():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typeghostfill():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typegridfill():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typegridfill():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_filenameout():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_filenameout():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_filenameini():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_filenameini():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_filenamelog():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_filenamelog():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_fileheadout():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_fileheadout():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_wnames():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_wnames():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_primnames():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_primnames():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_typefilelog():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_typefilelog():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_convert_type():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_convert_type():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dxfiletype():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dxfiletype():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_teststr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_teststr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='string', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_time_accurate():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_time_accurate():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_addmpibarrier():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_addmpibarrier():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_tmaxexact():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_tmaxexact():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_treset():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_treset():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_itreset():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_itreset():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_firstprocess():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_firstprocess():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_fixprocess():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_fixprocess():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_flathllc():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_flathllc():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_flatcd():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_flatcd():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_flatsh():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_flatsh():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_flatppm():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_flatppm():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_sourcesplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_sourcesplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_sourceunsplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_sourceunsplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_useprimitive():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_useprimitive():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dimsplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dimsplit():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_restrictprimitive():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_restrictprimitive():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_prolongprimitive():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_prolongprimitive():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_coarsenprimitive():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_coarsenprimitive():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_useprimitiverel():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_useprimitiverel():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_amrentropy():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_amrentropy():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_divbfix():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_divbfix():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_divbwave():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_divbwave():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_compactres():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_compactres():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_bnormlf():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_bnormlf():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_strictnr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_strictnr():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_strictsmall():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_strictsmall():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_strictzero():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_strictzero():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_strictgetaux():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_strictgetaux():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_usecovariant():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_usecovariant():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_nocartesian():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_nocartesian():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_tfix():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_tfix():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_convert():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_convert():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_saveprim():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_saveprim():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_uselimiter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_uselimiter():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
# parameters file
@legacy_function
def set_parameters_filename():
"""
Update name of the parameters file
"""
function = LegacyFunctionSpecification()
function.addParameter('path',
dtype='string',
direction=function.IN,
description = "filename of the parameters file"
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was set
-1 - ERROR
File does not exist
"""
return function
@legacy_function
def get_parameters_filename():
"""
Retrieve name of the parameters file
"""
function = LegacyFunctionSpecification()
function.addParameter('path',
dtype='string',
direction=function.OUT,
description = "filename of the parameters file"
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was retrieved
-1 - ERROR
File does not exist
"""
return function
@legacy_function
def get_current_error():
"""When a function returns an error, this will retrieve
a description (if possible)
"""
function = LegacyFunctionSpecification()
function.addParameter('string',
dtype='string',
direction=function.OUT,
description = "description of the error"
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
The error description was retrieved
-1 - ERROR
No error description is available
"""
return function
#
#
@legacy_function
def initialize_grid():
function = LegacyFunctionSpecification()
function.result_type = 'int32'
return function
@legacy_function
def refine_grid():
function = LegacyFunctionSpecification()
function.addParameter('must_advance', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def get_mesh_size():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('nmeshx', dtype='i', direction=function.OUT)
function.addParameter('nmeshy', dtype='i', direction=function.OUT)
function.addParameter('nmeshz', dtype='i', direction=function.OUT)
function.addParameter('index_of_grid', dtype='i', direction=function.IN)
function.result_type = 'i'
return function
@legacy_function
def get_position_of_index():
function = LegacyFunctionSpecification()
function.must_handle_array = True
for x in ['i','j','k']:
function.addParameter(x, dtype='i', direction=function.IN)
function.addParameter('index_of_grid', dtype='i', direction=function.IN, default = 1)
for x in ['x','y','z']:
function.addParameter(x, dtype='d', direction=function.OUT)
function.addParameter('n', dtype='i', direction=function.LENGTH)
function.result_type = 'i'
return function
@legacy_function
def get_index_of_position():
"""
Retrieves the i,j and k index of the grid cell containing the
given x, y and z position. The cell is looked up
in the grid specified by index_of_grid.
"""
function = LegacyFunctionSpecification()
function.must_handle_array = True
for x in ['x','y','z']:
function.addParameter(x, dtype='d', direction=function.IN)
function.addParameter('index_of_grid', dtype='i', direction=function.IN, default = 1)
for x in ['i','j','k']:
function.addParameter(x, dtype='d', direction=function.OUT)
function.addParameter('n', dtype='i', direction=function.LENGTH)
function.result_type = 'i'
return function
@legacy_function
def get_level_of_grid():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('level', dtype='i', direction=function.OUT)
function.addParameter('index_of_grid', dtype='i', direction=function.IN)
function.result_type = 'i'
return function
@legacy_function
def get_cell_size_of_grid():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('dx', dtype='d', direction=function.OUT)
function.addParameter('dy', dtype='d', direction=function.OUT)
function.addParameter('dz', dtype='d', direction=function.OUT)
function.addParameter('index_of_grid', dtype='i', direction=function.IN)
function.result_type = 'i'
return function
@legacy_function
def setup_mesh():
function = LegacyFunctionSpecification()
function.addParameter('nmeshx', dtype='i', direction=function.IN)
function.addParameter('nmeshy', dtype='i', direction=function.IN)
function.addParameter('nmeshz', dtype='i', direction=function.IN)
function.addParameter('xlength', dtype='d', direction=function.IN)
function.addParameter('ylength', dtype='d', direction=function.IN)
function.addParameter('zlength', dtype='d', direction=function.IN)
function.result_type = 'i'
return function
#
#
#
@legacy_function
def set_boundary_in_code():
function = LegacyFunctionSpecification()
for x in ["xbound1","xbound2","ybound1","ybound2","zbound1","zbound2"]:
function.addParameter(x, dtype='string', direction=function.IN)
function.result_type = 'i'
return function
def set_boundary(self, xbound1, xbound2, ybound1, ybound2, zbound1, zbound2):
map_from_amuse_to_mpiamrvac= {
"reflective": "symm",
"outflow":"limitinflow",
"periodic":"periodic",
"interface": "special",
}
return self.set_boundary_in_code(
map_from_amuse_to_mpiamrvac.setdefault(xbound1, xbound1),
map_from_amuse_to_mpiamrvac.setdefault(xbound2, xbound2),
map_from_amuse_to_mpiamrvac.setdefault(ybound1, ybound1),
map_from_amuse_to_mpiamrvac.setdefault(ybound2, ybound2),
map_from_amuse_to_mpiamrvac.setdefault(zbound1, zbound1),
map_from_amuse_to_mpiamrvac.setdefault(zbound2, zbound2)
)
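# Note: set_boundary translates the AMUSE boundary names into the MPI-AMRVAC
# ones before calling set_boundary_in_code; names that are not in the map
# (e.g. native MPI-AMRVAC types) are passed through unchanged by setdefault.
# A minimal usage sketch (hypothetical values, not part of this file):
#
#   instance.set_boundary("periodic", "periodic",
#                         "reflective", "reflective",
#                         "outflow", "outflow")
#   # -> set_boundary_in_code("periodic", "periodic",
#   #                         "symm", "symm",
#   #                         "limitinflow", "limitinflow")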
#
#
#
@legacy_function
def get_time():
function = LegacyFunctionSpecification()
function.addParameter('time', dtype='d', direction=function.OUT)
function.result_type = 'i'
return function
#
#
#
@legacy_function
def get_acceleration_grid_position_of_index():
function = LegacyFunctionSpecification()
function.must_handle_array = True
for x in ['i','j','k']:
function.addParameter(x, dtype='i', direction=function.IN)
#function.addParameter('index_of_grid', dtype='i', direction=function.IN, default = 1)
for x in ['x','y','z']:
function.addParameter(x, dtype='d', direction=function.OUT)
function.addParameter('n', dtype='i', direction=function.LENGTH)
function.result_type = 'i'
return function
@legacy_function
def get_acceleration_grid_acceleration():
function = LegacyFunctionSpecification()
function.must_handle_array = True
for x in ['i','j','k']:
function.addParameter(x, dtype='i', direction=function.IN)
#function.addParameter('index_of_grid', dtype='i', direction=function.IN, default = 1)
for x in ['a1','a2', 'a3']:
function.addParameter(x, dtype='d', direction=function.OUT)
function.addParameter('n', dtype='i', direction=function.LENGTH)
function.result_type = 'i'
return function
@legacy_function
def set_acceleration_grid_acceleration():
function = LegacyFunctionSpecification()
function.must_handle_array = True
for x in ['i','j','k']:
function.addParameter(x, dtype='i', direction=function.IN)
#function.addParameter('index_of_grid', dtype='i', direction=function.IN, default = 1)
for x in ['a1','a2', 'a3']:
function.addParameter(x, dtype='d', direction=function.IN)
function.addParameter('n', dtype='i', direction=function.LENGTH)
function.result_type = 'i'
return function
@legacy_function
def get_acceleration_grid_size():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('nmeshx', dtype='i', direction=function.OUT)
function.addParameter('nmeshy', dtype='i', direction=function.OUT)
function.addParameter('nmeshz', dtype='i', direction=function.OUT)
function.result_type = 'i'
return function
@legacy_function
def get_grid_acceleration():
function = LegacyFunctionSpecification()
function.must_handle_array = True
for x in ['i','j','k']:
function.addParameter(x, dtype='i', direction=function.IN)
function.addParameter('index_of_grid', dtype='i', direction=function.IN, default = 1)
for x in ['ax','ay','az']:
function.addParameter(x, dtype='d', direction=function.OUT)
function.addParameter('number_of_points', 'i', function.LENGTH)
function.result_type = 'i'
return function
@legacy_function
def set_grid_acceleration():
function = LegacyFunctionSpecification()
function.must_handle_array = True
for x in ['i','j','k']:
function.addParameter(x, dtype='i', direction=function.IN)
for x in ['ax','ay','az']:
function.addParameter(x, dtype='d', direction=function.IN)
function.addParameter('index_of_grid', dtype='i', direction=function.IN, default = 1)
function.addParameter('number_of_points', 'i', function.LENGTH)
function.result_type = 'i'
return function
@legacy_function
def get_hydro_state_at_point():
function = LegacyFunctionSpecification()
for x in ['x','y','z']:
function.addParameter(x, dtype='d', direction=function.IN)
for x in ['vx','vy','vz']:
function.addParameter(x, dtype='d', direction=function.IN, default = 0)
for x in ['rho','rhovx','rhovy','rhovz','rhoe']:
function.addParameter(x, dtype='d', direction=function.OUT)
function.addParameter('npoints', dtype='i', direction=function.LENGTH)
function.result_type = 'i'
function.must_handle_array = True
return function
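# Each @legacy_function above only *describes* a remote call (parameter names,
# dtypes and directions, plus an int32 error code as the result); AMUSE
# generates the actual call to the worker code from the returned
# LegacyFunctionSpecification. The high-level MpiAmrVac class below wraps
# these low-level getters and setters into units-aware methods and parameters.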
class MpiAmrVac(CommonCode):
def __init__(self, unit_converter = None, **options):
self.unit_converter = unit_converter
self.stopping_conditions = StoppingConditions(self)
CommonCode.__init__(self, MpiAmrVacInterface(**options), **options)
self.set_parameters_filename(self.default_parameters_filename)
def define_converter(self, object):
if self.unit_converter is None:
return
object.set_converter(self.unit_converter.as_converter_from_si_to_generic())
def get_index_range_inclusive(self, index_of_grid = 1):
nx, ny, nz = self.get_mesh_size(index_of_grid)
return (0, nx-1, 0, ny-1, 0, nz-1)
def define_properties(self, object):
object.add_property('get_time', public_name = "model_time")
def define_methods(self, object):
object.add_method(
'evolve_model',
(generic_unit_system.time,),
(object.ERROR_CODE,)
)
object.add_method(
'commit_parameters',
(),
(object.ERROR_CODE,)
)
object.add_method(
'get_position_of_index',
(object.INDEX, object.INDEX, object.INDEX, object.INDEX),
(generic_unit_system.length, generic_unit_system.length, generic_unit_system.length, object.ERROR_CODE,)
)
object.add_method(
'get_acceleration_grid_position_of_index',
(object.INDEX, object.INDEX, object.INDEX),
(generic_unit_system.length, generic_unit_system.length, generic_unit_system.length, object.ERROR_CODE,)
)
density = generic_unit_system.density
momentum = generic_unit_system.momentum_density
energy = generic_unit_system.energy_density
acceleration = generic_unit_system.length / generic_unit_system.time ** 2
object.add_method(
'get_acceleration_grid_size',
(),
(object.NO_UNIT,object.NO_UNIT,object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
'get_acceleration_grid_acceleration',
(object.INDEX, object.INDEX, object.INDEX),
(acceleration, acceleration, acceleration, object.ERROR_CODE,)
)
object.add_method(
'set_acceleration_grid_acceleration',
(object.INDEX, object.INDEX, object.INDEX, acceleration, acceleration, acceleration,),
(object.ERROR_CODE,)
)
object.add_method(
'set_grid_energy_density',
(object.INDEX, object.INDEX, object.INDEX, energy, object.INDEX),
(object.ERROR_CODE,)
)
object.add_method(
'set_grid_density',
(object.INDEX, object.INDEX, object.INDEX, density, object.INDEX),
(object.ERROR_CODE,)
)
object.add_method(
'set_grid_momentum_density',
(object.INDEX, object.INDEX, object.INDEX, momentum, momentum, momentum, object.INDEX),
(object.ERROR_CODE,)
)
object.add_method(
'get_grid_energy_density',
(object.INDEX, object.INDEX, object.INDEX, object.INDEX),
( energy,
object.ERROR_CODE,)
)
object.add_method(
'get_grid_density',
(object.INDEX, object.INDEX, object.INDEX, object.INDEX),
(density,
object.ERROR_CODE,)
)
object.add_method(
'get_grid_momentum_density',
(object.INDEX, object.INDEX, object.INDEX, object.INDEX),
( momentum, momentum, momentum,
object.ERROR_CODE,)
)
object.add_method(
'refine_grid',
(),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
'get_level_of_grid',
(object.INDEX),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
"get_gamma",
(),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
"set_gamma",
(object.NO_UNIT, ),
(object.ERROR_CODE,)
)
object.add_method(
"get_typeentropy",
(),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
"set_typeentropy",
(object.NO_UNIT,),
(object.ERROR_CODE,)
)
object.add_method(
"get_typefull1",
(),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
"set_typefull1",
(object.NO_UNIT,),
(object.ERROR_CODE,)
)
object.add_method(
"get_typepred1",
(),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
"set_typepred1",
(object.NO_UNIT,),
(object.ERROR_CODE,)
)
object.add_method(
"get_typeadvance",
(),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
"set_typeadvance",
(object.NO_UNIT,),
(object.ERROR_CODE,)
)
object.add_method(
"get_courantpar",
(),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
"set_courantpar",
(object.NO_UNIT, ),
(object.ERROR_CODE,)
)
object.add_method(
"get_mxnest",
(),
(object.NO_UNIT, object.ERROR_CODE,)
)
object.add_method(
"set_mxnest",
(object.NO_UNIT, ),
(object.ERROR_CODE,)
)
object.add_method(
'get_time',
(),
(generic_unit_system.time, object.ERROR_CODE,)
)
object.add_method(
'setup_mesh',
(object.NO_UNIT, object.NO_UNIT, object.NO_UNIT, generic_unit_system.length, generic_unit_system.length, generic_unit_system.length, ),
(object.ERROR_CODE,)
)
object.add_method(
'set_boundary',
(object.NO_UNIT, object.NO_UNIT, object.NO_UNIT, object.NO_UNIT, object.NO_UNIT, object.NO_UNIT,),
(object.ERROR_CODE,)
)
object.add_method(
'set_grid_acceleration',
(object.INDEX, object.INDEX, object.INDEX, acceleration, acceleration, acceleration, object.INDEX),
(object.ERROR_CODE,)
)
object.add_method(
'get_grid_acceleration',
(object.INDEX, object.INDEX, object.INDEX, object.INDEX),
(acceleration, acceleration, acceleration, object.ERROR_CODE,)
)
object.add_method(
'get_hydro_state_at_point',
(generic_unit_system.length, generic_unit_system.length, generic_unit_system.length,
generic_unit_system.speed, generic_unit_system.speed, generic_unit_system.speed),
(generic_unit_system.density, generic_unit_system.momentum_density, generic_unit_system.momentum_density,
generic_unit_system.momentum_density, generic_unit_system.energy_density, object.ERROR_CODE)
)
self.stopping_conditions.define_methods(object)
def define_parameters(self, object):
object.add_method_parameter(
"get_gamma",
"set_gamma",
"gamma",
"ratio of specific heats used in equation of state",
default_value = 1.6666666666666667
)
object.add_method_parameter(
"get_typeentropy",
"set_typeentropy",
"entropy_type",
"type of the entropy",
default_value = 'nul'
)
object.add_method_parameter(
"get_typefull1",
"set_typefull1",
"spatial_discretization_method",
"the spatial discretization method used for the time integration per activated grid leve",
default_value = 'tvdmu'
)
object.add_method_parameter(
"get_typepred1",
"set_typepred1",
"predictor_step_discretization_method",
"the precitor step discretization method (only used when integration procedure is twostep')",
default_value = 'tvdmu'
)
object.add_method_parameter(
"get_typeadvance",
"set_typeadvance",
"time_integration_procedure",
"time integration procedure",
default_value = 'twostep'
)
object.add_method_parameter(
"get_courantpar",
"set_courantpar",
"courant_number",
"CFL number",
default_value = 0.7
)
object.add_caching_parameter(
"setup_mesh",
"nmeshx",
"nx",
"number of cells in the x direction",
10,
)
object.add_caching_parameter(
"setup_mesh",
"nmeshy",
"ny",
"number of cells in the y direction",
10,
)
object.add_caching_parameter(
"setup_mesh",
"nmeshz",
"nz",
"number of cells in the z direction",
10,
)
object.add_caching_parameter(
"setup_mesh",
"xlength",
"length_x",
"length of model in the x direction",
10 | generic_unit_system.length,
)
object.add_caching_parameter(
"setup_mesh",
"ylength",
"length_y",
"length of model in the x direction",
10 | generic_unit_system.length,
)
object.add_caching_parameter(
"setup_mesh",
"zlength",
"length_z",
"length of model in the z direction",
10 | generic_unit_system.length,
)
object.add_vector_parameter(
"mesh_size",
"number of cells in the x, y and z directions",
("nx", "ny", "nz")
)
object.add_vector_parameter(
"mesh_length",
"length of the model in the x, y and z directions",
("length_x", "length_y", "length_z")
)
object.add_caching_parameter(
"set_boundary",
"xbound1",
"xbound1",
"boundary conditions on first (inner, left) X boundary",
"reflective",
)
object.add_caching_parameter(
"set_boundary",
"xbound2",
"xbound2",
"boundary conditions on second (outer, right) X boundary",
"reflective",
)
object.add_caching_parameter(
"set_boundary",
"ybound1",
"ybound1",
"boundary conditions on first (inner, front) Y boundary",
"reflective",
)
object.add_caching_parameter(
"set_boundary",
"ybound2",
"ybound2",
"boundary conditions on second (outer, back) Y boundary",
"reflective",
)
object.add_caching_parameter(
"set_boundary",
"zbound1",
"zbound1",
"boundary conditions on first (inner, bottom) Z boundary",
"reflective",
)
object.add_caching_parameter(
"set_boundary",
"zbound2",
"zbound2",
"boundary conditions on second (outer, top) Z boundary",
"reflective",
)
object.add_vector_parameter(
"x_boundary_conditions",
"boundary conditions for the X directorion",
("xbound1", "xbound2")
)
object.add_vector_parameter(
"y_boundary_conditions",
"boundary conditions for the Y directorion",
("ybound1", "ybound2")
)
object.add_vector_parameter(
"z_boundary_conditions",
"boundary conditions for the Z directorion",
("zbound1", "zbound2")
)
object.add_method_parameter(
"get_mxnest",
"set_mxnest",
"maximum_number_of_grid_levels",
"the maximum number of grid levels that can be used during the simulation, including the base grid level",
default_value = 3
)
object.add_method_parameter(
"get_time_accurate",
"set_time_accurate",
"time_accurate",
"if false will evolve to the given time, if true will take accurate steps using courant timesteps",
default_value = True
)
object.add_method_parameter(
"get_dtpar",
"set_dtpar",
"timestep",
"if greater than zero will fix the timestep to the given value",
default_value = 3
)
self.stopping_conditions.define_parameters(object)
def commit_parameters(self):
self.parameters.send_cached_parameters_to_code()
self.overridden().commit_parameters()
def get_acceleration_grid_index_range_inclusive(self):
nx, ny, nz = self.get_acceleration_grid_size()
return (1, nx, 1, ny, 1, nz)
def define_particle_sets(self, object):
object.define_grid('acceleration_grid')
object.set_grid_range('acceleration_grid', 'get_acceleration_grid_index_range_inclusive')
object.add_getter('acceleration_grid', 'get_acceleration_grid_position_of_index', names=('x','y','z'))
object.add_getter('acceleration_grid', 'get_acceleration_grid_acceleration', names=('ax','ay','az'))
object.add_setter('acceleration_grid', 'set_acceleration_grid_acceleration', names=('ax','ay','az'))
def itergrids(self):
n = self.get_number_of_grids()
for x in range(1,n+1):
yield self._create_new_grid(self.specify_grid, index_of_grid = x)
def specify_grid(self, definition, index_of_grid = 1):
definition.set_grid_range('get_index_range_inclusive')
definition.add_getter('get_position_of_index', names=('x','y','z'))
definition.add_setter('set_grid_density', names=('rho',))
definition.add_setter('set_grid_momentum_density', names=('rhovx','rhovy','rhovz'))
definition.add_setter('set_grid_energy_density', names=('energy',))
definition.add_getter('get_grid_density', names=('rho',))
definition.add_getter('get_grid_momentum_density', names=('rhovx','rhovy','rhovz'))
definition.add_getter('get_grid_energy_density', names=('energy',))
definition.add_getter('get_grid_acceleration', names=('ax','ay','az'))
definition.add_setter('set_grid_acceleration', names=('ax','ay','az'))
definition.define_extra_keywords({'index_of_grid':index_of_grid})
def define_state(self, object):
CommonCode.define_state(self, object)
#object.add_transition('END', 'INITIALIZED', 'initialize_code', False)
object.add_transition('INITIALIZED','EDIT','commit_parameters')
object.add_transition('RUN','CHANGE_PARAMETERS_RUN','before_set_parameter', False)
object.add_transition('EDIT','CHANGE_PARAMETERS_EDIT','before_set_parameter', False)
object.add_transition('CHANGE_PARAMETERS_RUN','RUN','recommit_parameters')
object.add_transition('CHANGE_PARAMETERS_EDIT','EDIT','recommit_parameters')
object.add_method('CHANGE_PARAMETERS_RUN', 'before_set_parameter')
object.add_method('CHANGE_PARAMETERS_EDIT', 'before_set_parameter')
object.add_method('CHANGE_PARAMETERS_RUN', 'before_get_parameter')
object.add_method('CHANGE_PARAMETERS_EDIT', 'before_get_parameter')
object.add_method('RUN', 'before_get_parameter')
object.add_method('EDIT', 'before_get_parameter')
object.add_transition('EDIT', 'RUN', 'initialize_grid')
object.add_method('RUN', 'evolve_model')
object.add_method('RUN', 'get_hydro_state_at_point')
for state in ['EDIT', 'RUN']:
for methodname in [
'get_grid_density',
'set_grid_density',
'set_grid_energy_density',
'get_grid_energy_density',
'get_grid_momentum_density',
'set_grid_momentum_density',
'get_position_of_index',
'get_index_of_position',
'set_grid_scalar',
'get_grid_scalar',
'get_mesh_size',
'get_acceleration_grid_acceleration',
'set_acceleration_grid_acceleration',
'get_acceleration_grid_size',
'get_mesh_size',
'get_number_of_grids',
'get_level_of_grid',
'refine_grid'
]:
object.add_method(state, methodname)
self.stopping_conditions.define_state(object)
```
#### File: community/ph4/test_sync.py
```python
import math
import collections
import getopt
import numpy
import os
import random
import sys
import unittest
from time import clock as cputime
from time import time as wallclocktime
from amuse.community.ph4.interface import ph4 as grav
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
from amuse.datamodel import particle_attributes as pa
from amuse.rfi.core import is_mpd_running
from amuse.ic.plummer import new_plummer_model
from amuse.ic.salpeter import new_salpeter_mass_distribution_nbody
def print_log(pre, time, gravity, E0 = 0.0 | nbody_system.energy,
cpu0 = 0.0, wall0 = 0.0):
# Standard log output.
cpu = cputime()
wall = wallclocktime()
N = len(gravity.particles)
M = gravity.total_mass
U = gravity.potential_energy
T = gravity.kinetic_energy
Etop = T + U
E = Etop
if E0 == 0 | nbody_system.energy: E0 = E
Rvir = -0.5*M*M/U
Q = -T/U
com = pa.center_of_mass(gravity.particles)
comv = pa.center_of_mass_velocity(gravity.particles)
if N >= 100:
dcen,rcore,rhocore \
= pa.densitycentre_coreradius_coredens(gravity.particles)
cmx,cmy,cmz = dcen
lagr,mf = pa.LagrangianRadii(gravity.particles, cm=dcen) # no units!
print ''
print pre+"time=", time.number
print pre+"cpu=", cpu-cpu0
print pre+"wall=", wall-wall0
print pre+"Ntot=", N
print pre+"mass=", M.number
print pre+"Etot=", E.number
print pre+"dE/E=", E/E0 - 1
print pre+"Rvir=", Rvir.number
print pre+"Qvir=", Q
cmx,cmy,cmz = com
print pre+"cmpos[3]= %.8f %.8f %.8f" % (cmx.number, cmy.number, cmz.number)
cmx,cmy,cmz = comv
print pre+"cmvel[3]= %.8f %.8f %.8f" % (cmx.number, cmy.number, cmz.number)
if N >= 100:
cmx,cmy,cmz = dcen
print pre+"dcpos[3]= %.8f %.8f %.8f" \
% (cmx.number, cmy.number, cmz.number)
print pre+"Rcore=", rcore.number
print pre+"Mlagr[9]=",
for m in mf: print "%.4f" % (m),
print ''
print pre+"Rlagr[9]=",
for r in lagr.number: print "%.8f" % (r),
print ''
sys.stdout.flush()
return E,cpu,wall
def run_ph4(infile = None, number_of_stars = 40,
end_time = 10 | nbody_system.time,
delta_t = 1 | nbody_system.time,
n_workers = 1, use_gpu = 1, gpu_worker = 1, gpu_id = -1,
accuracy_parameter = 0.1,
softening_length = -1 | nbody_system.length,
manage_encounters = 1):
if infile != None: print "input file =", infile
print "end_time =", end_time.number
print "delta_t =", delta_t.number
print "n_workers =", n_workers
print "use_gpu =", use_gpu
print "manage_encounters =", manage_encounters
print "initializing the gravity module"
sys.stdout.flush()
# Note that there are actually three GPU options to test:
#
# 1. use the GPU code and allow GPU use (default)
# 2. use the GPU code but disable GPU use (-g)
# 3. use the non-GPU code (-G)
#print "1"; sys.stdout.flush()
gpu = 0
if gpu_worker == 1:
try:
gravity = grav(number_of_workers = n_workers,
redirection = "none", mode = "gpu")
# debugger='valgrind')
gpu = 1
except Exception as ex:
print \
'*** GPU worker code not found. Reverting to non-GPU code. ***'
gpu = 0
if gpu == 0:
gravity = grav(number_of_workers = n_workers,
redirection = "none")
# debugger='valgrind')
#print "2"; sys.stdout.flush()
gravity.initialize_code()
#print "3"; sys.stdout.flush()
gravity.parameters.set_defaults()
gravity.parameters.gpu_id = gpu_id
#-----------------------------------------------------------------
#print "4"; sys.stdout.flush()
print "making a Plummer model"
stars = new_plummer_model(number_of_stars)
id = numpy.arange(number_of_stars)
stars.id = id+1
print "setting particle masses and radii"
stars.mass = (1.0 / number_of_stars) | nbody_system.mass
if 0:
scaled_mass = new_salpeter_mass_distribution_nbody(number_of_stars)
stars.mass = scaled_mass
stars.radius = 0.0 | nbody_system.length
print "centering stars"
stars.move_to_center()
if 0:
print "scaling stars to virial equilibrium"
stars.scale_to_standard(smoothing_length_squared
= gravity.parameters.epsilon_squared)
time = 0.0 | nbody_system.time
sys.stdout.flush()
#-----------------------------------------------------------------
#print "5"; sys.stdout.flush()
if softening_length == -1 | nbody_system.length:
eps2 = 0.25*(float(number_of_stars))**(-0.666667) \
| nbody_system.length**2
else:
eps2 = softening_length*softening_length
#print "6"; sys.stdout.flush()
gravity.parameters.timestep_parameter = accuracy_parameter
gravity.parameters.epsilon_squared = eps2
gravity.parameters.use_gpu = use_gpu
gravity.parameters.manage_encounters = manage_encounters
print "adding particles"
# print stars
sys.stdout.flush()
gravity.particles.add_particles(stars)
gravity.commit_particles()
print ''
print "number_of_stars =", number_of_stars
sys.stdout.flush()
E0,cpu0,wall0 = print_log('', time, gravity)
# Channel to copy values from the code to the set in memory.
channel = gravity.particles.new_channel_to(stars)
stopping_condition = gravity.stopping_conditions.collision_detection
stopping_condition.enable()
#-----------------------------------------------------------------
cpu0 = cputime()
t0 = 0.
pi = math.pi
times = [1., 2., pi, 4*pi/3, 5., 2*pi, 2*pi + pi/100,
2*pi + pi/5, 7., 8., 3*pi, 10.]
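# The target times deliberately mix integers with irrational multiples of pi,
# so that evolve_model has to synchronize the system at times that do not fall
# on the code's internal block-step boundaries (this is what force_sync below
# is expected to exercise).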
gravity.parameters.force_sync = 1 # stays set until explicitly unset
for t in times:
time = t|nbody_system.time
print "\nEvolving to time", time
sys.stdout.flush()
gravity.parameters.block_steps = 0
gravity.parameters.total_steps = 0
gravity.evolve_model(time)
dt = t - t0
t0 = t
cpu = cputime()
dcpu = cpu - cpu0
cpu0 = cpu
# Ensure that the stars list is consistent with the internal
# data in the module.
ls = len(stars)
# Update the bookkeeping: synchronize stars with the module data.
try:
gravity.update_particle_set()
gravity.particles.synchronize_to(stars)
except:
pass
# Copy values from the module to the set in memory.
channel.copy()
# Copy the index (ID) as used in the module to the id field in
# memory. The index is not copied by default, as different
# codes may have different indices for the same particle and
# we don't want to overwrite silently.
channel.copy_attribute("index_in_code", "id")
if stopping_condition.is_set():
star1 = stopping_condition.particles(0)[0]
star2 = stopping_condition.particles(1)[0]
print '\nstopping condition set at time', \
gravity.get_time().number,'for:\n'
print star1
print ''
print star2
print ''
raise Exception("no encounter handling")
if len(stars) != ls:
if 0:
print "stars:"
for s in stars:
print " ", s.id.number, s.mass.number, \
s.x.number, s.y.number, s.z.number
else:
print "number of stars =", len(stars)
sys.stdout.flush()
print_log('', time, gravity, E0, cpu0, wall0)
print '@@@'
print '@@@ t =', time.number, ' dt =', dt
print '@@@ sync_time =', gravity.parameters.sync_time.number
print '@@@ dcpu/dt =', dcpu/dt
nb = gravity.parameters.block_steps
ns = gravity.parameters.total_steps
print '@@@ d(block_steps) =', nb, ' #/dt =', nb/dt
print '@@@ d(total steps) =', ns, ' #/dt =', ns/dt
#print stars
sys.stdout.flush()
#-----------------------------------------------------------------
print ''
gravity.stop()
if __name__ == '__main__':
infile = None
N = 100
t_end = 5.0 | nbody_system.time
delta_t = 1.0 | nbody_system.time
n_workers = 1
use_gpu = 1
gpu_worker = 1
gpu_id = -1
accuracy_parameter = 0.1
softening_length = 0.01 | nbody_system.length
random_seed = -1
manage_encounters = 1
try:
opts, args = getopt.getopt(sys.argv[1:], "a:c:d:e:f:gGi:n:s:t:w:")
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
for o, a in opts:
if o == "-a":
accuracy_parameter = float(a)
elif o == "-c":
manage_encounters = int(a)
elif o == "-d":
delta_t = float(a) | nbody_system.time
elif o == "-e":
softening_length = float(a) | nbody_system.length
elif o == "-f":
infile = a
elif o == "-g":
use_gpu = 0
elif o == "-G":
use_gpu = 0
gpu_worker = 0
elif o == "-i":
gpu_id = int(a)
elif o == "-n":
N = int(a)
elif o == "-s":
random_seed = int(a)
elif o == "-t":
t_end = float(a) | nbody_system.time
elif o == "-w":
n_workers = int(a)
else:
print "unexpected argument", o
if random_seed <= 0:
numpy.random.seed()
random_seed = numpy.random.randint(1, pow(2,31)-1)
numpy.random.seed(random_seed)
print "random seed =", random_seed
#os.system('env')
assert is_mpd_running()
run_ph4(infile, N, t_end, delta_t, n_workers,
use_gpu, gpu_worker, gpu_id,
accuracy_parameter, softening_length,
manage_encounters)
```
#### File: ph4/util/run_ph4.py
```python
import collections
import getopt
import numpy
import os
import random
import sys
import pickle
import math
import unittest
from time import clock
from amuse.community.ph4.interface import ph4 as grav
from amuse.community.smalln.interface import SmallN
from amuse.community.kepler.interface import Kepler
from amuse.couple import multiples
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import quantities
from amuse import datamodel
from amuse.datamodel import particle_attributes as pa
from amuse.rfi.core import is_mpd_running
from amuse.ic.plummer import new_plummer_model
from amuse.ic.salpeter import new_salpeter_mass_distribution_nbody
from amuse import io
from utils import *
def handle_callback (time, star1, star2):
print ''
print ' callback'
print ' ', time
print ' ', star1
print ' ', star2
print ''
return True # NOTE: returning False will skip this encounter
def run_ph4(initial_file = None,
end_time = 0 | nbody_system.time,
input_delta_t = 0.0 | nbody_system.time,
input_Delta_t = 1.0 | nbody_system.time,
input_timestep_parameter = 0.0,
input_softening_length = -1.0 | nbody_system.length,
n_workers = 1, use_gpu = 1, gpu_worker = 1,
use_multiples = True,
save_restart = False,
strict_restart = False
):
# Read an N-body system from a file and run it to the specified
# time using the specified steps. Print log information and
# optionally save a restart file after every step. If the
# specified time is less than the time in the initial file, don't
# take a step, but still print out the log info. (Hence run_ph4
# also functions like Starlab sys_stats.)
print "initial_file =", initial_file
print "end_time =", end_time.number
print "n_workers =", n_workers
print "use_gpu =", use_gpu
print "use_multiples =", use_multiples
print "save_restart =", save_restart
print "strict_restart =", strict_restart
print "\ninitializing the gravity module"
sys.stdout.flush()
init_smalln()
# Note that there are actually three GPU options:
#
# 1. use the GPU code and allow GPU use (default)
# 2. use the GPU code but disable GPU use (-g)
# 3. use the non-GPU code (-G)
if gpu_worker == 1:
try:
gravity = grav(number_of_workers = n_workers,
redirection = "none", mode = "gpu")
except Exception as ex:
gravity = grav(number_of_workers = n_workers,
redirection = "none")
else:
gravity = grav(number_of_workers = n_workers,
redirection = "none")
gravity.initialize_code()
gravity.parameters.set_defaults()
kep = Kepler(None, redirection = "none")
kep.initialize_code()
stars, time, delta_t, E0, cpu0, multiples_code \
= read_state_from_file(initial_file, gravity, kep)
# Allow overrides of the restored data (OK for delta_t, NOT
# recommended for timestep_parameter or softening_length). Note
# that reading the state also commits the particles, and hence
# calculates the initial time steps. Probably should reinitialize
# if timestep_parameter or softening_length are changed. TODO
if input_delta_t.number > 0:
if input_delta_t != delta_t:
print 'modifying delta_t from stored', delta_t, \
'to input', input_delta_t
delta_t = input_delta_t
else:
print "using stored delta_t =", delta_t
print input_timestep_parameter
print gravity.parameters.timestep_parameter
if input_timestep_parameter > 0:
if input_timestep_parameter != gravity.parameters.timestep_parameter:
print 'modifying timestep_parameter from stored', \
gravity.parameters.timestep_parameter, \
'to input', input_timestep_parameter
gravity.parameters.timestep_parameter \
= input_timestep_parameter
else:
print 'timestep_parameter =', gravity.parameters.timestep_parameter
if input_softening_length.number >= 0:
if input_softening_length*input_softening_length \
!= gravity.parameters.epsilon_squared:
print 'modifying softening_length from stored', \
gravity.parameters.epsilon_squared.sqrt(), \
'to input', input_softening_length
gravity.parameters.epsilon_squared \
= input_softening_length*input_softening_length
else:
print 'softening length =', gravity.parameters.epsilon_squared.sqrt()
gravity.parameters.use_gpu = use_gpu
gravity.parameters.begin_time = time
if 0:
print ''
print gravity.parameters.begin_time
print stars.mass
#print stars.position
for s in stars:
print '%.18e %.18e %.18e' % (s.x.number, s.y.number, s.z.number)
print stars.velocity
channel = gravity.particles.new_channel_to(stars)
if use_multiples:
stopping_condition = gravity.stopping_conditions.collision_detection
stopping_condition.enable()
gravity.parameters.force_sync = 1 # end exactly at the specified time
pre = "%%% "
print_log(pre, time, multiples_code, E0, cpu0)
tsave = time + input_Delta_t
save_file = ''
while time < end_time:
time += delta_t
multiples_code.evolve_model(time) #, callback=handle_callback)
# Copy values from the module to the set in memory.
channel.copy()
# Copy the index (ID) as used in the module to the id field in
# memory. The index is not copied by default, as different
# codes may have different indices for the same particle and
# we don't want to overwrite silently.
channel.copy_attribute("index_in_code", "id")
# Write log information.
print_log(pre, time, multiples_code, E0, cpu0)
sys.stdout.flush()
# Optionally create a restart file.
if save_restart and time >= tsave:
#save_file = 't='+'{:07.2f}'.format(time.number) # not in Python 2.6
save_file = 't=%07.2f'%time.number
write_state_to_file(time, stars, gravity, multiples_code,
save_file, delta_t, E0, cpu0)
sys.stdout.flush()
tsave += input_Delta_t
if strict_restart: break
gravity.stop()
kep.stop()
stop_smalln()
return time, save_file
def print_help():
print "Options:"
print " -d set log output interval [1.0]"
print " -h print this help message"
print " -i set initial file name [t=0000.0]"
print " -m suppress multiples [False]"
print " -s save restart files [False]"
print " -t set final time [0.0]"
if __name__ == '__main__':
initial_file = 't=0000.0'
t_end = 0.0 | nbody_system.time # default is to print log and exit
delta_t = 0.0 | nbody_system.time # log output time scale
Delta_t = 1.0 | nbody_system.time # restart output time scale
Delta_t_set = False
timestep_parameter = -1
softening_length = -1 | nbody_system.length
n_workers = 1
use_gpu = 1
gpu_worker = 1
use_multiples = True
save_restart = False
strict_restart = False
try:
opts, args = getopt.getopt(sys.argv[1:], "d:D:hi:msSt:")
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
for o, a in opts:
if o == "-d":
delta_t = float(a) | nbody_system.time
elif o == "-D":
Delta_t = float(a) | nbody_system.time
Delta_t_set = True
elif o == "-h":
print_help()
sys.exit(1)
elif o == "-i":
initial_file = a
elif o == "-m":
use_multiples = False
elif o == "-s":
save_restart = True
elif o == "-S":
strict_restart = True
save_restart = True
elif o == "-t":
t_end = float(a) | nbody_system.time
else:
print "unexpected argument", o
print_help()
sys.exit(1)
if not save_restart: strict_restart = False
if strict_restart: save_restart = True
if not Delta_t_set: Delta_t = delta_t
# Code implicitly assumes that delta_t and Delta_t are
# commensurate. Check that here.
if math.fmod(Delta_t.number, delta_t.number) != 0:
x = math.floor(Delta_t/delta_t)
Delta_t = x*delta_t
print 'reset Delta_t to', Delta_t
assert is_mpd_running()
# In non-strict_mode, OK to let run_ph4 loop over steps. If
# strict_restart is True, handle the loop here and force a new
# restart at every step -- seems substantially slower, but
# should be reproducible.
if (not strict_restart):
run_ph4(initial_file, t_end, delta_t, Delta_t,
timestep_parameter, softening_length,
n_workers, use_gpu, gpu_worker,
use_multiples,
save_restart, strict_restart)
else:
t = -1.0 | nbody_system.time
while t < t_end:
t, initial_file = run_ph4(initial_file, t_end, delta_t, Delta_t,
timestep_parameter, softening_length,
n_workers, use_gpu, gpu_worker,
use_multiples,
save_restart, strict_restart)
delta_t = 0.0 | nbody_system.time
timestep_parameter = -1
softening_length = -1 | nbody_system.length
print ''
```
#### File: community/sei/test_sei.py
```python
from amuse.test.amusetest import TestWithMPI
from amuse.units import nbody_system
from amuse.units import units
import os
import sys
import numpy
import math
from amuse.community.sei.interface import SeiInterface
from amuse.community.sei.interface import Sei
from amuse import datamodel
class TestSeiInterface(TestWithMPI):
def test0(self):
instance = SeiInterface()
instance.initialization()
instance.set_state(0,1,0,0,0,0,0)
for i in range(0,10):
instance.evolve(i)
print instance.get_state(0)
instance.stop()
class TestSei(TestWithMPI):
def test0(self):
convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 1 | units.AU)
particle = datamodel.Particles(1)
particle.position = [1.0, 0.0, 0.0,]|units.AU
particle.velocity = [0.0, 2.0*3.1415926535*1.0/365, 0.0] | units.AUd
sei = Sei(convert_nbody)
sei.initialization()
sei.particles.add_particles(particle)
print sei.particles.position.x.value_in(units.AU)
for i in range(365):
sei.evolve_model(i|units.day)
print sei.particles.position.x.value_in(units.AU)
```
#### File: amuse/ext/basicgraph.py
```python
class UnionFind(object):
"""Union-find data structure.
Each unionFind instance X maintains a family of disjoint sets of
hashable objects, supporting the following two methods:
- X[item] returns a name for the set containing the given item.
Each set is named by an arbitrarily-chosen one of its members; as
long as the set remains unchanged it will keep the same name. If
the item is not yet part of a set in X, a new singleton set is
created for it.
- X.union(item1, item2, ...) merges the sets containing each item
into a single larger set. If any item is not yet part of a set
in X, it is added to X as one of the members of the merged set.
"""
def __init__(self):
"""Create a new empty union-find structure."""
self.weights = {}
self.parents = {}
def __getitem__(self, object):
"""Find and return the name of the set containing the object."""
# check for previously unknown object
if object not in self.parents:
self.parents[object] = object
self.weights[object] = 1
return object
# find path of objects leading to the root
path = [object]
root = self.parents[object]
while root != path[-1]:
path.append(root)
root = self.parents[root]
# compress the path and return
for ancestor in path:
self.parents[ancestor] = root
return root
def __iter__(self):
"""Iterate through all items ever found or unioned by this structure."""
return iter(self.parents)
def union(self, *objects):
"""Find the sets containing the objects and merge them all."""
roots = [self[x] for x in objects]
heaviest = max([(self.weights[r],r) for r in roots], key = lambda x: x[0])[1]
for r in roots:
if r != heaviest:
self.weights[heaviest] += self.weights[r]
self.parents[r] = heaviest
def sets(self):
sets={}
for v in self.parents:
sets.setdefault(self[v],set()).add(v)
return sets.values()
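# A minimal usage sketch for UnionFind (hypothetical values, kept as a comment
# so that importing this module has no side effects):
#
#   uf = UnionFind()
#   uf.union('a', 'b')
#   uf.union('b', 'c')
#   uf['a'] == uf['c']   # True: 'a' and 'c' share a set representative
#   uf.sets()            # [set(['a', 'b', 'c'])]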
class Graph(dict):
def add_edge(self, n1, n2, w):
if callable(w): w=w(n1,n2)
self.setdefault(n1, {}).update({n2: w})
self.setdefault(n2, {}).update({n1: w})
def remove_edge(self, n1, n2):
self[n1].pop(n2)
self[n2].pop(n1)
def add_node(self,n):
self.setdefault(n, {})
def all_edges(self):
return [(self[u][v],u,v) for u in self for v in self[u]]
def MinimumSpanningTree(G):
"""
Return the minimum spanning tree of an undirected graph G.
G should be represented in such a way that G[u][v] gives the
length of edge u,v, and G[u][v] should always equal G[v][u].
The tree is returned as a list of edges.
"""
# Kruskal's algorithm: sort edges by weight, and add them one at a time.
# We use Kruskal's algorithm, first because it is very simple to
# implement once UnionFind exists, and second, because the only slow
# part (the sort) is sped up by being built in to Python.
subtrees = UnionFind()
tree = []
edges = [(G[u][v],u,v) for u in G for v in G[u]]
edges.sort(key=lambda x:x[0])
for W,u,v in edges:
if subtrees[u] != subtrees[v]:
tree.append((W,u,v))
subtrees.union(u,v)
return tree
def MinimumSpanningTreeFromEdges(edges):
"""
Return the minimum spanning tree of an undirected graph G.
This version runs directly from an edgelist. An edge is a triple
(w,u,v), such that u,v are nodes, w is the length of the edge.
The tree is returned as a list of edges.
"""
# Kruskal's algorithm: sort edges by weight, and add them one at a time.
# We use Kruskal's algorithm, first because it is very simple to
# implement once UnionFind exists, and second, because the only slow
# part (the sort) is sped up by being built in to Python.
subtrees = UnionFind()
tree = []
edges.sort(key=lambda x:x[0])
for W,u,v in edges:
if subtrees[u] != subtrees[v]:
tree.append((W,u,v))
subtrees.union(u,v)
return tree
def ConnectedComponents(G):
"""
Return the connected components of a graph. G should be
represented in such a way that G[u] gives the neighbours of u, and
if v is in G[u] then u is in G[v]. The connected components are
returned as sets of nodes.
"""
u=UnionFind()
for v in G:
nset=set(G[v])
nset.add(v)
u.union(*nset)
return u.sets()
def ConnectedComponentsFromEdges(edges):
"""
Return the connected components of a graph from a list of edges.
The connected components are returned as sets of nodes. Note that this
does not find singletons (nodes that appear in no edge).
"""
u=UnionFind()
for e in edges:
u.union(e[1],e[2])
return u.sets()
if __name__=="__main__":
graph = Graph()
graph.add_edge(0, 1, 1.0)
graph.add_edge(1, 2, 1.0)
graph.add_edge(2, 0, 1.0)
graph.add_edge(3, 4, 1.0)
graph.add_edge(4, 5, 1.0)
graph.add_edge(5, 3, 1.0)
print graph[0]
first, second = ConnectedComponents(graph)
print first
print second
print MinimumSpanningTree(graph)
```
#### File: amuse/ext/cosmo.py
```python
import numpy
from amuse.units import units, generic_unit_system
from amuse.units.quantities import to_quantity
from amuse.datamodel import Particles
from amuse.support.exceptions import AmuseException
def findbin(ylist,y):
s=1
if ylist[0]>=ylist[-1]:
s=-1
if s*y <= s*ylist[0]:
return -1
if s*y >= s*ylist[-1]:
return len(ylist)
up=len(ylist)-1
low=0
while up-low>1:
b=(low+up)/2
if s*y < s*ylist[b]:
up=b
else:
low=b
return up
class Hermitelookup(object):
def __init__(self,xlist,ylist,yderiv):
self.xlist=xlist
self.ylist=ylist
self.yderiv=yderiv
def interpolatecubic(self,x, b):
if b <= 0:
return self.ylist[0]
if b > len(self.ylist)-1:
return self.ylist[-1]
dx=self.xlist[b]-self.xlist[b-1]
dy=self.ylist[b]-self.ylist[b-1]
if dx==0.:
return (self.ylist[b-1]+self.ylist[b])/2
y1=self.ylist[b-1]
yd2=self.yderiv[b]
yd1=self.yderiv[b-1]
u=(x-self.xlist[b-1])/(self.xlist[b]-self.xlist[b-1])
return u**3*(-2*dy+dx*(yd1+yd2))+u**2*(3*dy-dx*(2*yd1+yd2))+dx*yd1*u+y1
def evaluate(self,x):
return self.interpolatecubic(x,findbin(self.xlist,x))
class Cosmology(object):
def __init__(self, # default=fifth year wmap+BAO+SN parameters, hinshaw 2008
omega=1.,
omegal = 0.726,
omegak = 0.,
omegar = 8.37e-5, # 4.165E-5/(h*h) includes 3 massless neutrino species, T0 = 2.72528
h = 0.705,
sigma8 = 0.812,
n=1000,amax=1.):
self.omega=omega
self.omegal=omegal
self.omegar=omegar
self.omegak=omegak
self.hubble0=h*(100 | units.kms/units.Mpc)
self.omegam = omega - (omegak + omegar + omegal)
self.n=n
a=amax*(numpy.array(range(self.n+1))/float(self.n))**2
t=[0.]
dtda=[0.]
dadt=[0.]
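# Build t(a) on a quadratically spaced grid in a (finer near a=0) by
# integrating dt/da with Simpson's rule; the tabulated times are dimensionless
# (in units of 1/H0) and are converted back in agefroma/afromage below.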
for i in range(1,self.n+1):
_t=t[-1]+1./6.*( self.invfriedmanint(a[i])+
self.invfriedmanint(a[i-1])+
4*self.invfriedmanint((a[i]+a[i-1])/2) )*(a[i]-a[i-1])
t.append( _t )
dtda.append(self.invfriedmanint(a[i]))
dadt.append(1./dtda[-1])
self.a=a
self.t=numpy.array(t)
self.dtda=numpy.array(dtda)
self.dadt=numpy.array(dadt)
self.age_lookup=Hermitelookup(self.a,self.t,self.dtda)
self.a_lookup=Hermitelookup(self.t,self.a,self.dadt)
def invfriedmanint(self,a):
return a/(self.omegam*a+self.omegar+self.omegal*a**4+self.omegak*a**2)**0.5
def hubble(self,a):
return self.hubble0*self.dadtau(a)/a
def dadtau(self,a):
return (self.omegam/a+self.omegar/a**2+self.omegal*a**2+self.omegak)**0.5
def d2adtau2(self,a):
return -1./2.*self.omegam/a**2-self.omegar/a**3+self.omegal*a
def agefromz(self,z):
return self.agefroma(1./(z+1.))
def taufromz(self,z):
return self.taufroma(1./(z+1.))
def agefroma(self,a):
return (self.age_lookup.evaluate(a)/self.hubble0)
def taufroma(self,a):
return self.age_lookup.evaluate(a)
def afromage(self,age):
return self.a_lookup.evaluate(age*self.hubble0)
def afromtau(self,tau):
return self.a_lookup.evaluate(tau)
def convert_comoving_to_physical(original, redshift=0.0, hubble_parameter=1.0, attribute_names=None):
"""
Converts quantities or particle sets from comoving coordinates to physical
coordinates. In comoving coordinates, changes in positions due to the
expansion of the universe are corrected for, by dividing by the scale
factor 'a':
a = 1 / (1 + z).
This function will undo this correction for all quantities with units that
are (derived from) length units (e.g. position, velocity, energy, but not
time or mass).
Optionally, a value for the Hubble parameter (value of Hubble constant in
units of 100 km/s/Mpc) can be supplied. If so, the units of 'original' are
assumed to be based on length/h, mass/h, and time/h, instead of length,
    mass, and time, respectively. These factors will be divided out in the result.
"""
if isinstance(original, Particles):
copy = original.copy()
if attribute_names is None:
attribute_names = copy.get_attribute_names_defined_in_store()
for attribute in attribute_names:
setattr(copy, attribute, convert_quantity_from_comoving_to_physical(
getattr(copy, attribute), redshift, hubble_parameter))
return copy
elif hasattr(original, "unit"):
return convert_quantity_from_comoving_to_physical(original, redshift, hubble_parameter)
else:
raise AmuseException("Can't convert instance of {0} from comoving to physical "
"coordinates (only Particles or Quantity supported)".format(original.__class__))
def convert_quantity_from_comoving_to_physical(original, redshift, hubble_parameter=1.0):
for (exponent, unit) in to_quantity(original).unit.base:
if unit is units.m or unit is generic_unit_system.length:
return original * (1 / (1.0 + redshift))**exponent
return original
if __name__=="__main__":
cosmo=Cosmology(amax=2)
print cosmo.agefromz(0.).in_(units.Myr)
print cosmo.agefroma(1.).in_(units.Gyr)
print cosmo.afromage(cosmo.agefroma(1.5))
```
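A minimal usage sketch for the lookup-table cosmology above, assuming a working AMUSE installation; the import path is inferred from the file location and the numbers are illustrative only:

```python
from amuse.units import units
from amuse.ext.cosmo import Cosmology, convert_comoving_to_physical

cosmo = Cosmology(n=2000, amax=1.0)          # finer integration grid than the default
print(cosmo.agefromz(0.0).in_(units.Gyr))    # age of the universe today
print(cosmo.agefromz(6.0).in_(units.Myr))    # age at redshift 6
print(cosmo.afromage(cosmo.agefroma(0.5)))   # round trip through the inverse lookup

# strip the comoving 1/(1+z) factor from a length-like quantity
print(convert_comoving_to_physical(1.0 | units.Mpc, redshift=2.0))
```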
#### File: amuse/ext/static_potentials.py
```python
import numpy
from amuse.units import units, constants, quantities
from amuse.datamodel import Particle, Particles
from amuse.support.exceptions import AmuseException
try:  # Python 2
    from StringIO import StringIO
except ImportError:  # Python 3
    from io import StringIO
class Abstract_Potential(object):
def get_gravity_at_point(self, eps, x,y,z):
""" derive the gravity from the potential """
phi_0 = self.get_potential_at_point(eps, x,y,z)
dpos = 0.001*(x**2+y**2+z**2).sqrt()
phi_dx = self.get_potential_at_point(0,x+dpos,y,z) - phi_0
phi_dy = self.get_potential_at_point(0,x,y+dpos,z) - phi_0
phi_dz = self.get_potential_at_point(0,x,y, z+dpos) - phi_0
return phi_dx/dpos, phi_dy/dpos, phi_dz/dpos
def get_potential_at_point(self, eps, x, y, z):
""" Abstract function, to be overwritten by subclass """
pass
def flattened_potential(self, x, y, z, a, b, mass):
"""
Following eq. 2.69a of B&T
        a=0 gives the Plummer potential
        b=0 gives Kuzmin's potential for a razor-thin disc
"""
r_squared = x**2+y**2
return constants.G * mass / (r_squared + (a + (z**2 + b**2).sqrt())**2).sqrt()
def power_law_potential(self, r, alpha, r_0, mass_0):
""" Following eq. 2.62 of B&T """
rho_0 = mass_0 / (4./3. * numpy.pi * r_0**3)
phi_0 = - constants.G * mass_0 / r_0
v_circ_squared = 4 * numpy.pi * constants.G * rho_0 * r_0**alpha / (3 - alpha)
if alpha == 2:
phi_minus_phi_0 = - v_circ_squared * numpy.log(r/r_0)
else:
phi_minus_phi_0 = - v_circ_squared * (r_0**(2-alpha) - r**(2-alpha))/(alpha-2)
return phi_minus_phi_0 + phi_0
def point_mass_potential(self, r, mass):
""" See eq. 2.34 of B&T """
return -constants.G * mass / r
def point_mass_gravity(self, r, mass, unit_vector):
""" See eq. 2.27a of B&T """
return -constants.G * mass / r**2 * unit_vector
class Disc_Bulge_Halo_Potential(Abstract_Potential):
def halo_potential(self, x,y,z, Mc, Rc):
""" TODO: Find the source for this potential -> McMillan & <NAME> 2000?"""
r=(x**2+y**2+z**2).sqrt()
rr = (r/Rc)
return -constants.G * (Mc/Rc)*(0.5*numpy.log(1 +rr**2) + numpy.arctan(rr)/rr)
def get_potential_at_point(self, eps, x, y, z):
disk = self.flattened_potential(x,y,z,
0.0|units.kpc, 0.277|units.kpc, 1.12E+10|units.MSun)
bulge = self.flattened_potential(x,y,z,
3.7|units.kpc, 0.20|units.kpc, 8.07E+10|units.MSun)
halo = self.halo_potential(x,y,z,
Mc=5.0E+10|units.MSun, Rc=6.0|units.kpc)
return disk + bulge + halo
class Galactic_Center_Potential_Kruijssen(Abstract_Potential):
"""
Following Kruijssen et al 2014, which uses the enclosed mass
profile from Launhardt et al 2002.
Note that the mass profile only extends to 487.9 parsec from
    the galactic center (and only to 487.9 * 0.63 parsec in the z direction).
Outside this range, a different potential should be used.
"""
def __init__(self, q=0.63):
self.q = q
self.load_table()
def load_table(self, rescale=True):
table = """
# enclosed mass profile from Launhardt et al 2002,
# recreated and provided by Kruijssen 2014
# radius enclosed mass
# (parsec) (MSun)
0.0 0.0
0.6261 3298000
0.6945 3429000
0.7751 3636000
0.8756 3855000
0.9712 4088000
1.077 4335000
1.187 4552000
1.293 4826000
1.408 5019000
1.534 5374000
1.671 5754000
1.820 6101000
1.994 6469000
2.198 6995000
2.424 7563000
2.705 8178000
2.964 8842000
3.268 9561000
3.625 1.033E7
3.996 1.128E7
4.406 1.232E7
4.798 1.332E7
5.131 1.413E7
5.487 1.498E7
6.013 1.558E7
6.752 1.635E7
7.489 1.717E7
8.409 1.803E7
9.327 1.894E7
10.28 2.028E7
11.54 2.214E7
13.04 2.441E7
14.91 2.718E7
17.16 3.026E7
19.98 3.402E7
22.99 3.939E7
26.13 4.516E7
29.35 5.229E7
32.35 5.995E7
35.02 6.806E7
38.61 8.036E7
43.09 9.865E7
47.51 1.199E8
51.74 1.429E8
57.75 1.789E8
64.84 2.329E8
71.92 2.973E8
81.25 3.947E8
91.79 5.188E8
99.97 6.246E8
109.5 7.743E8
120.0 9.142E8
133.9 1.068E9
152.2 1.237E9
179.5 1.489E9
206.5 1.741E9
243.4 2.056E9
283.5 2.289E9
332.3 2.573E9
382.3 2.893E9
413.8 3.098E9
456.2 3.449E9
487.9 3.694E9
1e6 3.694E9
"""
stream = StringIO(table)
radius, enclosed_mass = numpy.loadtxt(stream, unpack=True)
if rescale:
""" See footnote 18 at the bottom of page 1076 of Kruijssen """
factor = 8.3/8.5
radius *= factor
enclosed_mass *= factor**2
self.radius = radius | units.parsec
self.enclosed_mass_profile = enclosed_mass | units.MSun
def enclosed_mass(self, r):
try:
index = quantities.searchsorted(self.radius, r)
except ValueError:
""" This error is usually thrown when r has dimension > 1 """
shape = r.shape
r_flat = r.flatten()
index = quantities.searchsorted(self.radius, r_flat)
            index = index.reshape(shape)
mass_below = self.enclosed_mass_profile[index-1]
mass_above = self.enclosed_mass_profile[index]
radius_below = self.radius[index-1]
radius_above = self.radius[index]
# Linear interpolation in log space
log_m_over_mb = numpy.log(mass_above/mass_below) * numpy.log(r/radius_below) / numpy.log(radius_above/radius_below)
enclosed_mass = numpy.nan_to_num(numpy.exp(log_m_over_mb)) * mass_below
return enclosed_mass
def get_potential_at_point(self, eps, x, y, z):
"""
        Note that this potential is not entirely consistent with
        get_gravity_at_point (which should be used instead), because the
        latter applies a second coordinate transformation to flatten
        the "potential".
"""
r = (x**2 + y**2 + z**2/self.q**2).sqrt()
mass = self.enclosed_mass(r)
return self.point_mass_potential(r, mass)
def get_gravity_at_point(self, eps, x,y,z):
"""
Overwrites the default to add a second coordinate transformation.
"""
r = (x**2 + y**2 + z**2/self.q**2).sqrt()
mass = self.enclosed_mass(r)
unit_vector = []|x.unit
for var in (x, y, z):
unit_vector.append(var)
unit_vector = unit_vector / (x**2 + y**2 + z**2).sqrt()
unit_vector[2] *= 1./self.q**2
return self.point_mass_gravity(r, mass, unit_vector)
class Position_In_Potential(Abstract_Potential):
"""
Wrapper around any other potential that has a test particle.
Any call to get_potential_at_point will shift the coordinates to
put the center on the location of that test particle.
    The particle is put in a Particles set so that channels can be used;
    however, only a single particle is allowed at a time.
"""
def __init__(self, potential, particle=None):
self.potential = potential
if particle is None:
particle = Particle()
particle.position = [0., 0., 0.] | units.parsec
particle.velocity = [0., 0., 0.] | units.kms
self.particles = Particles()
self.particles.add_particle(particle)
@property
def particle(self):
return self.particles[0]
@particle.setter
def particle(self, particle):
self.particles.remove_particles(self.particles)
self.particles.add_particle(particle)
def get_potential_at_point(self, eps, x, y, z):
px, py, pz = self.particle.position
return self.potential.get_potential_at_point(eps, x+px, y+py, z+pz)
```
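A hedged usage sketch for the potentials above, assuming a working AMUSE installation; the import path follows the file location and the chosen coordinates are arbitrary illustrative values:

```python
from amuse.units import units
from amuse.ext.static_potentials import Disc_Bulge_Halo_Potential, Position_In_Potential

potential = Disc_Bulge_Halo_Potential()
x, y, z = 8.0 | units.kpc, 0.0 | units.kpc, 0.1 | units.kpc
print(potential.get_potential_at_point(0 | units.kpc, x, y, z))

# finite-difference gravity from the base class
ax, ay, az = potential.get_gravity_at_point(0 | units.kpc, x, y, z)
print(ax.in_(units.parsec / units.Myr**2))

# shift the potential so queries are taken relative to a (movable) test particle
shifted = Position_In_Potential(potential)
shifted.particle.position = [8.0, 0.0, 0.0] | units.kpc
print(shifted.get_potential_at_point(0 | units.kpc, 0 | units.kpc, 0 | units.kpc, 0 | units.kpc))
```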
#### File: amuse/ic/plummer.py
```python
import numpy
import numpy.random
from math import pi, sqrt
from amuse.units import nbody_system
from amuse import datamodel
__all__ = ["new_plummer_sphere", "new_plummer_model"]
class MakePlummerModel(object):
def __init__(self, number_of_particles, convert_nbody = None, radius_cutoff = 22.8042468, mass_cutoff = 0.999,
do_scale = False, random_state = None, random = None):
self.number_of_particles = number_of_particles
self.convert_nbody = convert_nbody
self.mass_cutoff = min(mass_cutoff, self.calculate_mass_cuttof_from_radius_cutoff(radius_cutoff))
self.do_scale = do_scale
        if random_state is not None:
            print("DO NOT USE RANDOM STATE")
        self.random_state = None
if random is None:
self.random = numpy.random
else:
self.random = random
def calculate_mass_cuttof_from_radius_cutoff(self, radius_cutoff):
if radius_cutoff > 99999:
return 1.0
scale_factor = 16.0 / (3.0 * pi)
rfrac = radius_cutoff * scale_factor
denominator = pow(1.0 + rfrac ** 2, 1.5)
numerator = rfrac ** 3
return numerator/denominator
def calculate_radius(self, index):
mass_min = (index * self.mass_cutoff) / self.number_of_particles
mass_max = ((index+1) * self.mass_cutoff) / self.number_of_particles
random_mass_fraction = self.random.uniform(mass_min, mass_max)
radius = 1.0 / sqrt( pow (random_mass_fraction, -2.0/3.0) - 1.0)
return radius
def calculate_radius_uniform_distribution(self):
return 1.0 / numpy.sqrt( numpy.power(self.random.uniform(0,self.mass_cutoff,(self.number_of_particles,1)), -2.0/3.0) - 1.0)
def new_positions_spherical_coordinates(self):
pi2 = pi * 2
radius = self.calculate_radius_uniform_distribution()
theta = numpy.arccos(self.random.uniform(-1.0,1.0, (self.number_of_particles,1)))
phi = self.random.uniform(0.0,pi2, (self.number_of_particles,1))
return (radius,theta,phi)
def new_velocities_spherical_coordinates(self, radius):
pi2 = pi * 2
x,y = self.new_xy_for_velocity()
velocity = x * sqrt(2.0) * numpy.power( 1.0 + radius*radius, -0.25)
theta = numpy.arccos(self.random.uniform(-1.0,1.0, (self.number_of_particles,1)))
phi = self.random.uniform(0.0,pi2, (self.number_of_particles,1))
return (velocity,theta,phi)
def coordinates_from_spherical(self, radius, theta, phi):
x = radius * numpy.sin( theta ) * numpy.cos( phi )
y = radius * numpy.sin( theta ) * numpy.sin( phi )
z = radius * numpy.cos( theta )
return (x,y,z)
def new_xy_for_velocity(self):
number_of_selected_items = 0
selected_values_for_x = numpy.zeros(0)
selected_values_for_y = numpy.zeros(0)
while (number_of_selected_items < self.number_of_particles):
x = self.random.uniform(0,1.0, (self.number_of_particles-number_of_selected_items))
y = self.random.uniform(0,0.1, (self.number_of_particles-number_of_selected_items))
g = (x**2) * numpy.power(1.0 - x**2, 3.5)
compare = y <= g
selected_values_for_x = numpy.concatenate((selected_values_for_x, x.compress(compare)))
            selected_values_for_y = numpy.concatenate((selected_values_for_y, y.compress(compare)))
number_of_selected_items = len(selected_values_for_x)
return numpy.atleast_2d(selected_values_for_x).transpose(), numpy.atleast_2d(selected_values_for_y).transpose()
def new_model(self):
m = numpy.zeros((self.number_of_particles,1)) + (1.0 / self.number_of_particles)
radius, theta, phi = self.new_positions_spherical_coordinates()
position = numpy.hstack(self.coordinates_from_spherical(radius, theta, phi))
radius, theta, phi = self.new_velocities_spherical_coordinates(radius)
velocity = numpy.hstack(self.coordinates_from_spherical(radius, theta, phi))
position = position / 1.695
velocity = velocity / sqrt(1 / 1.695)
return (m, position, velocity)
@property
def result(self):
masses = numpy.ones(self.number_of_particles) / self.number_of_particles
radius, theta, phi = self.new_positions_spherical_coordinates()
x,y,z = self.coordinates_from_spherical(radius, theta, phi)
radius, theta, phi = self.new_velocities_spherical_coordinates(radius)
vx,vy,vz = self.coordinates_from_spherical(radius, theta, phi)
result = datamodel.Particles(self.number_of_particles)
result.mass = nbody_system.mass.new_quantity(masses)
result.x = nbody_system.length.new_quantity(x.reshape(self.number_of_particles)/1.695)
result.y = nbody_system.length.new_quantity(y.reshape(self.number_of_particles)/1.695)
result.z = nbody_system.length.new_quantity(z.reshape(self.number_of_particles)/1.695)
result.vx = nbody_system.speed.new_quantity(vx.reshape(self.number_of_particles) / sqrt(1/1.695))
result.vy = nbody_system.speed.new_quantity(vy.reshape(self.number_of_particles) / sqrt(1/1.695))
result.vz = nbody_system.speed.new_quantity(vz.reshape(self.number_of_particles) / sqrt(1/1.695))
result.radius = 0 | nbody_system.length
result.move_to_center()
if self.do_scale:
result.scale_to_standard()
if not self.convert_nbody is None:
result = datamodel.ParticlesWithUnitsConverted(result, self.convert_nbody.as_converter_from_si_to_generic())
result = result.copy()
return result
def new_plummer_model(number_of_particles, *list_arguments, **keyword_arguments):
"""
    Create a Plummer sphere with the given number of particles. Returns
    a set of equal-mass stars whose positions and velocities are distributed
    to follow the Plummer density profile. The model is centered on the
    origin. Positions and velocities are optionally scaled such that the kinetic
    and potential energies are 0.25 and -0.5 in N-body units, respectively.
:argument number_of_particles: Number of particles to include in the plummer sphere
:argument convert_nbody: When given will convert the resulting set to SI units
:argument radius_cutoff: Cutoff value for the radius (defaults to 22.8042468)
:argument mass_cutoff: Mass percentage inside radius of 1
:argument do_scale: scale the result to exact nbody units (M=1, K=0.25, U=-0.5)
"""
uc = MakePlummerModel(number_of_particles, *list_arguments, **keyword_arguments)
return uc.result
new_plummer_sphere = new_plummer_model
```
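A short usage sketch for the generator above, assuming a working AMUSE installation; the converter scale (1000 MSun, 1 parsec) is only an example:

```python
import numpy
from amuse.units import nbody_system, units
from amuse.ic.plummer import new_plummer_model

numpy.random.seed(1234)               # the default RNG is numpy.random, so this is reproducible
stars = new_plummer_model(1000)       # dimensionless N-body units
print(len(stars), stars.mass.sum())   # 1000 equal-mass particles, total mass 1

# with a converter the same model comes back in SI units
converter = nbody_system.nbody_to_si(1000.0 | units.MSun, 1.0 | units.parsec)
stars_si = new_plummer_model(1000, convert_nbody=converter, do_scale=True)
print(stars_si.center_of_mass())
print(stars_si.kinetic_energy().in_(units.erg))
```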
#### File: src/amuse/__init__.py
```python
import numpy
def numpy_fix():
try:
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
numpy_fix()
```
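The `numpy_fix` above pins numpy's array printing to the pre-1.14 style, presumably so that stored doctest-style output keeps matching; on numpy versions older than 1.14 the `legacy` keyword does not exist and the resulting `TypeError` is silently ignored. A tiny sketch of the difference it papers over:

```python
# Illustrative only: numpy >= 1.14 changed how float arrays are printed.
import numpy

a = numpy.array([0.1, 2.0, 30.0])
numpy.set_printoptions(legacy='1.13')   # 1.13-era spacing and padding
print(a)
numpy.set_printoptions(legacy=False)    # current default formatting
print(a)
```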
#### File: amuse/rfi/nospawn.py
```python
from amuse.rfi import core
from amuse.rfi.python_code import CythonImplementation
from mpi4py import MPI
from amuse.rfi import channel
from collections import namedtuple
import sys
import importlib
Code = namedtuple("Code", ['cls', 'number_of_workers', 'args', 'kwargs'])
PythonCode = namedtuple("Code", ['cls', 'number_of_workers', 'args', 'kwargs', 'implementation_factory'])
def get_number_of_workers_needed(codes):
result = 1
for x in codes:
result += x.number_of_workers
return result
def get_color(rank, codes):
if rank == 0:
return 0
else:
index = 1
for color, x in enumerate(codes):
if rank >= index and rank < index + x.number_of_workers:
return color + 1
index += x.number_of_workers
return len(codes) + 1 #left over ranks
def get_key(rank, codes):
if rank == 0:
return 0
else:
index = 1
for color, x in enumerate(codes):
if rank >= index and rank < index + x.number_of_workers:
return rank - index
index += x.number_of_workers
return rank - (len(codes) + 1) #left over ranks
def get_code_class(rank, codes):
if rank == 0:
return None
else:
index = 1
for color, x in enumerate(codes):
if rank >= index and rank < index + x.number_of_workers:
return x.cls
index += x.number_of_workers
return None
def start_all(codes):
channel.MpiChannel.ensure_mpi_initialized()
number_of_workers_needed = get_number_of_workers_needed(codes)
world = MPI.COMM_WORLD
rank = world.rank
if world.size < number_of_workers_needed:
if rank == 0:
raise Exception("cannot start all codes, the world size ({0}) is smaller than the number of requested codes ({1}) (which is always 1 + the sum of the all the number_of_worker fields)".format(world.size, number_of_workers_needed))
else:
return None
color = get_color(world.rank, codes)
key = get_key(world.rank, codes)
newcomm = world.Split(color, key)
localdup = world.Dup()
if world.rank == 0:
result = []
remote_leader = 1
tag = 1
for x in codes:
new_intercomm = newcomm.Create_intercomm(0, localdup, remote_leader, tag)
remote_leader += x.number_of_workers
tag += 1
instance = x.cls(*x.args, check_mpi = False, must_start_worker = False, **x.kwargs)
instance.legacy_interface.channel = channel.MpiChannel('_',None)
instance.legacy_interface.channel.intercomm = new_intercomm
result.append(instance)
world.Barrier()
return result
else:
code_cls = get_code_class(world.rank, codes)
if code_cls is None:
world.Barrier()
return None
new_intercomm = newcomm.Create_intercomm(0, localdup, 0, color)
x = get_code(world.rank, codes)
instance = code_cls(*x.args, check_mpi = False, must_start_worker = False, **x.kwargs)
interface = instance.legacy_interface
if hasattr(interface, '__so_module__'):
package, _ = code_cls.__module__.rsplit('.',1)
modulename = package + '.' + interface.__so_module__
module = importlib.import_module(modulename)
module.set_comm_world(newcomm)
else:
module = x.implementation_factory()
instance = CythonImplementation(module, interface.__class__)
instance.intercomm = new_intercomm
instance.must_disconnect = False
world.Barrier()
instance.start()
return None
def stop_all(instances):
for x in instances:
x.stop()
def start_empty():
channel.MpiChannel.ensure_mpi_initialized()
world = MPI.COMM_WORLD
rank = world.rank
color = 0 if world.rank == 0 else 1
key = 0 if world.rank == 0 else world.rank -1
newcomm = world.Split(color, key)
localdup = world.Dup()
if world.rank == 0:
result = []
remote_leader = 1
tag = 1
new_intercomm = newcomm.Create_intercomm(0, localdup, remote_leader, tag)
instance = core.CodeInterface(check_mpi = False, must_start_worker = False)
instance.channel = channel.MpiChannel('_',None)
instance.channel.intercomm = new_intercomm
instance.world = localdup
instance.remote_leader = 1
world.Barrier()
return instance
else:
new_intercomm = newcomm.Create_intercomm(0, localdup, 0, color)
instance = CythonImplementation(None, core.CodeInterface)
instance.intercomm = new_intercomm
instance.world = localdup
instance.freeworld = newcomm
instance.localworld = newcomm
instance.must_disconnect = False
world.Barrier()
instance.start()
print "STOP...", world.rank
return None
def get_code(rank, codes):
if rank == 0:
return None
else:
index = 1
for color, x in enumerate(codes):
if rank >= index and rank < index + x.number_of_workers:
return x
index += x.number_of_workers
return None
```
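A hedged sketch of driving `start_all`/`stop_all` from an MPI launch. The script would be started with something like `mpiexec -n 5 python run_nospawn.py`; the choice of BHTree as the community code is an example only, not something nospawn requires:

```python
from amuse.rfi.nospawn import Code, start_all, stop_all
from amuse.community.bhtree.interface import BHTree

# one code with four workers: together with the driver rank this needs 5 MPI ranks
codes = [Code(cls=BHTree, number_of_workers=4, args=(), kwargs={})]

instances = start_all(codes)   # list of instances on rank 0, None on the worker ranks
if instances is not None:      # we are the driver rank
    gravity = instances[0]
    print(gravity.parameters)
    stop_all(instances)
```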
#### File: amuse/support/debian.py
```python
from __future__ import print_function
import os
import shutil
import subprocess
try: # Python 3
from urllib.request import urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve
import platform
import sys
from optparse import OptionParser
depends = \
[
'bash (>= 2.05a-11)',
'build-essential',
'cmake',
'gfortran',
'python2.6 (>=2.6.0)',
'python-numpy (>=1.3.0)',
'python-nose (>=0.11)',
'python-matplotlib (>=0.99)',
'python-setuptools',
'python-docutils',
'python-h5py (>=1.2.1)',
'libhdf5-serial-dev (>=1.6)',
'hdf5-tools',
'libfftw3-3',
'libfftw3-dev',
'libfftw3-doc',
'libopenmpi-dev (>=1.4.1)',
'openmpi-bin',
'libgsl0ldbl',
'libgsl0-dev',
]
control = \
"""
Package: amuse
Version: {0}
Section: base
Priority: optional
Architecture: all
Depends: {1}
Maintainer: amuseteam <<EMAIL>>
Description: Astrophysical Multipurpose Software Environment
A software framework for large-scale simulations of dense
stellar systems, in which existing codes for dynamics,
stellar evolution, and hydrodynamics can be easily coupled.
"""
postinst = \
"""#!/bin/sh -e
easy_install mpi4py
"""
class generate_debian_package(object):
def __init__(self, version = 'svn', arch = None):
self.version = version
if self.version == 'svn':
self.version = 'r{0}'.format(self.get_svn_revision())
self.amuse_version = 'amuse-{0}'.format(self.version)
self.debianversion = '0ubuntu'
if arch is None:
self.architecture = 'i386' if platform.architecture()[0] == '32bit' else 'amd64'
else:
self.architecture = arch
self.package_name = 'amuse_{0}-{1}-{2}'.format(self.version, self.debianversion, self.architecture)
self.package_path = os.path.abspath(self.package_name)
def get_svn_revision(self):
stdoutstring, stderrstring = subprocess.Popen(
['svn','info'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
lines = stdoutstring.splitlines()
revision = '0000'
for x in lines:
if x.startswith('Revision:'):
label, revision = x.split(': ')
return revision
def run(self):
self.setup_deb_builddir()
self.makescripts()
self.install_in_deb_builddir()
self.package()
self.cleanup()
print("generated debian package: {0}.deb".format(self.package_name))
def makescripts(self):
#os.system('python setup.py generate_main --amuse-dir=/usr/share/{0}'.format(amuse_version))
pass
def setup_deb_builddir(self):
if os.path.exists(self.package_path):
shutil.rmtree(self.package_path)
os.makedirs(self.package_path)
def install_in_deb_builddir(self):
debian_path = os.path.join(self.package_path, 'DEBIAN')
os.makedirs(debian_path)
dependency_string = ', '.join(depends)
with open(os.path.join(debian_path, 'control'),'w') as f:
f.write(control.format(self.version, dependency_string))
#f = open(os.path.join(debian_path, 'postinst'),'w')
#f.write(postinst)
#f.close()
#os.chmod(os.path.join(debian_path, 'postinst'), 0b11110
if not os.path.exists('build'):
os.makedirs('build')
mpi4pyfile = 'mpi4py-1.2.2.tar.gz'
urlretrieve(
'http://mpi4py.googlecode.com/files/{0}'.format(mpi4pyfile),
mpi4pyfile
)
shutil.copyfile(mpi4pyfile, os.path.join('build', mpi4pyfile))
subprocess.call(
[
'tar',
'-xf',
mpi4pyfile
]
,
cwd=os.path.join('build')
)
subprocess.call(
[
sys.executable,
'setup.py',
'install',
'--prefix=/usr',
'--install-layout=deb',
'--root={0}'.format(self.package_path),
]
,
cwd=os.path.join('build','mpi4py-1.2.2')
)
subprocess.call(
[
sys.executable,
'setup.py',
'install',
'--prefix=/usr',
'--install-layout=deb',
'--root={0}'.format(self.package_path),
]
)
#shutil.copytree('src', './{0}/usr/share/{1}/src'.format(package_name, amuse_version))
#shutil.copytree('test', './{0}/usr/share/{1}/test'.format(package_name, amuse_version))
#shutil.copytree('data', './{0}/usr/share/{1}/data'.format(package_name, amuse_version))
#shutil.copytree('lib', './{0}/usr/share/{1}/lib'.format(package_name, amuse_version))
#shutil.copytree('doc', './{0}/usr/share/doc/{1}/doc'.format(package_name, amuse_version))
#self.touch('./{0}/usr/share/{1}/build.py'.format(package_name, amuse_version))
#shutil.copy('amuse.sh', './{0}/usr/bin'.format(package_name))
#shutil.copy('iamuse.sh', './{0}/usr/bin'.format(package_name))
#os.chmod('./{0}/usr/bin/amuse.sh'.format(package_name), 0b111101101)
#os.chmod('./{0}/usr/bin/iamuse.sh'.format(package_name), 0b111101101)
def package(self):
if os.path.exists(self.package_path):
print("creating debian package..")
subprocess.call(
[
'fakeroot',
'dpkg-deb',
'--build',
self.package_name
]
)
def cleanup(self):
#if os.path.exists(self.package_path):
# shutil.rmtree(self.package_path)
#os.system('python setup.py generate_main')
pass
def touch(self, filename):
if not os.path.exists(filename):
open(filename, 'w').close()
def main(version = 'svn', arch = None):
command = generate_debian_package(version, arch)
command.run()
def new_option_parser():
result = OptionParser()
result.add_option(
"-v", "--version",
default = 'svn',
dest="version",
help="version of the debian package to create, defaults to svn based",
type="string"
)
result.add_option(
"-a", "--arch",
dest="arch",
default = None,
help="architecture to build (i386 or amd64)",
type="string"
)
return result
if __name__ == "__main__":
options, arguments = new_option_parser().parse_args()
main(**options.__dict__)
```
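A hedged usage sketch; the version string is a placeholder and the module path is inferred from the file location. The script shells out to `svn`, `setup.py`, `fakeroot` and `dpkg-deb`, so it has to be run from the AMUSE source root on a Debian/Ubuntu machine:

```python
from amuse.support.debian import main

# build a 64-bit package for an explicit version instead of deriving it
# from "svn info"; the output name follows the
# amuse_<version>-<debianversion>-<arch> pattern used above.
# equivalent command line: python debian.py --version 13.1.0 --arch amd64
main(version='13.1.0', arch='amd64')
```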
#### File: binbuild/build/linux_set_rpath.py
```python
import os
import re
import os.path
import subprocess
from optparse import OptionParser
def check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
shared_library_re=re.compile(r'.*\.so.*')
def get_so_files(path='.'):
for name in os.listdir(path):
if not shared_library_re.match(name):
continue
fullname = os.path.join(path, name)
if os.path.islink(fullname):
continue
yield fullname
def get_bin_files(path='.'):
for dirname, subdirs, names in os.walk(path):
for name in names:
fullname = os.path.join(dirname, name)
if os.path.islink(fullname):
continue
outputstring = check_output(['file', fullname])
if not outputstring.find('ELF') >= 0:
continue
if (
not outputstring.find('shared object') >= 0 and
not outputstring.find('executable') >= 0
):
continue
yield os.path.normpath(os.path.abspath(fullname))
def get_rpaths(name):
arguments = ['patchelf', '--print-rpath', name]
outputstring = check_output(arguments)
outputstring = outputstring.strip()
return [x for x in outputstring.split(':') if len(x) > 0]
def add_rpaths(name, rpaths, dryrun = True):
existing_paths = get_rpaths(name)
parts = list(rpaths)
parts.extend(existing_paths)
set_rpaths(name, parts, dryrun)
def set_rpaths(name, rpaths, dryrun = True):
rpath = ':'.join(rpaths)
arguments = ['patchelf', '--set-rpath', rpath, name]
if dryrun == True:
print ' '.join(arguments)
else:
outputstring = check_output(arguments)
print outputstring
def main(libpath='../lib', binpath='.', rpath='', dryrun = True):
abslibpath = os.path.abspath(libpath)
for name in get_bin_files(binpath):
must_add_rpath = False
if len(rpath) == 0:
absbinpath = os.path.abspath(name)
relpath = os.path.relpath(abslibpath, os.path.dirname(absbinpath))
print relpath, abslibpath
if not relpath or relpath == '.':
newrpath = '$ORIGIN'
else:
newrpath = os.path.join('$ORIGIN',relpath)
else:
newrpath = rpath
print "file:", name
if dryrun:
currentpaths = get_rpaths(name)
if len(currentpaths) > 0:
print "CURRENT RPATHS(s):"
for x in get_rpaths(name):
print " ** ",x
if newrpath == '$ORIGIN' and len(get_rpaths(name)) == 0:
print " NOT SETTING RPATH FOR: ",name
continue
#, '$ORIGIN'
set_rpaths(name, [newrpath], dryrun)
#for x in get_dylib_files(path):
# basename = os.path.basename(dylibid)
# change_dylib_id(x, os.path.join('@rpath', basename), dryrun)
def new_option_parser():
result = OptionParser()
result.add_option(
"-r", "--rpath",
default = '',
dest="rpath",
help="new path for dynamic libraries, default will create relative path between bin and libdir",
type="string"
)
result.add_option(
"-p", "--path",
default = '../lib',
dest="libpath",
help="path containing the shared libraries",
type="string"
)
result.add_option(
"-b", "--bin-path",
default = '.',
dest="binpath",
help="path to scan for binaries referencing dynamic libraries",
type="string"
)
result.add_option(
"--dry-run",
default = False,
dest="dryrun",
help="if given will show the commands, not execute these",
action="store_true"
)
return result
if __name__ == '__main__':
    options, arguments = new_option_parser().parse_args()
main(**options.__dict__)
```
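A hedged usage sketch; it assumes the script is run (or imported) from its own directory and that the `patchelf` and `file` tools are on the PATH. The dry run only prints the patchelf commands, which is a safe way to check the computed `$ORIGIN`-relative rpaths first:

```python
from linux_set_rpath import main

# preview the patchelf invocations for binaries in ./bin against ../lib
# equivalent command line: python linux_set_rpath.py --path ../lib --bin-path bin --dry-run
main(libpath='../lib', binpath='bin', rpath='', dryrun=True)

# drop the dry run to actually rewrite the RPATHs
main(libpath='../lib', binpath='bin', rpath='', dryrun=False)
```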
#### File: test/codes_tests/test_athena.py
```python
import os
import sys
import numpy
import math
from amuse.test.amusetest import TestWithMPI
from amuse.community.athena.interface import AthenaInterface, Athena
from amuse.units.quantities import VectorQuantity
from amuse.units import generic_unit_system
from amuse.units import units
from amuse.units import generic_unit_converter
from amuse import datamodel
class TestAthenaInterface(TestWithMPI):
def test0(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.stop()
def test1(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.par_seti("test", "testname", "%d", 10, "a test parameter")
x = instance.par_geti("test", "testname")
self.assertEquals(x, 10)
instance.stop()
def test2(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.par_setd("test", "test2", "%.15e", 1.123, "a test parameter")
x = instance.par_getd("test", "test2")
self.assertEquals(x, 1.123)
instance.stop()
def test3(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.setup_mesh(5, 1, 1, 1.0, 0.0, 0.0)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
x = instance.par_geti("domain1", "Nx1")
self.assertEquals(x, 5)
error = instance.commit_parameters()
self.assertEquals(error, 0)
number_of_grids, error = instance.get_number_of_grids()
self.assertEquals(error, 0)
self.assertEquals(number_of_grids, 1)
x,y,z,error = instance.get_position_of_index(0,0,0,1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 0.1)
x,y,z,error = instance.get_position_of_index(1,0,0,1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 0.3)
x,y,z,error = instance.get_position_of_index(2,0,0,1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 0.5)
x,y,z,error = instance.get_position_of_index(3,0,0,1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 0.7)
x,y,z,error = instance.get_position_of_index(4,0,0,1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 0.9)
x,y,z,error = instance.get_position_of_index(5,0,0,1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 1.1)
instance.stop()
def test4(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.setup_mesh(10, 20, 40, 1.0, 1.0, 1.0)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
imin, imax, jmin, jmax, kmin, kmax = instance.get_index_range_inclusive()
x,y,z, error= instance.get_position_of_index(2,2,2)
self.assertEquals(error, -1)
result = instance.commit_parameters()
self.assertEquals(result, 0)
x,y,z, error= instance.get_position_of_index(0,0,0)
self.assertEquals(error, 0)
print x,y,z
self.assertAlmostRelativeEquals(0.05, x)
self.assertAlmostRelativeEquals(0.025, y)
self.assertAlmostRelativeEquals(0.0125, z)
x,y,z, error= instance.get_position_of_index(10,20,40)
self.assertEquals(error, 0)
print x,y,z
self.assertAlmostRelativeEquals(1.05, x)
self.assertAlmostRelativeEquals(1.025, y)
self.assertAlmostRelativeEquals(1.0125, z)
instance.stop()
def test5(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(5, 5, 5, 1.0, 1.0, 1.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.4)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
self.assertEquals(result, 0)
time, error = instance.get_time()
self.assertEquals(error,0)
self.assertEquals(time, 0.0)
error = instance.set_grid_state(1,1,1,0.1, 0.2, 0.3, 0.4, 0.5)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(1,1,1)
self.assertEquals(error, 0)
self.assertEquals(rho, 0.1)
self.assertEquals(rhovx, 0.2)
self.assertEquals(rhovy, 0.3)
self.assertEquals(rhovz, 0.4)
self.assertEquals(energy, 0.5)
rhovx, rhovy, rhovz, error = instance.get_grid_momentum_density(1,1,1)
self.assertEquals(error, 0)
self.assertEquals(rhovx, 0.2)
self.assertEquals(rhovy, 0.3)
self.assertEquals(rhovz, 0.4)
rho, error = instance.get_grid_density(1,1,1)
self.assertEquals(error, 0)
self.assertEquals(rho, 0.1)
energy, error = instance.get_grid_energy_density(1,1,1)
self.assertEquals(error, 0)
self.assertEquals(energy, 0.5)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state([1],[1],[1])
self.assertEquals(error[0], 0)
self.assertEquals(rho[0], 0.1)
error = instance.initialize_grid()
self.assertEquals(error, 0)
timestep, error = instance.get_timestep()
self.assertEquals(error, 0)
instance.stop()
def test5a(self):
instance=self.new_instance(AthenaInterface, mode="mhd")
instance.initialize_code()
instance.setup_mesh(5, 5, 5, 1.0, 1.0, 1.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.4)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
self.assertEquals(result, 0)
time, error = instance.get_time()
self.assertEquals(error,0)
self.assertEquals(time, 0.0)
error = instance.set_grid_magnetic_field(1,1,1,0.1, 0.2, 0.3)
self.assertEquals(error, 0)
B1i, B2i, B3i, error = instance.get_grid_magnetic_field(1,1,1)
self.assertEquals(error, 0)
self.assertEquals(B1i, 0.1)
self.assertEquals(B2i, 0.2)
self.assertEquals(B3i, 0.3)
instance.stop()
def test7(self):
results = []
for x in range(1,5):
instance=self.new_instance(AthenaInterface, number_of_workers=x)
instance.initialize_code()
instance.setup_mesh(128,1,1,1.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
self.assertEquals(result, 0)
nghost, error = instance.get_nghost()
self.assertEquals(4, nghost)
instance.set_grid_state(numpy.arange(0,128), numpy.zeros(128), numpy.zeros(128),0.1, 0.2, 0.3, 0.4, 0.5)
error = instance.initialize_grid()
self.assertEquals(error, 0)
result = instance.get_grid_state(numpy.arange(0,128), numpy.zeros(128), numpy.zeros(128))
results.append(list(result))
instance.stop()
for x in range(128):
for y in range(6):
self.assertEquals(results[1][y][x], results[0][y][x])
self.assertEquals(results[2][y][x], results[0][y][x])
self.assertEquals(results[3][y][x], results[0][y][x])
def test8(self):
instance=self.new_instance(AthenaInterface, number_of_workers=1)
instance.initialize_code()
instance.setup_mesh(128,1,1,1.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
self.assertEquals(result, 0)
instance.fill_grid_linearwave_1d(0, 1e-06, 0.0, 1)
error = instance.initialize_grid()
self.assertEquals(error, 0)
print instance.get_grid_state(numpy.arange(0,128), numpy.zeros(128), numpy.zeros(128))
timestep, error = instance.get_timestep()
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(timestep, 0.006249991, 5)
rho0, rhovx0, rhovy0, rhovz0, energy0, error0 = instance.get_grid_state(numpy.arange(0,128), numpy.zeros(128), numpy.zeros(128))
instance.evolve_model(5.0)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(numpy.arange(0,128), numpy.zeros(128), numpy.zeros(128))
error_rho = numpy.sum(numpy.abs(rho - rho0))
error_rhovx = numpy.sum(numpy.abs(rhovx - rhovx0))
error_rhovy = numpy.sum(numpy.abs(rhovy - rhovy0))
error_rhovz = numpy.sum(numpy.abs(rhovz - rhovz0))
error_energy = numpy.sum(numpy.abs(energy - energy0))
self.assertAlmostRelativeEquals(error_rho / 128.0, 1.877334e-09, 6)
self.assertAlmostRelativeEquals(error_rhovx / 128.0, 1.877334e-09, 6)
self.assertAlmostRelativeEquals(error_energy / 128.0, 2.816001e-09, 6)
instance.stop()
def test9(self):
results = []
for x in range(1,5):
instance=self.new_instance(AthenaInterface, number_of_workers=x)
instance.initialize_code()
instance.setup_mesh(128,1,1,1.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
self.assertEquals(result, 0)
nghost, error = instance.get_nghost()
self.assertEquals(4, nghost)
instance.fill_grid_linearwave_1d(0, 1e-06, 0.0, 1)
error = instance.initialize_grid()
self.assertEquals(error, 0)
instance.evolve_model(5.0)
result = instance.get_grid_state(numpy.arange(0,128), numpy.zeros(128), numpy.zeros(128))
results.append(list(result))
instance.stop()
for x in range(128):
for y in range(6):
self.assertEquals(results[1][y][x], results[0][y][x])
self.assertEquals(results[2][y][x], results[0][y][x])
self.assertEquals(results[3][y][x], results[0][y][x])
def test10(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.setup_mesh(5, 5, 5, 1.0, 1.0, 1.0)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
instance.commit_parameters()
instance.initialize_grid()
x,y,z,error = instance.get_position_of_index([0,1,2,3,4],[0,0,0,0,0],[0,0,0,0,0])
for x0, x1 in zip(x, [0.1, 0.3, 0.5, 0.7, 0.9]):
self.assertAlmostRelativeEqual(x0, x1)
for y0, y1 in zip(y, [0.1, 0.1, 0.1, 0.1, 0.1]):
self.assertAlmostRelativeEqual(y0, y1)
i,j,k,error = instance.get_index_of_position(0.3, 0.1, 0.1)
print i,j,k
self.assertAlmostRelativeEqual(i, 1)
self.assertAlmostRelativeEqual(j, 0)
self.assertAlmostRelativeEqual(k, 0)
i,j,k,error = instance.get_index_of_position(0.39, 0.1, 0.1)
self.assertAlmostRelativeEqual(i, 1.0)
self.assertAlmostRelativeEqual(j, 0)
self.assertAlmostRelativeEqual(k, 0)
i,j,k,error = instance.get_index_of_position(0.4, 0.1, 0.1)
self.assertAlmostRelativeEqual(i, 2.0)
self.assertAlmostRelativeEqual(j, 0)
self.assertAlmostRelativeEqual(k, 0)
x,y,z,error = instance.get_position_of_index(-1,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(x, -0.1)
x,y,z,error = instance.get_position_of_index(5,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(x, 1.1)
instance.stop()
def test6(self):
instance=self.new_instance(AthenaInterface, number_of_workers = 5)
instance.initialize_code()
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.setup_mesh(100, 200, 400, 10.0, 10.0, 10.0)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
imin, imax, jmin, jmax, kmin, kmax = instance.get_index_range_inclusive()
x,y,z, error= instance.get_position_of_index(2,2,2)
self.assertEquals(error, -1)
result = instance.commit_parameters()
self.assertEquals(result, 0)
x,y,z, error= instance.get_position_of_index(0,0,0)
self.assertEquals(error, 0)
print x,y,z
self.assertAlmostRelativeEquals(0.05, x)
self.assertAlmostRelativeEquals(0.025, y)
self.assertAlmostRelativeEquals(0.0125, z)
x,y,z, error= instance.get_position_of_index(100,200,400)
self.assertEquals(error, 0)
print x,y,z
self.assertAlmostRelativeEquals(10.05, x)
self.assertAlmostRelativeEquals(10.025, y)
self.assertAlmostRelativeEquals(10.0125, z)
instance.stop()
def test11(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.setup_mesh(10, 20, 40, 1.0, 1.0, 1.0)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
imin, imax, jmin, jmax, kmin, kmax = instance.get_index_range_inclusive()
self.assertEquals(imin, 0)
self.assertEquals(jmin, 0)
self.assertEquals(kmin, 0)
self.assertEquals(imax, 9)
self.assertEquals(jmax, 19)
self.assertEquals(kmax, 39)
imin, imax, jmin, jmax, kmin, kmax = instance.get_index_range_for_potential()
self.assertEquals(imin, -1)
self.assertEquals(jmin, -1)
self.assertEquals(kmin, -1)
self.assertEquals(imax, 10)
self.assertEquals(jmax, 20)
self.assertEquals(kmax, 40)
instance.stop()
def test12(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.setup_mesh(10, 20, 40, 1.0, 1.0, 1.0)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
instance.commit_parameters()
instance.initialize_grid()
x,y,z, error= instance.get_position_of_index(-1,-1,-1)
self.assertEquals(error, 0)
print x,y,z
self.assertAlmostRelativeEquals(-0.05, x)
self.assertAlmostRelativeEquals(-0.025, y)
self.assertAlmostRelativeEquals(-0.0125, z)
instance.stop()
def test13(self):
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.setup_mesh(2, 2, 2, 1.0, 1.0, 1.0)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
instance.commit_parameters()
instance.initialize_grid()
potential_along_one_axis = [-1.0,0.0,1.0,2.0]
instance.set_potential(
[-1,0,1,2],
[0,0,0,0],
[0,0,0,0],
potential_along_one_axis
)
got_potential,error = instance.get_potential(
[-1,0,1,2],
[0,0,0,0],
[0,0,0,0])
print got_potential, error
for expected, actual in zip(potential_along_one_axis, got_potential):
self.assertEquals(expected, actual)
x,y,z,error = instance.get_position_of_index(
[-1,0,1,2],
[0,0,0,0],
[0,0,0,0])
print x,y,z, error
for expected, actual in zip([-0.25,0.25,0.75,1.25], x):
self.assertEquals(expected, actual)
for expected, actual in zip([0.25,0.25,0.25,0.25], y):
self.assertEquals(expected, actual)
for expected, actual in zip([0.25,0.25,0.25,0.25], z):
self.assertEquals(expected, actual)
potential, error = instance.get_interpolated_gravitational_potential(0, 0.25, 0.25)
print potential, error
self.assertEquals(error, 0)
self.assertEquals(potential, -0.5)
potential, error = instance.get_interpolated_gravitational_potential(0.75, 0.5, 0.25)
print potential, error
self.assertEquals(error, 0)
self.assertEquals(potential, 0.5)
potential, error = instance.get_interpolated_gravitational_potential(0.75, 0.25, 0.5)
print potential, error
self.assertEquals(error, 0)
self.assertEquals(potential, 0.5)
potential, error = instance.get_interpolated_gravitational_potential(0.75, 0.25, 0.0)
print potential, error
self.assertEquals(error, 0)
self.assertEquals(potential, 0.5)
instance.stop()
def test14(self):
instance=self.new_instance(AthenaInterface, mode="scalar")
instance.initialize_code()
instance.setup_mesh(5, 5, 5, 1.0, 1.0, 1.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.4)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
self.assertEquals(result, 0)
error = instance.set_grid_scalar(1,1,1,0.45)
self.assertEquals(error, 0)
scalar, error = instance.get_grid_scalar(1,1,1)
self.assertEquals(error, 0)
self.assertEquals(scalar, 0.45)
scalar, error = instance.get_grid_scalar(1,1,2)
self.assertEquals(error, 0)
self.assertEquals(scalar, 0)
instance.stop()
def test15(self):
results = []
for x in range(1,6):
instance=self.new_instance(AthenaInterface, number_of_workers=x)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
for index in range(100):
x,y,z,error = instance.get_position_of_index(index,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, index + 0.5)
i,j,k,error = instance.get_index_of_position(x,y,z)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(i, index)
instance.stop()
def test16(self):
for x in range(1,6):
instance=self.new_instance(AthenaInterface, number_of_workers=x)
instance.initialize_code()
instance.setup_mesh(10,100,1,100.0,100.0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
for index in range(100):
x,y,z,error = instance.get_position_of_index(0,index,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(y, index + 0.5)
i,j,k,error = instance.get_index_of_position(x,y,z)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(j, index)
instance.stop()
def test17(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","periodic","periodic","periodic","periodic","periodic")
instance.commit_parameters()
minx, maxx, miny, maxy, minz, maxz, error = instance.get_boundary_index_range_inclusive(1,1)
self.assertEquals(error, 0)
self.assertEquals(minx, 0)
self.assertEquals(maxx, 3)
self.assertEquals(miny, 0)
self.assertEquals(maxy, 0)
self.assertEquals(minz, 0)
self.assertEquals(maxz, 0)
for i in range(2,7):
minx, maxx, miny, maxy, minz, maxz, error = instance.get_boundary_index_range_inclusive(i,1)
self.assertEquals(error, 0)
self.assertEquals(minx, 0)
self.assertEquals(maxx, 0)
self.assertEquals(miny, 0)
self.assertEquals(maxy, 0)
self.assertEquals(minz, 0)
self.assertEquals(maxz, 0)
def test18(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,5,6,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","periodic","periodic","periodic","periodic","periodic")
instance.commit_parameters()
minx, maxx, miny, maxy, minz, maxz, error = instance.get_boundary_index_range_inclusive(1,1)
self.assertEquals(error, 0)
self.assertEquals(minx, 0)
self.assertEquals(maxx, 3)
self.assertEquals(miny, 0)
self.assertEquals(maxy, 4)
self.assertEquals(minz, 0)
self.assertEquals(maxz, 5)
for i in range(2,7):
minx, maxx, miny, maxy, minz, maxz, error = instance.get_boundary_index_range_inclusive(i,1)
self.assertEquals(error, 0)
self.assertEquals(minx, 0)
self.assertEquals(maxx, 0)
self.assertEquals(miny, 0)
self.assertEquals(maxy, 0)
self.assertEquals(minz, 0)
self.assertEquals(maxz, 0)
def test19(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,5,6,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","interface","interface")
instance.commit_parameters()
for i in range(1,7):
minx, maxx, miny, maxy, minz, maxz, error = instance.get_boundary_index_range_inclusive(i,1)
self.assertEquals(error, 0)
self.assertEquals(minx, 0)
self.assertEquals(miny, 0)
self.assertEquals(minz, 0)
if i == 1 or i == 2:
self.assertEquals(maxx, 3)
self.assertEquals(maxy, 4)
self.assertEquals(maxz, 5)
elif i == 3 or i == 4:
self.assertEquals(maxx, 99+8)
self.assertEquals(maxy, 3)
self.assertEquals(maxz, 5)
elif i == 5 or i == 6:
self.assertEquals(maxx, 99+8)
self.assertEquals(maxy, 4+8)
self.assertEquals(maxz, 3)
def test20(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,100.0,100.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","periodic","periodic","periodic","periodic","periodic")
instance.commit_parameters()
for i in range(4):
error = instance.set_boundary_state(
i,0,0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
1.0, 1.0 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i, 0, 0,
1.0, 1.0
)
print rho, rhovx, rhovy, rhovz, rhoen, error
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test21(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","periodic","periodic","periodic","periodic")
instance.commit_parameters()
for i in range(4):
for j in [1,2]:
error = instance.set_boundary_state(
i,0,0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
j, 1.0 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i, 0, 0,
j, 1.0
)
print j
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test22(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(5,6,7,100.0,100.0,100.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","interface","interface")
instance.commit_parameters()
x1range = (4,6,7)
x2range = (5,4,7)
x3range = (5,6,4)
for xrange, j in zip([x1range, x1range, x2range, x2range, x3range, x3range], [1,2,3,4,5,6]):
for i0 in range(xrange[0]):
for j0 in range(xrange[1]):
for k0 in range(xrange[2]):
i = (i0 * (xrange[2] * xrange[1])) + (j0 * xrange[2]) + k0
error = instance.set_boundary_state(
i0, j0, k0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
j, 1.0 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i0, j0, k0, # index
j, 1.0
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test24(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,5,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","interface","interface")
instance.commit_parameters()
for i in range(1,7):
minx, maxx, miny, maxy, minz, maxz, error = instance.get_boundary_index_range_inclusive(i,1)
self.assertEquals(error, 0)
self.assertEquals(minx, 0)
self.assertEquals(miny, 0)
self.assertEquals(minz, 0)
if i == 1 or i == 2:
self.assertEquals(maxx, 3)
self.assertEquals(maxy, 4)
self.assertEquals(maxz, 0)
elif i == 3 or i == 4:
self.assertEquals(maxx, 99+8)
self.assertEquals(maxy, 3)
self.assertEquals(maxz, 0)
elif i == 5 or i == 6:
self.assertEquals(maxx, 99+8)
self.assertEquals(maxy, 4 +8)
self.assertEquals(maxz, 3)
def test25(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","periodic","periodic","periodic","periodic")
instance.commit_parameters()
dx = 1.0
for i in range(4):
x,y,z,error = instance.get_boundary_position_of_index(
i,0,0,
1, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, (0.5 * dx) - ((4 -i)*dx))
self.assertAlmostRelativeEquals(y, 0.0)
self.assertAlmostRelativeEquals(z, 0.0)
def test26(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,1,1,100.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","periodic","periodic","periodic","periodic")
instance.commit_parameters()
dx = 1.0
for i in range(4):
x,y,z,error = instance.get_boundary_position_of_index(
i,0,0,
2, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 100.0 + (0.5 * dx) + (i * dx))
self.assertAlmostRelativeEquals(y, 0.0)
self.assertAlmostRelativeEquals(z, 0.0)
def test27(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(100,5,1,100.0,100.0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","periodic","periodic")
instance.commit_parameters()
dx = 1.0
dy = 100.0 / 5.0
for i in range(4):
for j in range(5):
x,y,z,error = instance.get_boundary_position_of_index(
i, j, 1,
2, 1
)
print y, j, (0.5 * dy) - ((4 - j) * dy)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 100.0 + (0.5 * dx) + (i * dx))
self.assertAlmostRelativeEquals(y, (0.5 * dy) + (j * dy))
self.assertAlmostRelativeEquals(z, 0.0)
for i in range(100 + 8):
for j in range(4):
x,y,z,error = instance.get_boundary_position_of_index(
i, j, 1,
3, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
self.assertAlmostRelativeEquals(y, ((0.5 * dy) - ((4-j) * dy)))
self.assertAlmostRelativeEquals(z, 0.0)
x,y,z,error = instance.get_boundary_position_of_index(
i, j, 1,
4, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
self.assertAlmostRelativeEquals(y, 100.0 + (0.5 * dy) + (j * dy))
self.assertAlmostRelativeEquals(z, 0.0)
def test28(self):
results = []
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(3, 3, 3, 6,12,18)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","interface","interface")
instance.commit_parameters()
dx = 6.0 / 3.0
dy = 12.0 / 3.0
dz = 18.0 / 3.0
for i in range(4):
for j in range(3):
for k in range(3):
x,y,z,error = instance.get_boundary_position_of_index(
i, j, k,
2, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, 6.0 + (0.5 * dx) + (i * dx))
self.assertAlmostRelativeEquals(y, (0.5 * dy) + (j * dy))
self.assertAlmostRelativeEquals(z, (0.5 * dz) + (k * dz))
for i in range(3 + 8):
for j in range(4):
for k in range(3):
x,y,z,error = instance.get_boundary_position_of_index(
i, j, k,
3, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
self.assertAlmostRelativeEquals(y, ((0.5 * dy) - ((4-j) * dy)))
self.assertAlmostRelativeEquals(z, (0.5 * dz) + (k * dz))
x,y,z,error = instance.get_boundary_position_of_index(
i, j, k,
4, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
self.assertAlmostRelativeEquals(y, 12.0 + (0.5 * dy) + (j * dy))
self.assertAlmostRelativeEquals(z, (0.5 * dz) + (k * dz))
for i in range(3 + 8):
for j in range(3 + 8):
for k in range(4):
x,y,z,error = instance.get_boundary_position_of_index(
i, j, k,
5, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
self.assertAlmostRelativeEquals(y, (0.5 * dy) + ((j-4) * dy))
self.assertAlmostRelativeEquals(z, ((0.5 * dz) - ((4-k) * dz)))
x,y,z,error = instance.get_boundary_position_of_index(
i, j, k,
6, 1
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(x, (0.5 * dx) + ((i-4) * dx))
self.assertAlmostRelativeEquals(y, (0.5 * dy) + ((j-4) * dy))
self.assertAlmostRelativeEquals(z, 18.0 + (0.5 * dz) + (k * dz))
def test29(self):
results = []
instance=self.new_instance(AthenaInterface, number_of_workers = 3)
instance.initialize_code()
instance.setup_mesh(300,1,1,300.0,0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","periodic","periodic","periodic","periodic")
instance.commit_parameters()
for j in [1,2]:
print j
for i in range(4):
error = instance.set_boundary_state(
i,0,0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
j, 1.0 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i, 0, 0,
j, 1.0
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test30(self):
results = []
instance=self.new_instance(AthenaInterface, number_of_workers = 3)
instance.initialize_code()
instance.setup_mesh(30,10,1,30.0,10.0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","periodic","periodic")
instance.commit_parameters()
for boundaryindex in [3,4]:
for i0 in range(38):
for j0 in range(4):
i = (i0 * (4*38)) + j0
error = instance.set_boundary_state(
i0,j0,0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
boundaryindex, 1 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i0, j0, 0,
boundaryindex, 1.0
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test31(self):
results = []
instance=self.new_instance(AthenaInterface, number_of_workers = 3)
instance.initialize_code()
instance.set_auto_decomposition(0)
instance.set_parallel_decomposition(1,3,1)
instance.setup_mesh(5,6,1,5.0,6.0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","periodic","periodic")
instance.commit_parameters()
for boundaryindex in [1,2]:
for i0 in range(4):
for j0 in range(6):
i = (i0 * (4*6)) + j0
error = instance.set_boundary_state(
i0,j0,0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
boundaryindex, 1 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i0, j0, 0,
boundaryindex, 1.0
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test32(self):
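        """Set and read back x- and y-boundary states on a 6x6 mesh decomposed over 3x3 workers."""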
results = []
instance=self.new_instance(AthenaInterface, number_of_workers = 3*3)
instance.initialize_code()
instance.set_auto_decomposition(0)
instance.set_parallel_decomposition(3,3,1)
instance.setup_mesh(6,6,1,6.0,6.0,0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","periodic","periodic")
instance.commit_parameters()
for boundaryindex in [1,2]:
for i0 in range(4):
for j0 in range(6):
i = (i0 * (4*6)) + j0
error = instance.set_boundary_state(
i0,j0,0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
boundaryindex, 1 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i0, j0, 0,
boundaryindex, 1.0
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
for boundaryindex in [3,4]:
for i0 in range(6+8):
for j0 in range(4):
i = (i0 * (4*(6+8))) + j0
error = instance.set_boundary_state(
i0,j0,0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
boundaryindex, 1 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i0, j0, 0,
boundaryindex, 1.0
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test33(self):
results = []
instance=self.new_instance(AthenaInterface, number_of_workers = 3)
instance.initialize_code()
instance.set_auto_decomposition(0)
instance.set_parallel_decomposition(1,1,3)
instance.setup_mesh(5,5,6,5.0,5.0,30.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","periodic","periodic")
instance.commit_parameters()
for boundaryindex in [1,2]:
for i0 in range(4):
for j0 in range(5):
for z0 in range(6):
i = (i0 * (5*6)) + (j0 * 6) + z0
error = instance.set_boundary_state(
i0,j0,z0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
boundaryindex, 1 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i0, j0, z0,
boundaryindex, 1.0
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test34(self):
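        """Set and read back z-boundary states on a 6x5x5 mesh decomposed along x, with all boundaries set to 'interface'."""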
results = []
instance=self.new_instance(AthenaInterface, number_of_workers = 3)
instance.initialize_code()
instance.set_auto_decomposition(0)
instance.set_parallel_decomposition(3,1,1)
instance.setup_mesh(6,5,5,6.0,5.0,5.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","interface","interface")
instance.commit_parameters()
for boundaryindex in [5,6]:
for i0 in range(6+8):
for j0 in range(5+8):
for z0 in range(4):
i = (i0 * (5*4)) + (j0 * 4) + z0
error = instance.set_boundary_state(
i0,j0,z0, # index
1.0 * (i+1), # density
2.0 * (i+1), 3.0 * (i+1), 4.0 * (i+1), # momentum
5.0 * (i+1), # energy
boundaryindex, 1 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
i0, j0, z0,
boundaryindex, 1.0
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (i+1))
def test35(self):
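        """Vectorized (array-argument) set and get of z-boundary states on a 6x6x5 mesh with 9 workers."""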
results = []
instance=self.new_instance(AthenaInterface, number_of_workers = 9)
instance.initialize_code()
instance.set_auto_decomposition(0)
instance.set_parallel_decomposition(3,3,1)
instance.setup_mesh(6,6,5,6.0,6.0,5.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.8)
instance.set_boundary("interface","interface","interface","interface","interface","interface")
instance.commit_parameters()
boundary_indices = []
all_i0 = []
all_j0 = []
all_z0 = []
all_i = []
for boundaryindex in [5,6]:
for i0 in range(6+8):
for j0 in range(6+8):
for z0 in range(4):
boundary_indices.append(boundaryindex)
all_i0.append(i0)
all_j0.append(j0)
all_z0.append(z0)
i = (i0 * (5*4)) + (j0 * 4) + z0
all_i.append(i)
all_i = numpy.asarray(all_i)
error = instance.set_boundary_state(
all_i0,all_j0,all_z0, # index
1.0 * (all_i+1), # density
2.0 * (all_i+1), 3.0 * (all_i+1), 4.0 * (all_i+1), # momentum
5.0 * (all_i+1), # energy
boundary_indices, 1 # boundary + grid
)
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, rhoen, error = instance.get_boundary_state(
all_i0, all_j0, all_z0,
            boundary_indices, 1.0
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 1.0 * (all_i+1))
self.assertAlmostRelativeEquals(rhovx, 2.0 * (all_i+1))
self.assertAlmostRelativeEquals(rhovy, 3.0 * (all_i+1))
self.assertAlmostRelativeEquals(rhovz, 4.0 * (all_i+1))
self.assertAlmostRelativeEquals(rhoen, 5.0 * (all_i+1))
def test36(self):
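        """Round-trip set/get of grid accelerations for a list of cell indices."""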
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.setup_mesh(4, 3, 2, 1.0, 1.0, 1.0)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
instance.commit_parameters()
instance.initialize_grid()
ax_in = [1.,2.,3.,4.]
ay_in = [3,4,5,6]
az_in = [5,6,7,8]
instance.set_grid_acceleration(
[0,1,2,3],
[0,0,0,0],
[0,1,0,1],
ax_in,
ay_in,
az_in,
[1,1,1,1],
)
ax_out, ay_out, az_out, error = instance.get_grid_acceleration(
[0,1,2,3],
[0,0,0,0],
[0,1,0,1],
[1,1,1,1]
)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(ax_in, ax_out)
self.assertAlmostRelativeEquals(ay_in, ay_out)
self.assertAlmostRelativeEquals(az_in, az_out)
instance.stop()
def test37(self):
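        """Ghost cells of a periodic 1D grid are empty before initialize_grid and contain the wrapped-around states afterwards."""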
instance=self.new_instance(AthenaInterface)
instance.initialize_code()
instance.setup_mesh(4,1,1, 1.0, 1.0, 1.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.4)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
self.assertEquals(result, 0)
time, error = instance.get_time()
self.assertEquals(error,0)
self.assertEquals(time, 0.0)
for i in range(4):
error = instance.set_grid_state(i,0,0,0.1 * (i+1), 0.2 * (i+1), 0.3 * (i+1), 0.4 * (i+1), 0.5 * (i+1))
self.assertEquals(error, 0)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0,0,0)
self.assertEquals(error, 0)
self.assertEquals(rho, 0.1)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(-1,0,0)
self.assertEquals(error, 0)
self.assertEquals(rho, 0.0)
self.assertEquals(rhovx, 0.0)
self.assertEquals(rhovy, 0.0)
self.assertEquals(rhovz, 0.0)
self.assertEquals(energy, 0.0)
instance.initialize_grid()
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(-1,0,0)
self.assertEquals(error, 0)
self.assertEquals(rho, 0.4)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(-2,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.3)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(-3,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.2)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(-4,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.1)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(-5,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.0)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(4,0,0)
self.assertEquals(error, 0)
self.assertEquals(rho, 0.1)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(5,0,0)
self.assertEquals(error, 0)
self.assertEquals(rho, 0.2)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(6,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.3)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(7,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.4)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(8,0,0)
self.assertEquals(error, 0)
self.assertEquals(rho, 0.0)
instance.stop()
def test38(self):
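        """Periodic ghost-cell check in all three directions on an 8^3 mesh decomposed over 2x2x2 workers."""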
instance=self.new_instance(AthenaInterface, number_of_workers = 8)
instance.initialize_code()
instance.set_auto_decomposition(0)
instance.set_parallel_decomposition(2,2,2)
instance.setup_mesh(8, 8, 8, 1.0, 1.0, 1.0)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.4)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
result = instance.commit_parameters()
self.assertEquals(result, 0)
time, error = instance.get_time()
self.assertEquals(error,0)
self.assertEquals(time, 0.0)
for i in range(8):
error = instance.set_grid_state(i,0,0,0.1 * (i+1), 0.2 * (i+1), 0.3 * (i+1), 0.4 * (i+1), 0.5 * (i+1))
error = instance.set_grid_state(0,i,0,0.1 * (i+1), 0.2 * (i+1), 0.3 * (i+1), 0.4 * (i+1), 0.5 * (i+1))
error = instance.set_grid_state(0,0,i,0.1 * (i+1), 0.2 * (i+1), 0.3 * (i+1), 0.4 * (i+1), 0.5 * (i+1))
self.assertEquals(error, 0)
instance.initialize_grid()
for i in range(8):
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(i,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, (i+1) * 0.1)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0,i,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, (i+1) * 0.1)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0,0,i)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, (i+1) * 0.1)
for i in range(4):
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(-(i+1),0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.8 - (i * 0.1))
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(-5,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.0)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(8 + i,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, (i+1) * 0.1)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(8 + 4,0,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.0)
# 2 dimension
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0, -(i+1),0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.8 - (i * 0.1))
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0,-5, 0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.0)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0,8 + i,0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, (i+1) * 0.1)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0,8+4, 0)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.0)
# 3 dimension
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0, 0, -(i+1))
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.8 - (i * 0.1))
            rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0, 0, -5)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.0)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0,0,8 + i)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, (i+1) * 0.1)
rho, rhovx, rhovy, rhovz, energy, error = instance.get_grid_state(0,0, 8+4)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(rho, 0.0)
instance.stop()
class TestAthena(TestWithMPI):
def test0(self):
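        """High-level Athena interface: grid cell positions, the initial (zero) density, and copying an in-memory potential grid through a channel."""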
instance=self.new_instance(Athena)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.parameters.nx = 10
instance.parameters.ny = 20
instance.parameters.nz = 40
instance.parameters.length_x = 1 | generic_unit_system.length
instance.parameters.length_y = 2 | generic_unit_system.length
instance.parameters.length_z = 3 | generic_unit_system.length
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
#print instance.grid[0].y
density = generic_unit_system.mass / (generic_unit_system.length ** 3)
momentum = generic_unit_system.mass / (generic_unit_system.time * (generic_unit_system.length**2))
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
firstx = instance.grid[0][0][0].x
allx = instance.grid[0].x
for j in range(20):
for k in range(40):
self.assertEquals(allx[j][k], firstx)
print instance.grid[0][0].rho
self.assertEquals(instance.grid[0][0][0].rho, 0.0 |generic_unit_system.mass / generic_unit_system.length ** 3)
potential_grid = datamodel.Grid(12,22,42)
potential_grid.potential = 2.0 | generic_unit_system.potential
channel = potential_grid.new_channel_to(instance.potential_grid)
channel.copy()
self.assertEquals(instance.potential_grid[0][0][0].potential, 2.0 | generic_unit_system.potential)
self.assertEquals(instance.potential_grid[0][2][20].potential, 2.0 | generic_unit_system.potential)
instance.stop()
def test1(self):
instance=self.new_instance(Athena)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.parameters.nx = 10
instance.parameters.ny = 20
instance.parameters.nz = 40
instance.parameters.length_x = 1 | generic_unit_system.length
instance.parameters.length_y = 2 | generic_unit_system.length
instance.parameters.length_z = 3 | generic_unit_system.length
instance.parameters.x_boundary_conditions = "periodic","periodic"
instance.parameters.y_boundary_conditions = "periodic","periodic"
instance.parameters.z_boundary_conditions = "periodic","periodic"
result = instance.commit_parameters()
firstx = instance.potential_grid[0][0][0].x
print firstx
self.assertEquals(firstx, -0.05 | generic_unit_system.length)
allx = instance.potential_grid[0].x
for j in range(20):
for k in range(40):
self.assertEquals(allx[j][k], firstx)
instance.stop()
def test2(self):
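        """A uniform density field with periodic boundaries must remain uniform under evolve_model."""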
instance=self.new_instance(Athena)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.setup_mesh(10, 10, 1, 1.0 | generic_unit_system.length, 1.0 | generic_unit_system.length , 0.0 | generic_unit_system.length)
instance.parameters.mesh_size = (10,10,1)
instance.parameters.length_x = 1.0 | generic_unit_system.length
instance.parameters.length_y = 1.0 | generic_unit_system.length
instance.parameters.length_z = 0.0 | generic_unit_system.length
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
density = generic_unit_system.mass / (generic_unit_system.length ** 3)
momentum = generic_unit_system.mass / (generic_unit_system.time * (generic_unit_system.length**2))
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid = datamodel.Grid(10,10,1)
grid.rho = 0.1 | density
grid.rhovx = 0.0 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = 0.0 | momentum
grid.energy = 0.0 | energy
channel = grid.new_channel_to(instance.grid)
channel.copy()
print instance.grid[1].rho
self.assertEquals(instance.grid[1][1][0].rho, 0.1 | density)
for x in instance.grid[1].rho.value_in(density).flatten():
self.assertEquals(x, 0.1)
instance.evolve_model(1.0 | generic_unit_system.time)
for x in instance.grid.rho.value_in(density).flatten():
self.assertEquals(x, 0.1)
instance.evolve_model(10.0 | generic_unit_system.time)
for x in instance.grid.rho.value_in(density).flatten():
self.assertEquals(x, 0.1)
instance.stop()
def test3(self):
instance=self.new_instance(Athena)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.parameters.nx = 10
instance.parameters.ny = 10
instance.parameters.nz = 1
instance.parameters.length_x = 1.0 | generic_unit_system.length
instance.parameters.length_y = 1.0 | generic_unit_system.length
instance.parameters.length_z = 0.0 | generic_unit_system.length
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
instance.set_has_external_gravitational_potential(1)
density = generic_unit_system.mass / (generic_unit_system.length ** 3)
momentum = generic_unit_system.mass / (generic_unit_system.time * (generic_unit_system.length**2))
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid = datamodel.Grid(10,10,1)
grid.rho = 0.1 | density
grid.rhovx = 0.0 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = 0.0 | momentum
grid.energy = 0.0 | energy
self.assertEquals(grid.get_defined_settable_attribute_names(), ['energy', 'rho', 'rhovx', 'rhovy', 'rhovz', ] )
channel = grid.new_channel_to(instance.grid)
channel.copy()
potential_grid = datamodel.Grid(12,12,1)
potential_grid.potential = 0.0 | generic_unit_system.potential
channel = potential_grid.new_channel_to(instance.potential_grid)
channel.copy()
result = instance.initialize_grid()
self.assertEquals(instance.grid[1][1][0].rho, 0.1 | density)
for x in instance.grid[1].rho.value_in(density).flatten():
self.assertEquals(x, 0.1)
instance.evolve_model(1.0 | generic_unit_system.time)
for x in instance.grid.rho.value_in(density).flatten():
self.assertEquals(x, 0.1)
instance.evolve_model(10.0 | generic_unit_system.time)
for x in instance.grid.rho.value_in(density).flatten():
self.assertEquals(x, 0.1)
instance.stop()
def test4(self):
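        """A non-trivial external gravitational potential should perturb the initially uniform density field."""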
instance=self.new_instance(Athena)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.parameters.nx = 10
instance.parameters.ny = 10
instance.parameters.nz = 1
instance.parameters.length_x = 1.0 | generic_unit_system.length
instance.parameters.length_y = 1.0 | generic_unit_system.length
instance.parameters.length_z = 0.0 | generic_unit_system.length
instance.set_boundary("outflow","outflow","outflow","outflow","outflow","outflow")
instance.set_has_external_gravitational_potential(1)
density = generic_unit_system.mass / (generic_unit_system.length ** 3)
momentum = generic_unit_system.mass / (generic_unit_system.time * (generic_unit_system.length**2))
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid = datamodel.Grid(10,10,1)
grid.rho = 0.1 | density
grid.rhovx = 0.0 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = 0.0 | momentum
grid.energy = 0.0 | energy
channel = grid.new_channel_to(instance.grid)
channel.copy()
potential_grid = datamodel.Grid(12,12,1)
potential_grid.potential = 0.0 | generic_unit_system.potential
x = instance.potential_grid.x
y = instance.potential_grid.y
print 1
for i in range(12):
for j in range(12):
px = x[i][j][0].value_in(generic_unit_system.length)
py = y[i][j][0].value_in(generic_unit_system.length)
potential = (math.sin(py * math.pi)+math.sin(px *math.pi)) / 200.0
if px < 0 or px > 1.0:
potential = 0.0
if py < 0 or py > 1.0:
potential = 0.0
#print potential
potential_grid.potential[i][j][0] = -0.001 * generic_unit_system.potential.new_quantity([potential])
#instance.potential_grid[i][j][0].potential = -0.001 * generic_unit_system.potential.new_quantity([potential])
channel = potential_grid.new_channel_to(instance.potential_grid)
channel.copy()
print 2
result = instance.initialize_grid()
self.assertEquals(instance.grid[1][1][0].rho, 0.1 | density)
for x in instance.grid[1].rho.value_in(density).flatten():
self.assertAlmostRelativeEquals(x, 0.1)
#print instance.potential_grid[...,0].potential
instance.evolve_model(1.0 | generic_unit_system.time)
#print "--------------------------"
#print instance.grid.rho[...,0]
z = instance.grid.rho[...,0]
#z = instance.potential_grid.potential[...,0]
#z = z.value_in(generic_unit_system.potential)
z = z.value_in(density)
#from matplotlib import pyplot
#x = instance.potential_grid[...,50,0].x
##y = instance.potential_grid[...,50,0].y
#z = instance.potential_grid[...,50,0].z
#pyplot.plot(x.value_in(generic_unit_system.length), instance.potential_grid[...,50,0].potential.value_in(generic_unit_system.potential))
#dx = x[1] - x[0]
#x += 1.5 * dx
#interpolated = instance.get_interpolated_gravitational_potential(x,y,z)
#pyplot.plot(x.value_in(generic_unit_system.length), interpolated.value_in(generic_unit_system.potential))
#img = pyplot.imshow(z)
#img.set_interpolation('none')
#pyplot.savefig("bla.png")
for x in instance.grid.rho.value_in(density).flatten():
self.assertNotEquals(x, 0.1)
instance.stop()
def test5(self):
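        """Get/set round trips for the isothermal_sound_speed, gamma and courant_number parameters."""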
instance=self.new_instance(Athena)
self.assertAlmostRelativeEquals(instance.parameters.isothermal_sound_speed, 0.0 | generic_unit_system.speed)
instance.parameters.isothermal_sound_speed = 0.1 | generic_unit_system.speed
self.assertAlmostRelativeEquals(instance.parameters.isothermal_sound_speed, 0.1 | generic_unit_system.speed)
self.assertAlmostRelativeEquals(instance.parameters.gamma, 1.66666666666666667)
instance.parameters.gamma = 0.1
self.assertAlmostRelativeEquals(instance.parameters.gamma, 0.1)
self.assertAlmostRelativeEquals(instance.parameters.courant_number, 0.3)
instance.parameters.courant_number = 0.1
self.assertAlmostRelativeEquals(instance.parameters.courant_number, 0.1)
print instance.parameters
instance.stop()
def test6(self):
instance=self.new_instance(Athena)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.setup_mesh(10 , 20, 40, 1.0 | generic_unit_system.length, 1.0 | generic_unit_system.length, 1.0 | generic_unit_system.length)
instance.set_boundary("periodic","periodic","periodic","periodic","periodic","periodic")
density = generic_unit_system.mass / (generic_unit_system.length ** 3)
momentum = generic_unit_system.mass / (generic_unit_system.time * (generic_unit_system.length**2))
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid = datamodel.new_regular_grid((10,10,10), [10.0, 10.0, 10.0] | units.m)
grid.rho = 0.4 | density
grid.rhovx = 0.1 | momentum
grid.rhovy = 0.2 | momentum
grid.rhovz = 0.3 | momentum
grid.energy = 0.5 | energy
channel = grid.new_channel_to(instance.grid)
channel.copy()
self.assertEquals(instance.grid[0][0][0].rho, 0.4 | density)
self.assertEquals(instance.grid.rho.number.ndim, 3)
self.assertEquals(len(list(instance.itergrids())), 1)
instance.stop()
def test7(self):
instance=self.new_instance(Athena)
instance.parameters.isothermal_sound_speed = 0.1 | generic_unit_system.speed
instance.parameters.gamma = 0.1
instance.parameters.courant_number = 0.1
instance.parameters.nx = 10
instance.parameters.ny = 20
instance.parameters.nz = 40
instance.parameters.length_x = 10 | generic_unit_system.length
instance.parameters.length_y = 20 | generic_unit_system.length
instance.parameters.length_z = 30 | generic_unit_system.length
print instance.parameters
instance.commit_parameters()
mini,maxi, minj,maxj, mink,maxk = instance.get_index_range_inclusive()
self.assertEquals(mini, 0)
self.assertEquals(maxi, 9)
self.assertEquals(minj, 0)
self.assertEquals(maxj, 19)
self.assertEquals(mink, 0)
self.assertEquals(maxk, 39)
self.assertEquals(instance.parameters.mesh_size, (10,20,40))
print instance.parameters
instance.stop()
def test8(self):
instance=self.new_instance(Athena)
instance.parameters.stopping_conditions_number_of_steps = 10
self.assertEquals(instance.parameters.stopping_conditions_number_of_steps, 10)
instance.stop()
def test8a(self):
instance=self.new_instance(Athena)
instance.parameters.stopping_conditions_timeout = 10 | units.s
self.assertEquals(instance.parameters.stopping_conditions_timeout, 10|units.s)
instance.stop()
def test9(self):
instance=self.new_instance(Athena)
instance.parameters.x_boundary_conditions = "periodic","periodic"
instance.parameters.y_boundary_conditions = "periodic","periodic"
instance.parameters.z_boundary_conditions = "periodic","periodic"
self.assertEquals(instance.parameters.xbound1, "periodic")
instance.stop()
def xtest10(self):
instance=self.new_instance(Athena)
instance.parameters.gamma = 5/3.0
instance.parameters.courant_number=0.3
n = 100
instance.parameters.nx = n
instance.parameters.ny = n
instance.parameters.nz = n
instance.parameters.length_x = 1 | generic_unit_system.length
instance.parameters.length_y = 1 | generic_unit_system.length
instance.parameters.length_z = 1 | generic_unit_system.length
        instance.parameters.x_boundary_conditions = ("periodic","periodic")
        instance.parameters.y_boundary_conditions = ("periodic","periodic")
        instance.parameters.z_boundary_conditions = ("periodic","periodic")
result = instance.commit_parameters()
density = generic_unit_system.mass / (generic_unit_system.length**3)
density = generic_unit_system.density
momentum = generic_unit_system.speed * generic_unit_system.density
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid = datamodel.Grid(n,n,n)
grid.rho = 0.0 | generic_unit_system.density
grid.rhovx = 0.0 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = 0.0 | momentum
grid.energy = 0.0 | energy
halfway = n/2 - 1
grid[:halfway].rho = 4.0 | generic_unit_system.density
grid[:halfway].energy = (1.0 | energy)/ (instance.parameters.gamma - 1)
grid[halfway:].rho = 1.0 | generic_unit_system.density
grid[halfway:].energy = (0.1795 | energy)/ (instance.parameters.gamma - 1)
channel = grid.new_channel_to(instance.grid)
channel.copy()
#from amuse import plot
#from matplotlib import pyplot
#print grid.rho[...,0,0]
#plot.plot(instance.grid.x[...,0,0], grid.rho[...,0,0])
#pyplot.savefig("bla1.png")
error = instance.initialize_grid()
instance.evolve_model(0.12 | generic_unit_system.time)
channel = instance.grid.new_channel_to(grid)
channel.copy()
#print grid.rho[...,0,0]
#plot.plot(instance.grid.x[...,0,0], grid.rho[...,0,0])
#pyplot.savefig("bla2.png")
instance.stop()
def test11(self):
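        """Self-gravity mode: a Plummer-like density sphere gives a negative gravitational potential and a z-acceleration that points towards the midplane."""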
instance=self.new_instance(Athena, mode=AthenaInterface.MODE_SELF_GRAVITY) #, redirection = "none") #, debugger="gdb")
instance.parameters.gamma = 5/3.0
instance.parameters.courant_number=0.3
density = generic_unit_system.mass / (generic_unit_system.length ** 3)
momentum = generic_unit_system.mass / (generic_unit_system.time * (generic_unit_system.length**2))
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
instance.parameters.four_pi_G = 4 * numpy.pi * (1|(generic_unit_system.length**3) / (generic_unit_system.mass * (generic_unit_system.time**2))) # G = 1, like nbody
instance.parameters.gravity_mean_rho = 0.0 | density
datamodel.Grid.add_global_vector_attribute("position", ["x","y","z"])
n = 10
instance.parameters.nx = n
instance.parameters.ny = n
instance.parameters.nz = n
instance.parameters.length_x = 4.0 | generic_unit_system.length
instance.parameters.length_y = 4.0 | generic_unit_system.length
instance.parameters.length_z = 4.0 | generic_unit_system.length
        instance.parameters.x_boundary_conditions = ("periodic","periodic")
        instance.parameters.y_boundary_conditions = ("periodic","periodic")
        instance.parameters.z_boundary_conditions = ("periodic","periodic")
result = instance.commit_parameters()
grid = datamodel.new_regular_grid((n,n,n), [4.0 , 4.0, 4.0] | generic_unit_system.length)
grid.rho = 0.0 | generic_unit_system.density
grid.rhovx = 0.0 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = 0.0 | momentum
grid.energy = 0.001 | energy
scaled_radius = 1.0 / 1.695 | generic_unit_system.length
total_mass = 1.0 | generic_unit_system.mass
radii = (grid.position - ([2.0, 2.0, 2.0] | generic_unit_system.length)).lengths()
rho_sphere = ((0.75 * total_mass / (numpy.pi * (scaled_radius ** 3))))
grid.rho = (rho_sphere * ((1 + (radii ** 2) / (scaled_radius ** 2))**(-5.0/2.0)))
internal_energy = (0.25 | generic_unit_system.time ** -2 * generic_unit_system.mass ** -1 * generic_unit_system.length **3) * total_mass / scaled_radius
grid.energy = grid.rho * internal_energy/(1+(radii/scaled_radius)**2)**(1.0/2.0)
channel = grid.new_channel_to(instance.grid)
channel.copy()
instance.initialize_grid()
instance.evolve_model(0.01 | generic_unit_system.time)
G = 1.0 | generic_unit_system.length **3 * generic_unit_system.mass**-1 * generic_unit_system.time**-2
a = instance.grid[5][5].gravitational_potential
b = (-1 * G * total_mass / (radii**2+scaled_radius**2).sqrt()) [5][5]
for x in a:
self.assertTrue(x < 0 | generic_unit_system.potential)
a = instance.grid[5][5].gravitational_acceleration_z
for index, x in enumerate(a):
if index < 5:
self.assertTrue(x > 0 | generic_unit_system.acceleration)
else:
self.assertTrue(x < 0 | generic_unit_system.acceleration)
instance.stop()
def test12(self):
print "Testing Athena grid setters"
instance=self.new_instance(Athena)
instance.parameters.isothermal_sound_speed = 0.1 | generic_unit_system.speed
instance.parameters.gamma = 5/3.0
instance.parameters.courant_number = 0.3
instance.parameters.mesh_size = (2, 2, 2)
instance.parameters.mesh_length = [1.0, 1.0, 1.0] | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("periodic", "periodic")
instance.parameters.y_boundary_conditions = ("periodic", "periodic")
instance.parameters.z_boundary_conditions = ("periodic", "periodic")
instance.grid.rho = 1.0 | generic_unit_system.density
self.assertAlmostEquals(instance.grid.rho,
numpy.ones((2,2,2)) | generic_unit_system.density)
instance.grid.momentum = numpy.reshape(
numpy.arange(0.0, 3.0, 0.125), (2,2,2,3)) | generic_unit_system.momentum_density
self.assertAlmostEquals(instance.grid.rhovx,
numpy.reshape(numpy.arange(0.000, 3.0, 0.375), (2,2,2)) | generic_unit_system.momentum_density)
self.assertAlmostEquals(instance.grid.rhovy,
numpy.reshape(numpy.arange(0.125, 3.0, 0.375), (2,2,2)) | generic_unit_system.momentum_density)
self.assertAlmostEquals(instance.grid.rhovz,
numpy.reshape(numpy.arange(0.250, 3.0, 0.375), (2,2,2)) | generic_unit_system.momentum_density)
momentum = instance.grid.momentum
rhovx = -momentum.x
rhovy = 2 * momentum.z
rhovz = -0.5 * momentum.y
instance.grid.momentum = VectorQuantity.new_from_scalar_quantities(rhovx,rhovy,rhovz).transpose(axes=(1,2,3,0))
self.assertAlmostEquals(instance.grid.rhovx,
numpy.reshape(numpy.arange(0.000, -3.0, -0.375), (2,2,2)) | generic_unit_system.momentum_density)
self.assertAlmostEquals(instance.grid.rhovy,
numpy.reshape(numpy.arange(0.5, 6.0, 0.75), (2,2,2)) | generic_unit_system.momentum_density)
self.assertAlmostEquals(instance.grid.rhovz,
numpy.reshape(numpy.arange(-0.0625, -1.5, -0.1875), (2,2,2)) | generic_unit_system.momentum_density)
instance.grid[...,0,...].momentum = [12.0, 13.0, 14.0] | generic_unit_system.momentum_density
self.assertAlmostEquals(instance.grid[0,...].rhovx,
[[12.0, 12.0], [-0.75, -1.125]] | generic_unit_system.momentum_density)
self.assertAlmostEquals(instance.grid[0,...].rhovy,
[[13.0, 13.0], [2.0, 2.75]] | generic_unit_system.momentum_density)
self.assertAlmostEquals(instance.grid[...,0].rhovz,
[[14.0, -0.4375], [14.0, -1.1875]] | generic_unit_system.momentum_density)
instance.grid.energy = numpy.reshape(numpy.arange(0.0, 1.0, 0.125), (2,2,2)) | generic_unit_system.energy_density
self.assertAlmostEquals(instance.grid[...,0,0].energy,
[0.0, 0.5] | generic_unit_system.energy_density)
self.assertAlmostEquals(instance.grid[0,...,0].energy,
[0.0, 0.25] | generic_unit_system.energy_density)
self.assertAlmostEquals(instance.grid[0,0,...].energy,
[0.0, 0.125] | generic_unit_system.energy_density)
instance.initialize_grid()
instance.stop()
def test13(self):
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(
1 | units.parsec,
1 | units.Myr,
1 | units.MSun
)
instance=self.new_instance(Athena, unit_converter = converter)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.parameters.mesh_size = (10 , 20, 40)
instance.parameters.mesh_length = [1.0, 1.0, 1.0] | units.parsec
instance.parameters.x_boundary_conditions = ("periodic", "periodic")
instance.parameters.y_boundary_conditions = ("periodic", "periodic")
instance.parameters.z_boundary_conditions = ("periodic", "periodic")
density = units.MSun / (units.parsec ** 3)
momentum = units.MSun / (units.Myr * units.parsec ** 2 )
energy = units.MSun / (units.parsec * units.Myr ** 2)
grid = datamodel.new_regular_grid((10,20,40), [1.0, 1.0, 1.0] | units.parsec )
grid.rho = 0.4 | density
grid.rhovx = 0.1 | momentum
grid.rhovy = 0.2 | momentum
grid.rhovz = 0.3 | momentum
grid.energy = 0.5 | energy
channel = grid.new_channel_to(instance.grid)
channel.copy()
print instance.grid[0].rho
self.assertAlmostRelativeEquals(instance.grid[0].rho, 0.4 | density)
self.assertAlmostRelativeEquals(instance.grid[0].rhovx, 0.1 | momentum)
self.assertAlmostRelativeEquals(instance.grid[0].rhovy, 0.2 | momentum)
self.assertAlmostRelativeEquals(instance.grid[0].rhovz, 0.3 | momentum)
self.assertAlmostRelativeEquals(instance.grid[0].energy, 0.5 | energy)
self.assertEquals(instance.grid.rho.number.ndim, 3)
self.assertEquals(len(list(instance.itergrids())), 1)
instance.stop()
def test14(self):
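        """Matter fed in through an 'interface' x boundary flows into the grid and eventually fills it."""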
instance=self.new_instance(Athena)
instance.parameters.mesh_size = (10 , 1, 1)
instance.parameters.mesh_length = [1.0, 1.0, 1.0] | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("interface", "outflow")
instance.parameters.stopping_conditions_number_of_steps = 1
grid = datamodel.new_regular_grid((10,1,1), [1.0, 1.0, 1.0] | generic_unit_system.length )
density = generic_unit_system.density
momentum = generic_unit_system.speed * generic_unit_system.density
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid.rho = 0.01 | density
grid.rhovx = 0.1 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = 0.0 | momentum
p = 1.0 | (generic_unit_system.mass / (generic_unit_system.length * generic_unit_system.time**2))
grid.energy = p / (instance.parameters.gamma - 1)
grid.energy += 0.5 * (grid.rhovx ** 2 + grid.rhovy ** 2 + grid.rhovz ** 2) / grid.rho
channel = grid.new_channel_to(instance.grid)
channel.copy()
instance.stopping_conditions.number_of_steps_detection.enable()
#instance.grid.boundaries.left.
xbound1 = instance.get_boundary_grid('xbound1')
self.assertEquals(xbound1.shape, (4,1,1))
memxbound1 = xbound1.copy()
memxbound1.rho = 0.02 | density
memxbound1.rhovx = 0.2 | momentum
memxbound1.rhovy = 0.0 | momentum
memxbound1.rhovz = 0.0 | momentum
memxbound1.energy = p / (instance.parameters.gamma - 1)
memxbound1.energy += 0.5 * (memxbound1.rhovx ** 2 + memxbound1.rhovy ** 2 + memxbound1.rhovz ** 2) / memxbound1.rho
channel = memxbound1.new_channel_to(xbound1)
channel.copy()
instance.evolve_model(1.0 | generic_unit_system.time)
print instance.stopping_conditions.number_of_steps_detection.is_set()
rho = instance.grid.rho[...,0,0]
self.assertAlmostRelativeEquals(rho[-1], 0.01 | density)
self.assertTrue(rho[0] > 0.01 | density)
self.assertTrue(instance.grid.rhovx[0,0,0] > 0.1 | momentum)
self.assertAlmostRelativeEquals(instance.grid.rhovx[-1,0,0] , 0.1 | momentum)
print instance.model_time
instance.stopping_conditions.number_of_steps_detection.disable()
instance.evolve_model(1.0 | generic_unit_system.time)
rho = instance.grid.rho[...,0,0]
self.assertAlmostRelativeEquals(rho, 0.02 | density, 8)
self.assertAlmostRelativeEquals(instance.grid.rhovx[...,0,0], 0.2 | momentum, 8)
print instance.model_time
instance.stop()
def test15(self):
instance=self.new_instance(Athena)
instance.initialize_code()
instance.stop()
def test16(self):
instance=self.new_instance(Athena)
instance.parameters.mesh_size = (10 , 1, 1)
instance.parameters.mesh_length = [1.0, 1.0, 1.0] | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("outflow", "interface")
instance.parameters.stopping_conditions_number_of_steps = 1
grid = datamodel.new_regular_grid((10,1,1), [1.0, 1.0, 1.0] | generic_unit_system.length )
density = generic_unit_system.density
momentum = generic_unit_system.speed * generic_unit_system.density
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid.rho = 0.01 | density
grid.rhovx = -0.1 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = 0.0 | momentum
p = 1.0 | (generic_unit_system.mass / (generic_unit_system.length * generic_unit_system.time**2))
grid.energy = p / (instance.parameters.gamma - 1)
grid.energy += 0.5 * (grid.rhovx ** 2 + grid.rhovy ** 2 + grid.rhovz ** 2) / grid.rho
channel = grid.new_channel_to(instance.grid)
channel.copy()
instance.stopping_conditions.number_of_steps_detection.enable()
#instance.grid.boundaries.left.
xbound = instance.get_boundary_grid('xbound2')
self.assertEquals(xbound.shape, (4,1,1))
memxbound = xbound.copy()
memxbound.rho = 0.02 | density
memxbound.rhovx = -0.2 | momentum
memxbound.rhovy = 0.0 | momentum
memxbound.rhovz = 0.0 | momentum
memxbound.energy = p / (instance.parameters.gamma - 1)
memxbound.energy += 0.5 * (memxbound.rhovx ** 2 + memxbound.rhovy ** 2 + memxbound.rhovz ** 2) / memxbound.rho
channel = memxbound.new_channel_to(xbound)
channel.copy()
instance.evolve_model(1.0 | generic_unit_system.time)
print instance.stopping_conditions.number_of_steps_detection.is_set()
rho = instance.grid.rho[...,0,0]
self.assertAlmostRelativeEquals(rho[0], 0.01 | density)
self.assertTrue(rho[-1] > 0.01 | density)
self.assertTrue(instance.grid.rhovx[-1,0,0] < -0.1 | momentum)
self.assertAlmostRelativeEquals(instance.grid.rhovx[0,0,0] , -0.1 | momentum)
print instance.model_time
instance.stopping_conditions.number_of_steps_detection.disable()
instance.evolve_model(1.0 | generic_unit_system.time)
rho = instance.grid.rho[...,0,0]
self.assertAlmostRelativeEquals(rho, 0.02 | density, 8)
self.assertAlmostRelativeEquals(instance.grid.rhovx[...,0,0], -0.2 | momentum, 8)
print instance.model_time
instance.stop()
def test17(self):
instance=self.new_instance(Athena, number_of_workers = 2)
instance.set_parallel_decomposition(1,2,1)
instance.parameters.mesh_size = (10,4,1)
instance.parameters.mesh_length = [1.0, 1.0, 1.0] | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("interface", "outflow")
instance.parameters.y_boundary_conditions = ("periodic", "periodic")
instance.parameters.stopping_conditions_number_of_steps = 1
grid = datamodel.new_regular_grid((10,4,1), [1.0, 1.0, 1.0] | generic_unit_system.length )
density = generic_unit_system.density
momentum = generic_unit_system.speed * generic_unit_system.density
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid.rho = 0.01 | density
grid.rhovx = 0.1 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = 0.0 | momentum
p = 1.0 | (generic_unit_system.mass / (generic_unit_system.length * generic_unit_system.time**2))
grid.energy = p / (instance.parameters.gamma - 1)
grid.energy += 0.5 * (grid.rhovx ** 2 + grid.rhovy ** 2 + grid.rhovz ** 2) / grid.rho
channel = grid.new_channel_to(instance.grid)
channel.copy()
instance.stopping_conditions.number_of_steps_detection.enable()
xbound = instance.get_boundary_grid('xbound1')
self.assertEquals(xbound.shape, (4,4,1))
memxbound = xbound.copy()
memxbound.rho = 0.02 | density
memxbound.rhovx = 0.2 | momentum
memxbound.rhovy = 0.0 | momentum
memxbound.rhovz = 0.0 | momentum
memxbound.energy = p / (instance.parameters.gamma - 1)
memxbound.energy += 0.5 * (memxbound.rhovx ** 2 + memxbound.rhovy ** 2 + memxbound.rhovz ** 2) / memxbound.rho
channel = memxbound.new_channel_to(xbound)
channel.copy()
instance.evolve_model(1.0 | generic_unit_system.time)
print instance.stopping_conditions.number_of_steps_detection.is_set()
print instance.grid.rho
rho = instance.grid.rho[...,0,0]
self.assertAlmostRelativeEquals(rho[-1], 0.01 | density)
self.assertTrue(rho[0] > 0.01 | density)
self.assertTrue(instance.grid.rhovx[0,0,0] > 0.1 | momentum)
self.assertAlmostRelativeEquals(instance.grid.rhovx[-1,0,0] , 0.1 | momentum)
print instance.model_time
instance.stopping_conditions.number_of_steps_detection.disable()
instance.evolve_model(1.0 | generic_unit_system.time)
rho = instance.grid.rho[...,0,0]
self.assertAlmostRelativeEquals(rho, 0.02 | density, 8)
self.assertAlmostRelativeEquals(instance.grid.rhovx[...,0,0], 0.2 | momentum, 8)
print instance.model_time
instance.stop()
def test18(self):
instance=self.new_instance(Athena, number_of_workers = 2)
instance.set_parallel_decomposition(2,1,1)
instance.parameters.mesh_size = (4,10,1)
instance.parameters.mesh_length = [1.0, 1.0, 1.0] | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("periodic", "periodic")
instance.parameters.y_boundary_conditions = ("interface", "outflow")
instance.parameters.stopping_conditions_number_of_steps = 1
grid = datamodel.new_regular_grid((4,10,1), [1.0, 1.0, 1.0] | generic_unit_system.length )
density = generic_unit_system.density
momentum = generic_unit_system.speed * generic_unit_system.density
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid.rho = 0.01 | density
grid.rhovx = 0.0 | momentum
grid.rhovy = 0.1 | momentum
grid.rhovz = 0.0 | momentum
p = 1.0 | (generic_unit_system.mass / (generic_unit_system.length * generic_unit_system.time**2))
grid.energy = p / (instance.parameters.gamma - 1)
grid.energy += 0.5 * (grid.rhovx ** 2 + grid.rhovy ** 2 + grid.rhovz ** 2) / grid.rho
channel = grid.new_channel_to(instance.grid)
channel.copy()
instance.stopping_conditions.number_of_steps_detection.enable()
ybound = instance.get_boundary_grid('ybound1')
self.assertEquals(ybound.shape, (4+8,4,1))
memybound = ybound.copy()
memybound.rho = 0.02 | density
memybound.rhovx = 0.0 | momentum
memybound.rhovy = 0.2 | momentum
memybound.rhovz = 0.0 | momentum
memybound.energy = p / (instance.parameters.gamma - 1)
memybound.energy += 0.5 * (memybound.rhovx ** 2 + memybound.rhovy ** 2 + memybound.rhovz ** 2) / memybound.rho
channel = memybound.new_channel_to(ybound)
channel.copy()
instance.evolve_model(1.0 | generic_unit_system.time)
print instance.stopping_conditions.number_of_steps_detection.is_set()
print instance.grid.rho
rho = instance.grid.rho[0,...,0]
self.assertAlmostRelativeEquals(rho[-1], 0.01 | density)
self.assertTrue(rho[0] > 0.01 | density)
self.assertTrue(instance.grid.rhovy[0,0,0] > 0.1 | momentum)
self.assertAlmostRelativeEquals(instance.grid.rhovy[0,-1,0] , 0.1 | momentum)
print instance.model_time
instance.stopping_conditions.number_of_steps_detection.disable()
instance.evolve_model(1.0 | generic_unit_system.time)
rho = instance.grid.rho[0,...,0]
self.assertAlmostRelativeEquals(rho, 0.02 | density, 8)
self.assertAlmostRelativeEquals(instance.grid.rhovy[0,...,0], 0.2 | momentum, 8)
print instance.model_time
instance.stop()
def test19(self):
instance=self.new_instance(Athena, number_of_workers = 1)
instance.set_parallel_decomposition(1,1,1)
instance.parameters.mesh_size = (4,5,6)
instance.parameters.mesh_length = [1.0, 1.0, 1.0] | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("periodic", "periodic")
instance.parameters.y_boundary_conditions = ("periodic", "periodic")
instance.parameters.z_boundary_conditions = ("interface", "outflow")
instance.parameters.stopping_conditions_number_of_steps = 1
grid = datamodel.new_regular_grid((4,5,6), [1.0, 1.0, 1.0] | generic_unit_system.length )
density = generic_unit_system.density
momentum = generic_unit_system.speed * generic_unit_system.density
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid.rho = 0.01 | density
grid.rhovx = 0.0 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = 0.1 | momentum
p = 1.0 | (generic_unit_system.mass / (generic_unit_system.length * generic_unit_system.time**2))
grid.energy = p / (instance.parameters.gamma - 1)
grid.energy += 0.5 * (grid.rhovx ** 2 + grid.rhovy ** 2 + grid.rhovz ** 2) / grid.rho
channel = grid.new_channel_to(instance.grid)
channel.copy()
instance.stopping_conditions.number_of_steps_detection.enable()
zbound = instance.get_boundary_grid('zbound1')
self.assertEquals(zbound.shape, (4+8,5+8,4))
memzbound = zbound.copy()
memzbound.rho = 0.02 | density
memzbound.rhovx = 0.0 | momentum
memzbound.rhovy = 0.0 | momentum
memzbound.rhovz = 0.2 | momentum
memzbound.energy = p / (instance.parameters.gamma - 1)
memzbound.energy += 0.5 * (memzbound.rhovx ** 2 + memzbound.rhovy ** 2 + memzbound.rhovz ** 2) / memzbound.rho
channel = memzbound.new_channel_to(zbound)
channel.copy()
instance.evolve_model(1.0 | generic_unit_system.time)
rho = instance.grid.rho[0,0,...]
self.assertAlmostRelativeEquals(rho[-1], 0.01 | density)
self.assertTrue(rho[0] > 0.01 | density)
self.assertTrue(instance.grid.rhovz[0,0,0] > 0.1 | momentum)
self.assertAlmostRelativeEquals(instance.grid.rhovz[0,0,-1] , 0.1 | momentum)
print instance.model_time
instance.stopping_conditions.number_of_steps_detection.disable()
instance.evolve_model(1.0 | generic_unit_system.time)
rho = instance.grid.rho[0,...,0]
self.assertAlmostRelativeEquals(rho, 0.02 | density, 8)
self.assertAlmostRelativeEquals(instance.grid.rhovz[0,0,...], 0.2 | momentum, 8)
print instance.model_time
instance.stop()
def test20(self):
instance=self.new_instance(Athena, number_of_workers = 4)
instance.parameters.parallel_decomposition = (2,2,1)
instance.parameters.mesh_size = (4,5,6)
instance.parameters.mesh_length = [1.0, 1.0, 1.0] | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("periodic", "periodic")
instance.parameters.y_boundary_conditions = ("periodic", "periodic")
instance.parameters.z_boundary_conditions = ("outflow", "interface")
instance.parameters.stopping_conditions_number_of_steps = 1
grid = datamodel.new_regular_grid((4,5,6), [1.0, 1.0, 1.0] | generic_unit_system.length )
density = generic_unit_system.density
momentum = generic_unit_system.speed * generic_unit_system.density
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
grid.rho = 0.01 | density
grid.rhovx = 0.0 | momentum
grid.rhovy = 0.0 | momentum
grid.rhovz = -0.1 | momentum
p = 1.0 | (generic_unit_system.mass / (generic_unit_system.length * generic_unit_system.time**2))
grid.energy = p / (instance.parameters.gamma - 1)
grid.energy += 0.5 * (grid.rhovx ** 2 + grid.rhovy ** 2 + grid.rhovz ** 2) / grid.rho
channel = grid.new_channel_to(instance.grid)
channel.copy()
instance.stopping_conditions.number_of_steps_detection.enable()
zbound = instance.get_boundary_grid('zbound2')
self.assertEquals(zbound.shape, (4+8,5+8,4))
memzbound = zbound.copy()
memzbound.rho = 0.02 | density
memzbound.rhovx = 0.0 | momentum
memzbound.rhovy = 0.0 | momentum
memzbound.rhovz = -0.2 | momentum
memzbound.energy = p / (instance.parameters.gamma - 1)
memzbound.energy += 0.5 * (memzbound.rhovx ** 2 + memzbound.rhovy ** 2 + memzbound.rhovz ** 2) / memzbound.rho
channel = memzbound.new_channel_to(zbound)
channel.copy()
instance.evolve_model(1.0 | generic_unit_system.time)
rho = instance.grid.rho[0,0,...]
self.assertAlmostRelativeEquals(rho[0], 0.01 | density)
self.assertTrue(rho[-1] > 0.01 | density)
self.assertTrue(instance.grid.rhovz[0,0,-1] < -0.1 | momentum)
self.assertAlmostRelativeEquals(instance.grid.rhovz[0,0,0] , -0.1 | momentum)
print instance.model_time
instance.stopping_conditions.number_of_steps_detection.disable()
instance.evolve_model(1.0 | generic_unit_system.time)
rho = instance.grid.rho[0,...,0]
self.assertAlmostRelativeEquals(rho, 0.02 | density, 8)
self.assertAlmostRelativeEquals(instance.grid.rhovz[0,0,...], -0.2 | momentum, 8)
print instance.model_time
instance.stop()
def test21(self):
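        """get_hydro_state_at_point reproduces a density field that is linear in x on a periodic 1D grid."""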
instance=self.new_instance(Athena)
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.mesh_length = (20.0, 1, 1) | generic_unit_system.length
instance.parameters.mesh_size = (20, 1, 1)
for x in instance.itergrids():
inmem = x.copy()
inmem.rho = inmem.x/(1| generic_unit_system.length) | generic_unit_system.density
inmem.rhovx = 0.0 | generic_unit_system.momentum_density
inmem.energy = 1.0 | generic_unit_system.energy_density
from_model_to_code = inmem.new_channel_to(x)
from_model_to_code.copy()
print inmem.rho
        rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(0.5| generic_unit_system.length,0.0| generic_unit_system.length,0.0| generic_unit_system.length)
self.assertEquals(rho , 0.5 | generic_unit_system.density)
for value in numpy.arange(0.5, 19.6, 0.1):
            rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(
value | generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , value | generic_unit_system.density)
for value in numpy.arange(0.0, 0.6, 0.1):
            rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(
value | generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , ((0.5 + value) * 0.5 + (0.5-value) * 19.5) | generic_unit_system.density)
for value in numpy.arange(0.0, 0.5, 0.1):
            rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(
value + 19.5| generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , (19.5 - (value * 19)) | generic_unit_system.density, 9)
# out of range
        rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(
20.0| generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , 0.0 | generic_unit_system.density, 9)
def test22(self):
instance=self.new_instance(Athena, number_of_workers=2)
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.y_boundary_conditions = ("periodic","periodic")
instance.parameters.mesh_length = (20.0, 20.0, 1) | generic_unit_system.length
instance.parameters.mesh_size = (20, 20, 1)
for x in instance.itergrids():
inmem = x.copy()
inmem.rho = (inmem.x + ((inmem.y - (0.5| generic_unit_system.length))* 20.0))/(1| generic_unit_system.length) | generic_unit_system.density
inmem.rhovx = 0.0 | generic_unit_system.momentum_density
inmem.energy = 1.0 | generic_unit_system.energy_density
from_model_to_code = inmem.new_channel_to(x)
from_model_to_code.copy()
print inmem.rho[0], inmem.y[0], inmem.x[0]
        rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(0.5| generic_unit_system.length,0.5| generic_unit_system.length,0.0| generic_unit_system.length)
self.assertEquals(rho , 0.5 | generic_unit_system.density)
for value in numpy.arange(0.5, 19.6, 0.1):
            rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(
value | generic_unit_system.length,
0.5 | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , value | generic_unit_system.density)
for x in numpy.arange(8.5, 11.5, 0.25):
for y in numpy.arange(0.5, 19.6, 0.25):
                rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(
x | generic_unit_system.length,
y | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , x + (20 * (y-0.5)) | generic_unit_system.density)
def test23(self):
instance=self.new_instance(Athena, number_of_workers=3)
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.y_boundary_conditions = ("periodic","periodic")
instance.parameters.z_boundary_conditions = ("periodic","periodic")
        instance.parameters.mesh_length = (20.0, 20.0, 20.0) | generic_unit_system.length
instance.parameters.mesh_size = (20, 20, 20)
for x in instance.itergrids():
inmem = x.copy()
inmem.rho = (
(
inmem.x +
((inmem.y - (0.5| generic_unit_system.length))* 20.0) +
((inmem.z - (0.5| generic_unit_system.length))* 400.0)
)
/(1| generic_unit_system.length) | generic_unit_system.density
)
inmem.rhovx = 0.0 | generic_unit_system.momentum_density
inmem.energy = 1.0 | generic_unit_system.energy_density
from_model_to_code = inmem.new_channel_to(x)
from_model_to_code.copy()
        rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(0.5| generic_unit_system.length,0.5| generic_unit_system.length,0.5| generic_unit_system.length)
self.assertEquals(rho , 0.5 | generic_unit_system.density)
for value in numpy.arange(0.5, 19.6, 0.1):
            rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(
value | generic_unit_system.length,
0.5 | generic_unit_system.length,
0.5 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , value | generic_unit_system.density)
        sample = datamodel.new_regular_grid(
(4, 4, 76),
(2, 2, 19) | generic_unit_system.length
)
sample.x += 9.5 | generic_unit_system.length
sample.y += 9.5 | generic_unit_system.length
sample.z += 0.5 | generic_unit_system.length
x = sample.x.flatten()
y = sample.y.flatten()
z = sample.z.flatten()
        rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(
x,
y,
z
)
half = 0.5 | generic_unit_system.length
self.assertAlmostRelativeEquals(rho , (x + (20 * (y-half)) + (400 * (z-half)))/(1| generic_unit_system.length) | generic_unit_system.density )
def test24(self):
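        """get_interpolated_gravitational_potential reproduces a potential that is linear in x."""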
instance=self.new_instance(Athena, number_of_workers=1)
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.y_boundary_conditions = ("periodic","periodic")
instance.parameters.z_boundary_conditions = ("periodic","periodic")
instance.parameters.mesh_length = (10.0, 1, 1) | generic_unit_system.length
instance.parameters.mesh_size = (20, 1, 1)
instance.set_has_external_gravitational_potential(1)
instance.commit_parameters()
potential_grid = instance.potential_grid
factor = (2 | generic_unit_system.length / generic_unit_system.time**2)
potential_grid.potential = potential_grid.x * factor
x = numpy.arange(0,10.25, 0.1) | generic_unit_system.length
y = 0.5 |generic_unit_system.length
z = 0.5 |generic_unit_system.length
interpolated = instance.get_interpolated_gravitational_potential(x,y,z)
self.assertAlmostRelativeEquals(interpolated, x * factor)
def test25(self):
instance=self.new_instance(Athena, number_of_workers=1)
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.y_boundary_conditions = ("periodic","periodic")
instance.parameters.z_boundary_conditions = ("periodic","periodic")
instance.parameters.mesh_length = (5.0, 10.0, 1) | generic_unit_system.length
instance.parameters.mesh_size = (20, 20, 1)
instance.set_has_external_gravitational_potential(1)
instance.commit_parameters()
potential_grid = instance.potential_grid
factor = (2 | generic_unit_system.length / generic_unit_system.time**2)
potential_grid.potential = potential_grid.y * factor
print potential_grid.y * factor
y = numpy.arange(0,10.25, 0.1) | generic_unit_system.length
x = (y * 0) + (2 |generic_unit_system.length)
z = 0.5 |generic_unit_system.length
interpolated = instance.get_interpolated_gravitational_potential(x,y,z)
print y*factor
self.assertAlmostRelativeEquals(interpolated, y * factor)
def test26(self):
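        """The interpolated gravitational potential at points just outside the domain matches the corresponding points inside (periodic boundaries)."""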
n = 4
instance=self.new_instance(Athena)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.parameters.nx = n
instance.parameters.ny = n
instance.parameters.nz = n
instance.parameters.length_x = n | generic_unit_system.length
instance.parameters.length_y = n | generic_unit_system.length
instance.parameters.length_z = n | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.y_boundary_conditions = ("periodic","periodic")
instance.parameters.z_boundary_conditions = ("periodic","periodic")
instance.set_has_external_gravitational_potential(1)
instance.commit_parameters()
density = generic_unit_system.mass / (generic_unit_system.length ** 3)
momentum = generic_unit_system.mass / (generic_unit_system.time * (generic_unit_system.length**2))
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
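        # build a grid matching instance.potential_grid, which is two cells larger than the mesh in each direction (one boundary cell per side)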
potential_grid = datamodel.Grid(n + 2, n + 2, n + 2)
potential_grid.potential = 0.0 | generic_unit_system.potential
x = instance.potential_grid.x
y = instance.potential_grid.y
z = instance.potential_grid.z
potential_grid.potential = (1 | generic_unit_system.potential) * ( (x + y + z) / (1 | generic_unit_system.length))
channel = potential_grid.new_channel_to(instance.potential_grid)
channel.copy()
result = instance.initialize_grid()
print x[...,0,0]
print instance.grid.x[...,0,0]
print instance.potential_grid.potential[...,0,0]
print potential_grid.potential[...,0,0]
self.assertAlmostRelativeEquals(potential_grid.potential[...,0,0], instance.potential_grid.potential[...,0,0])
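        # with periodic boundaries, a point shifted outside the grid should yield the same interpolated potential as the corresponding inside point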
interpolated_inside = instance.get_interpolated_gravitational_potential(x[0,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[0,0,0] - (1.0 |generic_unit_system.length), y[0,0,0], z[0,0,0])
print interpolated_inside, interpolated_outside
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[0,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[0,0,0] , y[0,0,0] - (2.0 |generic_unit_system.length), z[0,0,0])
print interpolated_inside, interpolated_outside
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[0,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[0,0,0] , y[0,0,0], z[0,0,0] - (2.0 |generic_unit_system.length))
print interpolated_inside, interpolated_outside
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[5,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[5,0,0] + (2.0 |generic_unit_system.length), y[0,0,0], z[0,0,0])
print interpolated_inside, interpolated_outside
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[5,0,0], y[0,5,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[5,0,0], y[0,5,0] + (2.0 |generic_unit_system.length), z[0,0,0])
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[5,0,0], y[0,5,0], z[0,0,5])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[5,0,0], y[0,5,0] , z[0,0,5] + (2.0 |generic_unit_system.length))
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[0,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[0,0,0] - (2.0 |generic_unit_system.length) , y[0,0,0] - (2.0 |generic_unit_system.length), z[0,0,0] - (2.0 |generic_unit_system.length))
print interpolated_inside, interpolated_outside
def test27(self):
n = 4
instance=self.new_instance(Athena)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.parameters.nx = n
instance.parameters.ny = n
instance.parameters.nz = 1
instance.parameters.length_x = n | generic_unit_system.length
instance.parameters.length_y = n | generic_unit_system.length
instance.parameters.length_z = 0 | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.y_boundary_conditions = ("periodic","periodic")
instance.parameters.z_boundary_conditions = ("periodic","periodic")
instance.set_has_external_gravitational_potential(1)
instance.commit_parameters()
density = generic_unit_system.mass / (generic_unit_system.length ** 3)
momentum = generic_unit_system.mass / (generic_unit_system.time * (generic_unit_system.length**2))
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
potential_grid = datamodel.Grid(n + 2, n + 2, 1)
potential_grid.potential = 0.0 | generic_unit_system.potential
x = instance.potential_grid.x
y = instance.potential_grid.y
z = instance.potential_grid.z
potential_grid.potential = (1 | generic_unit_system.potential) * ( (x + y) / (1 | generic_unit_system.length))
channel = potential_grid.new_channel_to(instance.potential_grid)
channel.copy()
result = instance.initialize_grid()
self.assertAlmostRelativeEquals(potential_grid.potential[...,0,0], instance.potential_grid.potential[...,0,0])
interpolated_inside = instance.get_interpolated_gravitational_potential(x[0,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[0,0,0] - (1.0 |generic_unit_system.length), y[0,0,0], z[0,0,0])
print interpolated_inside, interpolated_outside
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[0,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[0,0,0] , y[0,0,0] - (2.0 |generic_unit_system.length), z[0,0,0])
print interpolated_inside, interpolated_outside
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[5,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[5,0,0] + (2.0 |generic_unit_system.length), y[0,0,0], z[0,0,0])
print interpolated_inside, interpolated_outside
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[5,0,0], y[0,5,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[5,0,0], y[0,5,0] + (2.0 |generic_unit_system.length), z[0,0,0])
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
interpolated_inside = instance.get_interpolated_gravitational_potential(x[0,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[0,0,0] - (2.0 |generic_unit_system.length) , y[0,0,0] - (2.0 |generic_unit_system.length), z[0,0,0])
print interpolated_inside, interpolated_outside
def test28(self):
n = 4
instance=self.new_instance(Athena)
instance.set_gamma(1.6666666666666667)
instance.set_courant_friedrichs_lewy_number(0.3)
instance.parameters.nx = n
instance.parameters.ny = n
instance.parameters.nz = 1
instance.parameters.length_x = n | generic_unit_system.length
instance.parameters.length_y = n | generic_unit_system.length
instance.parameters.length_z = 0 | generic_unit_system.length
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.y_boundary_conditions = ("periodic","periodic")
instance.parameters.z_boundary_conditions = ("periodic","periodic")
instance.set_has_external_gravitational_potential(1)
instance.commit_parameters()
density = generic_unit_system.mass / (generic_unit_system.length ** 3)
momentum = generic_unit_system.mass / (generic_unit_system.time * (generic_unit_system.length**2))
energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length)
potential_grid = datamodel.Grid(n + 2, n + 2, 1)
potential_grid.potential = 0.0 | generic_unit_system.potential
x = instance.potential_grid.x
y = instance.potential_grid.y
z = instance.potential_grid.z
potential_grid.potential = (1 | generic_unit_system.potential) * ( (x + y) / (1 | generic_unit_system.length))
channel = potential_grid.new_channel_to(instance.potential_grid)
channel.copy()
result = instance.initialize_grid()
self.assertAlmostRelativeEquals(potential_grid.potential[...,0,0], instance.potential_grid.potential[...,0,0])
interpolated_inside = instance.get_interpolated_gravitational_potential(x[0,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[0,0,0] - (0.5 |generic_unit_system.length), y[0,0,0], z[0,0,0])
self.assertAlmostRelativeEquals(potential_grid.potential[...,0,0], instance.potential_grid.potential[...,0,0])
interpolated_inside = instance.get_interpolated_gravitational_potential(x[5,0,0], y[0,0,0], z[0,0,0])
interpolated_outside = instance.get_interpolated_gravitational_potential(x[5,0,0] + (0.5 |generic_unit_system.length), y[0,0,0], z[0,0,0])
print interpolated_inside, interpolated_outside
self.assertAlmostRelativeEquals(interpolated_inside, interpolated_outside)
def test29(self):
instance=self.new_instance(Athena)
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.mesh_length = (20.0, 1, 1) | generic_unit_system.length
instance.parameters.mesh_size = (20, 1, 1)
for x in instance.itergrids():
inmem = x.copy()
inmem.rho = inmem.x/(1| generic_unit_system.length) | generic_unit_system.density
inmem.rhovx = 0.0 | generic_unit_system.momentum_density
inmem.energy = 1.0 | generic_unit_system.energy_density
from_model_to_code = inmem.new_channel_to(x)
from_model_to_code.copy()
print inmem.rho
        rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_at_point(0.5| generic_unit_system.length,0.0| generic_unit_system.length,0.0| generic_unit_system.length)
self.assertEquals(rho , 0.5 | generic_unit_system.density)
for value in numpy.arange(0.5, 19.6, 0.1):
            rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_for_cell(
value | generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length,
1.0 | generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , value | generic_unit_system.density, 9)
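        # near the left edge a unit-size sample cell straddles the periodic boundary, so the expected density is the overlap-weighted average of the first (0.5) and last (19.5) cells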
for value in numpy.arange(0.0, 0.6, 0.1):
            rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_for_cell(
value | generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length,
1.0 | generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , ((0.5 + value) * 0.5 + (0.5-value) * 19.5) | generic_unit_system.density, 9)
for value in numpy.arange(0.0, 0.5, 0.1):
            rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_for_cell(
value + 19.5| generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length,
1.0 | generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , (19.5 - (value * 19)) | generic_unit_system.density, 9)
# out of range
        rho, rhovx, rhovy, rhovz, rhoenergy = instance.get_hydro_state_for_cell(
21.0| generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length,
1.0 | generic_unit_system.length,
0.0 | generic_unit_system.length,
0.0 | generic_unit_system.length
)
self.assertAlmostRelativeEquals(rho , 0.0 | generic_unit_system.density, 9)
    def test29b(self):
instance=self.new_instance(Athena)
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.mesh_length = (20.0, 1, 1) | generic_unit_system.length
instance.parameters.mesh_size = (20, 1, 1)
for x in instance.itergrids():
inmem = x.copy()
inmem.rho = inmem.x/(1| generic_unit_system.length) | generic_unit_system.density
inmem.rhovx = 0.0 | generic_unit_system.momentum_density
inmem.energy = 1.0 | generic_unit_system.energy_density
from_model_to_code = inmem.new_channel_to(x)
from_model_to_code.copy()
print inmem.rho
def test30(self):
instance=self.new_instance(Athena)
instance.parameters.x_boundary_conditions = ("periodic","periodic")
instance.parameters.mesh_length = (8, 1, 1) | generic_unit_system.length
instance.parameters.mesh_size = (8, 1, 1)
for x in instance.itergrids():
inmem = x.copy()
inmem.rho = inmem.x/(1| generic_unit_system.length) | generic_unit_system.density
inmem.rhovx = 0.0 | generic_unit_system.momentum_density
inmem.energy = 1.0 | generic_unit_system.energy_density
from_model_to_code = inmem.new_channel_to(x)
from_model_to_code.copy()
print inmem.rho
grid = instance.get_extended_grid()
self.assertEquals(grid.shape, (12,1,1))
instance.initialize_grid()
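        # the extended grid is the 8-cell mesh plus two boundary cells on each side in x; with periodic boundaries these repeat the opposite edge of the density ramp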
self.assertEquals(grid.rho[...,0,0] , [6.5,7.5,0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,0.5,1.5] | generic_unit_system.density)
```
#### File: test/codes_tests/test_mocassin.py
```python
import os
from amuse.test.amusetest import TestWithMPI
from amuse.units import units
from amuse import datamodel
from amuse.community.mocassin.interface import MocassinInterface, Mocassin, mocassin_rydberg_unit
import numpy
class TestMocassinInterface(TestWithMPI):
def test0(self):
instance=self.new_instance_of_an_optional_code(MocassinInterface)
instance.initialize_code()
instance.stop()
def test1(self):
instance=self.new_instance_of_an_optional_code(MocassinInterface)
instance.initialize_code()
#instance.redirect_outputs_to("moc-out.txt", "moc-err.txt")
instance.setup_mesh(11,11,11, 100,100,100)
instance.setup_abundancies()
print instance.get_default_input_directory()
instance.set_input_directory(instance.get_default_input_directory())
instance.set_constant_hydrogen_density(900.0)
instance.commit_parameters()
indices_x = list(range(1,12,1))
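        # the 11-point mesh spans [-100, 100], so successive grid points are 20 units apart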
x,y,z,error = instance.get_position_of_index(indices_x,[1]*len(indices_x), [1]*len(indices_x))
self.assertEquals(error, 0)
for index, expected_x in enumerate(list(range(-100, 120, 20))):
self.assertAlmostRelativeEqual(x[index], expected_x, 6)
self.assertAlmostRelativeEqual(y[index], -100)
self.assertAlmostRelativeEqual(z[index], -100)
instance.stop()
def test2(self):
instance=self.new_instance_of_an_optional_code(MocassinInterface) #, debugger = "ddd")
instance.initialize_code()
instance.set_symmetricXYZ(True)
instance.setup_mesh(13,13,13,0.95E+19,0.95E+19,0.95E+19)
instance.setup_abundancies()
instance.set_input_directory(instance.get_default_input_directory())
instance.set_constant_hydrogen_density(100.0)
instance.set_initial_nebular_temperature(6000.0)
instance.set_maximum_number_of_monte_carlo_iterations(20)
instance.set_minimum_convergence_level(100)
instance.set_total_number_of_photons(10000000)
instance.set_total_number_of_points_in_frequency_mesh(600)
instance.set_high_limit_of_the_frequency_mesh(15)
instance.set_low_limit_of_the_frequency_mesh(1.001e-5)
instance.set_convergence_limit(0.09)
instance.set_number_of_ionisation_stages(6)
instance.setup_auto_convergence(0.2, 2.0, 1000000000)
instance.commit_parameters()
instance.define_stars(0.0, 0.0, 0.0, 20000, 6003.6396)
instance.commit_particles()
instance.commit_grid()
x, error = instance.get_number_of_elements_used()
self.assertEquals(0, error)
self.assertEquals(x, 7)
indices_x = list(range(1,12,1))
is_active,error = instance.get_grid_active(indices_x,[1]*len(indices_x), [1]*len(indices_x), 1)
self.assertEquals(0, error)
is_active=numpy.array(is_active,dtype=bool)
self.assertEquals([True,True,True,True,True,True,True,True,True,True,True] , is_active)
indices_x = list(range(5,12,1))
temperatures,error = instance.get_grid_electron_temperature(indices_x,[1]*len(indices_x), [1]*len(indices_x), 1)
self.assertEquals(0, error)
self.assertEquals([6000.0] * len(indices_x), temperatures)
indices_x = list(range(1,5,1))
temperatures,error = instance.get_grid_electron_temperature(indices_x,[1]*len(indices_x), [1]*len(indices_x), 1)
self.assertEquals(0, error)
self.assertEquals([6000.0] * len(indices_x), temperatures)
ni, nj, nk, error = instance.get_max_indices(1)
self.assertEquals(0, error)
self.assertEquals(ni, 13)
self.assertEquals(nj, 13)
        self.assertEquals(nk, 13)
instance.stop()
def xtest3(self):
instance=self.new_instance_of_an_optional_code(MocassinInterface) #, debugger = "ddd")
#instance.redirect_outputs_to("moc3-out.txt", "moc3-err.txt")
instance.initialize_code()
instance.set_symmetricXYZ(True)
instance.setup_mesh(13,13,13,0.95E+19,0.95E+19,0.95E+19)
instance.setup_abundancies()
#instance.set_abundancies_filename('abunHII20.in')
instance.set_input_directory(instance.get_default_input_directory())
instance.set_constant_hydrogen_density(100.0)
instance.set_initial_nebular_temperature(6000.0)
instance.set_maximum_number_of_monte_carlo_iterations(20)
instance.set_minimum_convergence_level(100)
instance.set_total_number_of_photons(10000000)
instance.set_total_number_of_points_in_frequency_mesh(600)
instance.set_high_limit_of_the_frequency_mesh(15.)
instance.set_low_limit_of_the_frequency_mesh(1.001e-5)
instance.set_convergence_limit(0.09)
instance.set_number_of_ionisation_stages(6)
instance.setup_auto_convergence(0.8, 2.0, 1000000000)
#instance.set_emit_rate_of_photons(1.006e13)
instance.commit_parameters()
instance.define_stars(0.0, 0.0, 0.0, 20000.0, 6003.6396)
instance.commit_particles()
instance.commit_grid()
x, error = instance.get_number_of_elements_used()
self.assertEquals(0, error)
self.assertEquals(x, 7)
instance.iterate()
indices_x = list(range(1,12,1))
temperatures,error = instance.get_grid_electron_temperature(indices_x,[1]*len(indices_x), [1]*len(indices_x), 1)
self.assertEquals(0, error)
print temperatures
instance.stop()
def test4(self):
instance=self.new_instance_of_an_optional_code(MocassinInterface)
instance.initialize_code()
instance.setup_mesh(3,3,3,0.95E+19,0.95E+19,0.95E+19)
instance.setup_abundancies()
instance.set_input_directory(instance.get_default_input_directory())
instance.set_initial_nebular_temperature(6000.0)
instance.set_maximum_number_of_monte_carlo_iterations(20)
instance.set_minimum_convergence_level(100)
instance.set_total_number_of_photons(10000000)
instance.set_total_number_of_points_in_frequency_mesh(600)
instance.set_high_limit_of_the_frequency_mesh(15)
instance.set_low_limit_of_the_frequency_mesh(1.001e-5)
instance.set_convergence_limit(0.09)
instance.set_number_of_ionisation_stages(6)
instance.setup_auto_convergence(0.2, 2.0, 1000000000)
instance.commit_parameters()
error=instance.define_stars(0.0, 0.0, 0.0, 20000, 6003.6396)
self.assertEquals(error, 0)
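        # a cell can be given a hydrogen density before commit_grid, but the grid only reports cells as active after commit_grid (checked below)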
instance.set_grid_hydrogen_density(1,1,1, 100)
value,error = instance.get_grid_hydrogen_density(1,1,1)
self.assertEquals(error, 0)
self.assertEquals(value, 100)
is_active,error = instance.get_grid_active(1,1,1)
self.assertEquals(error, 0)
self.assertFalse(is_active)
is_active,error = instance.get_grid_active(1,2,1)
self.assertEquals(error, 0)
self.assertFalse(is_active)
value,error = instance.get_grid_hydrogen_density(1,2,1,1)
self.assertEquals(error, 0)
self.assertEquals(value, 0)
instance.commit_particles()
instance.commit_grid()
is_active,error = instance.get_grid_active(1,1,1)
self.assertEquals(error, 0)
self.assertTrue(is_active)
value,error = instance.get_grid_hydrogen_density(1,1,1)
self.assertEquals(error, 0)
self.assertEquals(value, 100)
value,error = instance.get_grid_ion_density(1,1,1,1,1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEquals(value, 1e-5, 6)
is_active,error = instance.get_grid_active(1,2,1)
self.assertEquals(error, 0)
self.assertFalse(is_active)
value,error = instance.get_grid_hydrogen_density(1,2,1)
self.assertEquals(error, 0)
self.assertEquals(value, 0)
instance.stop()
class TestMocassin(TestWithMPI):
def test1(self):
instance=self.new_instance_of_an_optional_code(Mocassin)
instance.initialize_code()
self.assertEquals(0.0 | units.cm**-3, instance.parameters.constant_hydrogen_density)
print instance.parameters.abundancies_filename
instance.parameters.constant_hydrogen_density = 100.0 | units.cm**-3
self.assertEquals(100.0 | units.cm**-3, instance.parameters.constant_hydrogen_density)
self.assertEquals(10000 | units.K, instance.parameters.initial_nebular_temperature)
self.assertEquals("", instance.parameters.abundancies_filename)
instance.stop()
def test2(self):
instance=self.new_instance_of_an_optional_code(Mocassin) #, redirection = "none")
instance.initialize_code()
instance.set_input_directory(instance.get_default_input_directory())
instance.set_symmetricXYZ(True)
instance.parameters.nx = 11
instance.parameters.ny = 12
instance.parameters.nz = 13
instance.parameters.length_x = 1 | units.km
instance.parameters.length_y = 1 | units.km
instance.parameters.length_z = 1 | units.km
instance.commit_parameters()
self.assertEquals(instance.grid.shape[0], 11)
self.assertEquals(instance.grid.shape[1], 12)
self.assertEquals(instance.grid.shape[2], 13)
instance.stop()
def test3(self):
instance=self.new_instance_of_an_optional_code(Mocassin) #, debugger = "xterm")
instance.initialize_code()
instance.set_random_seed(1)
instance.set_input_directory(instance.get_default_input_directory())
instance.set_mocassin_output_directory(instance.output_directory + os.sep)
instance.set_initial_nebular_temperature(200.0 | units.K)
instance.parameters.nx = 7
instance.parameters.ny = 7
instance.parameters.nz = 7
print (0.95E+19 | units.cm).value_in(units.parsec)
instance.parameters.length_x = 0.95E+19 | units.cm
instance.parameters.length_y = 0.95E+19 | units.cm
instance.parameters.length_z = 0.95E+19 | units.cm
instance.set_high_limit_of_the_frequency_mesh(15 | mocassin_rydberg_unit)
instance.set_low_limit_of_the_frequency_mesh(1.001e-5| mocassin_rydberg_unit)
instance.set_maximum_number_of_monte_carlo_iterations(1)
instance.set_total_number_of_photons(100)
#~ instance.set_constant_hydrogen_density(100 | units.cm**-3)
instance.commit_parameters()
instance.grid.hydrogen_density = 100 | units.cm**-3
instance.commit_grid()
p = datamodel.Particle()
p.x = 0 | units.cm
p.y = 0 | units.cm
p.z = 0 | units.cm
p.temperature = 20000 | units.K
p.luminosity = 1. | units.LSun
instance.particles.add_particle(p)
instance.commit_particles()
self.assertAlmostRelativeEquals(1e-5, instance.ion_density_grid.density[3][1][2][0][0], 7)
self.assertAlmostRelativeEquals(1e-5 , instance.ion_density_grid.density[3][1][3][0][0], 7)
instance.step()
print instance.grid.electron_density.mean()
self.assertAlmostRelativeEquals(0.0, instance.get_percentage_converged())
self.assertGreater(instance.grid.electron_density.mean(), 65. | units.cm**-3)
self.assertLess(instance.grid.electron_density.mean(), 95. | units.cm**-3)
instance.stop()
```
#### File: test/codes_tests/test_octgrav.py
```python
import os
import sys
import numpy
from amuse.community.octgrav.interface import OctgravInterface, Octgrav
from amuse.test.amusetest import TestWithMPI
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
from amuse.rfi import channel
from amuse.ic.plummer import *
class TestMPIInterface(TestWithMPI):
def test1(self):
instance = self.new_instance_of_an_optional_code(OctgravInterface)
instance.new_particle(11.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0)
retrieved_state = instance.get_state(1)
self.assertEquals(11.0, retrieved_state['mass'])
self.assertEquals(2.0, retrieved_state['radius'])
self.assertEquals(instance.get_number_of_particles()['number_of_particles'], 1)
instance.cleanup_code()
instance.stop()
def test2(self):
instance = self.new_instance_of_an_optional_code(OctgravInterface)
instance.initialize_code()
instance.new_particle(
[1,10,100],
[3,12,102],
[4,13,103],
[5,14,104],
[6,15,105],
[7,16,106],
[8,17,107],
[2,11,101])
particle1_state = instance.get_state(1)
self.assertEquals(1, particle1_state['mass'])
particle2_state = instance.get_state(2)
self.assertEquals(10, particle2_state['mass'])
instance.delete_particle(1)
size_result = instance.get_number_of_particles()
self.assertEquals(2, size_result['number_of_particles'])
new_particle1_state = instance.get_state(1)
self.assertEquals(10, new_particle1_state['mass'])
new_particle_result = instance.new_particle(
1000,
1002,
1003,
1004,
1005,
1006,
1007,
1001)
        self.assertEquals(4, new_particle_result['index_of_the_particle'])
new_particle4_state = instance.get_state(4)
self.assertEquals(1000, new_particle4_state['mass'])
instance.cleanup_code()
instance.stop()
def test3(self):
instance = self.new_instance_of_an_optional_code(OctgravInterface)
instance.initialize_code()
instance.set_eps2(0.1**2)
instance.commit_parameters()
ids = []
for i in range(32):
id,errorcode = instance.new_particle(mass = 10.0, radius = 1.0, x = i, y = 0.0, z = 0.0, vx = 0.0, vy = 0.0, vz = 0.0)
ids.append(id)
self.assertEquals(errorcode, 0)
instance.commit_particles()
potential, errorcode = instance.get_potential(ids[0])
self.assertEquals(errorcode, 0)
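        # the other 31 particles (mass 10, at distances 1..31) each contribute -m / sqrt(r**2 + eps**2) with softening eps = 0.1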
        expected_potential = numpy.sum([-10.0 / numpy.sqrt((x + 1.0)**2 + 0.1**2) for x in range(31)])
        self.assertAlmostRelativeEquals(potential, expected_potential, 5)
total_potential, errorcode = instance.get_potential_energy()
potentials, errorcode = instance.get_potential(ids)
self.assertAlmostRelativeEquals(total_potential, numpy.sum(potentials * 10.0) / 2.0)
class TestAmuseInterface(TestWithMPI):
def new_system_of_sun_and_earth(self):
stars = datamodel.Stars(2)
sun = stars[0]
sun.mass = units.MSun(1.0)
sun.position = units.m(numpy.array((0.0,0.0,0.0)))
sun.velocity = units.ms(numpy.array((0.0,0.0,0.0)))
sun.radius = units.RSun(1.0)
earth = stars[1]
earth.mass = units.kg(5.9736e24)
earth.radius = units.km(6371)
earth.position = units.km(numpy.array((149.5e6,0.0,0.0)))
earth.velocity = units.ms(numpy.array((0.0,29800,0.0)))
return stars
def test0(self):
convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, units.AU)
instance = self.new_instance_of_an_optional_code(Octgrav, convert_nbody)
instance.initialize_code()
self.assertAlmostRelativeEqual(0.01, instance.parameters.epsilon_squared.value_in(units.AU**2), 2)#default
instance.parameters.epsilon_squared = 0.05 | units.AU**2
self.assertAlmostRelativeEqual(0.05, instance.parameters.epsilon_squared.value_in(units.AU**2), 6)
self.assertAlmostEqual(0.8, instance.parameters.opening_angle, 6)#default
instance.parameters.opening_angle = 0.5
self.assertAlmostEqual(0.5, instance.parameters.opening_angle, 6)
instance.parameters.timestep = 1.0 |units.s
self.assertEqual(1.0|units.s, instance.parameters.timestep)
instance.stop()
def test1(self):
plummer_size = 500
convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 149.5e6 | units.km)
stars = new_plummer_model(plummer_size, convert_nbody)
stars.radius = range(1, plummer_size+1)|units.km
instance = self.new_instance_of_an_optional_code(Octgrav, convert_nbody)
instance.particles.add_particles(stars)
instance.evolve_model(5 | units.day)
energy_total_init = instance.potential_energy + instance.kinetic_energy
instance.evolve_model(100 | units.day)
energy_total_final = instance.potential_energy + instance.kinetic_energy
self.assertAlmostRelativeEqual(energy_total_init, energy_total_final, 2)
instance.stop()
def test2(self):
convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 149.5e6 | units.km)
instance = self.new_instance_of_an_optional_code(Octgrav, convert_nbody)
instance.initialize_code()
instance.parameters.epsilon_squared = 0.0 | units.AU**2
instance.parameters.stopping_conditions_number_of_steps = 1
self.assertEquals(instance.parameters.stopping_conditions_number_of_steps,1)
stars = self.new_system_of_sun_and_earth()
earth = stars[1]
instance.particles.add_particles(stars)
instance.stopping_conditions.number_of_steps_detection.enable()
instance.evolve_model(365.0 | units.day)
self.assertTrue(instance.stopping_conditions.number_of_steps_detection.is_set())
instance.cleanup_code()
instance.stop()
def test3(self):
particles = datamodel.Particles(2)
particles.x = [0.0,10.0] | nbody_system.length
particles.y = 0 | nbody_system.length
particles.z = 0 | nbody_system.length
particles.radius = 0.005 | nbody_system.length
particles.vx = 0 | nbody_system.speed
particles.vy = 0 | nbody_system.speed
particles.vz = 0 | nbody_system.speed
particles.mass = 1.0 | nbody_system.mass
instance = self.new_instance_of_an_optional_code(Octgrav)
instance.initialize_code()
instance.parameters.stopping_conditions_number_of_steps = 20
self.assertEquals(instance.parameters.stopping_conditions_number_of_steps, 20)
instance.parameters.epsilon_squared = (0.01 | nbody_system.length)**2
instance.particles.add_particles(particles)
instance.stopping_conditions.number_of_steps_detection.enable()
instance.evolve_model(10 | nbody_system.time)
self.assertTrue(instance.stopping_conditions.number_of_steps_detection.is_set())
self.assertTrue(instance.model_time < 10 | nbody_system.time)
instance.stop()
def test4(self):
plummer_size = 500
stars = new_plummer_model(plummer_size)
stars.radius=0|nbody_system.length
instance = self.new_instance_of_an_optional_code(Octgrav)
instance.particles.add_particles(stars)
instance.synchronize_model()
ax,ay,az=instance.get_gravity_at_point(0. | nbody_system.length,
0. | nbody_system.length,
100. | nbody_system.length,
0. | nbody_system.length)
self.assertAlmostEqual(ax.number,0., 3)
self.assertAlmostRelativeEqual(ay.number,-1./100**2, 3)
self.assertAlmostEqual(az.number,0., 3)
pot=instance.get_potential_at_point([0.,0.]|nbody_system.length,
[0.,100] | nbody_system.length,
[100.,0.] | nbody_system.length,
[0.,0.] | nbody_system.length)
self.assertAlmostRelativeEqual(pot.number,[-1/100.,-1/100.], 3)
instance.stop()
```
#### File: test/codes_tests/test_seba.py
```python
import numpy
from amuse.test.amusetest import TestWithMPI
from amuse.community.seba.interface import SeBaInterface, SeBa
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import Particle
from amuse.datamodel import Particles
class TestSeBaInterface(TestWithMPI):
def test1(self):
instance = self.new_instance_of_an_optional_code(SeBaInterface)
endtime, mass, radius, luminosity, temperature, time_step, stellar_type, error = instance.evolve_star(1, 4600, 0.02)
self.assertEquals(error, 0)
self.assertTrue( endtime <= 4600.0)
self.assertAlmostRelativeEqual(endtime, 4600.0, 4)
self.assertAlmostRelativeEqual(mass, 1.0, 6)
self.assertAlmostRelativeEqual(radius, 0.9856, 4)
self.assertAlmostRelativeEqual(luminosity, 0.9585, 4)
self.assertAlmostRelativeEqual(temperature, 5751, 4)
self.assertAlmostRelativeEqual(time_step, 1089.3, 4)
self.assertEqual(stellar_type, 1)
instance.stop()
def test2(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle(1.)
self.assertEquals(error, 0)
self.assertEquals(index, 1)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 1.0, 6)
value, error = instance.get_radius(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 0.88824945029751212, 6)
stellar_type, error = instance.get_stellar_type(index)
self.assertEquals(error, 0)
self.assertEquals(stellar_type, 1)
instance.stop()
def test3(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle(1.)
self.assertEquals(error, 0)
self.assertEquals(index, 1)
error = instance.evolve_model(4600)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 1.0, 6)
value, error = instance.get_radius(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 0.9856, 4)
value, error = instance.get_temperature(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 5751, 4)
value, error = instance.get_time_step(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 1089.3, 4)
stellar_type, error = instance.get_stellar_type(index)
self.assertEquals(error, 0)
self.assertEquals(stellar_type, 1)
instance.stop()
def test4(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle(1.)
self.assertEquals(error, 0)
self.assertEquals(index, 1)
for t in range(46):
error = instance.evolve_model((t+1) * 100)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 1.0, 6)
value, error = instance.get_radius(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 0.9856, 4)
value, error = instance.get_temperature(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 5751, 4)
value, error = instance.get_time_step(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 1089.3, 4)
stellar_type, error = instance.get_stellar_type(index)
self.assertEquals(error, 0)
self.assertEquals(stellar_type, 1)
instance.stop()
def test5(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle([1., 2., 3.])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2,3])
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 2 , 6)
mass, error = instance.get_mass(3)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 3, 6)
error = instance.evolve_model(4600)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
print mass
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass[0], 1.0, 6)
self.assertAlmostRelativeEqual(mass[1], 0.62973, 4)
self.assertAlmostRelativeEqual(mass[2], 0.75012, 4)
instance.stop()
def test6(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle([1., 2., 3.])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2,3])
for t in range(46):
error = instance.evolve_model((t+1) * 100)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
print mass
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, [1.0, 0.62973, 0.75072], 4)
instance.stop()
def test7(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle([1., 2., 3.])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2,3])
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 2 , 6)
mass, error = instance.get_mass(3)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 3, 6)
mass, error = instance.get_mass(4)
self.assertEquals(error, -1)
error = instance.delete_star(2)
self.assertEquals(error, 0)
mass, error = instance.get_mass(2)
self.assertEquals(error, -1)
mass, error = instance.get_mass(3)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 3, 6)
index, error = instance.new_particle(4.)
self.assertEquals(error, 0)
self.assertEquals(index, 4)
instance.stop()
def test8(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle([3.0,1.0,2.0])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2,3])
error = instance.delete_star(1)
self.assertEquals(error, 0)
        error = instance.evolve_model(4600)
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 1, 6)
error = instance.delete_star(3)
self.assertEquals(error, 0)
index,error = instance.new_particle([5.0])
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 5.0, 6)
        error = instance.evolve_model(5000)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.99057, 4)
error = instance.delete_star(2)
self.assertEquals(error, 0)
error = instance.delete_star(index)
self.assertEquals(error, 0)
for i in range(4):
            mass, error = instance.get_mass(i+1)
self.assertEquals(error, -1)
index,error = instance.new_particle([5.0])
self.assertEquals(error, 0)
        error = instance.evolve_model(10000)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.99057, 4)
instance.stop()
def test9(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
instance.set_metallicity(0.001)
index,error = instance.new_particle([3.0,0.3])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2])
mu = (3.3 | units.MSun) * constants.G
orbital_period = 200.0 | units.day
semi_major_axis = (((orbital_period / 2.0 * numpy.pi)**2)*mu)**(1.0/3.0)
print semi_major_axis.value_in(units.RSun)
eccentricity = 0.5
index,error = instance.new_binary(
semi_major_axis.value_in(units.RSun),
eccentricity,
index[0],
index[1]
)
self.assertEquals(error, 0)
self.assertEquals(index, 3)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 3.3, 4)
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.3, 4)
error = instance.evolve_model(300)
self.assertEquals(error, 0)
mass, error = instance.get_mass(1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 2.98777, 4)
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.29999, 4)
error = instance.evolve_model(400)
self.assertEquals(error, 0)
mass, error = instance.get_mass(1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.86679, 4)
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.3, 4)
error = instance.delete_binary(index)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
self.assertEquals(error, -1)
        # check if the singles are still in the model and evolve
value, error = instance.get_age([1,2])
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 400, 4)
error = instance.evolve_model(500)
self.assertEquals(error, 0)
value, error = instance.get_age([1,2])
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 500, 4)
class TestSeBa(TestWithMPI):
def test1(self):
instance = self.new_instance_of_an_optional_code(SeBa)
endtime, mass, radius, luminosity, temperature, time_step, stellar_type = instance.evolve_star(1 | units.MSun, 4600 | units.Myr, 0.02)
self.assertTrue( endtime <= 4600 | units.Myr)
self.assertAlmostRelativeEqual(mass, 1.0 | units.MSun, 4)
self.assertAlmostRelativeEqual(radius, 0.9856 | units.RSun, 4)
self.assertAlmostRelativeEqual(luminosity, 0.9585 | units.LSun, 4)
self.assertAlmostRelativeEqual(temperature, 5751 | units.K, 4)
self.assertAlmostRelativeEqual(time_step, 1089.3 | units.Myr, 4)
self.assertEqual(stellar_type, 1 | units.stellar_type)
def test2(self):
instance = self.new_instance_of_an_optional_code(SeBa)
p = Particle()
p.mass = 5 | units.MSun
p.metallicity = 0.02
p = instance.particles.add_particle(p)
instance.evolve_model(130 | units.Myr)
print p
self.assertAlmostRelativeEqual(p.mass, 0.9906 | units.MSun, 4)
def test3(self):
print "Testing evolution of a close binary system..."
instance = self.new_instance_of_an_optional_code(SeBa)
instance.commit_parameters()
stars = Particles(2)
stars[0].mass = 3.0 | units.MSun
stars[1].mass = 0.3 | units.MSun
mu = (3.3 | units.MSun) * constants.G
orbital_period = 200.0 | units.day
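        # Kepler's third law: a = (G * (M1 + M2) * (P / (2 * pi))**2)**(1./3.) for the 200-day orbit of the 3.3 MSun pair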
semi_major_axis = (((orbital_period / (2.0 * numpy.pi))**2)*mu)**(1.0/3.0)
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0.5
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
from_seba_to_model = instance.particles.new_channel_to(stars)
from_seba_to_model.copy()
from_seba_to_model_binaries = instance.binaries.new_channel_to(binaries)
from_seba_to_model_binaries.copy()
previous_type = binary.child1.stellar_type
results = []
current_time = 0 | units.Myr
while current_time < (480 | units.Myr):
instance.update_time_steps()
# The next line appears a bit weird, but saves time for this simple test.
deltat = max(1.0*instance.binaries[0].time_step, 0.1| units.Myr)
current_time = current_time + deltat
instance.evolve_model(current_time)
from_seba_to_model.copy()
from_seba_to_model_binaries.copy()
if not binary.child1.stellar_type == previous_type:
results.append((binary.age, binary.child1.mass, binary.child1.stellar_type))
previous_type = binary.child1.stellar_type
self.assertEqual(len(results), 6)
for x in results:
print x
types = (
"Hertzsprung Gap",
"First Giant Branch",
"Core Helium Burning",
"First Asymptotic Giant Branch",
"Giant Branch Naked Helium star",
"Carbon/Oxygen White Dwarf",
)
for result, expected in zip(results, types):
self.assertEquals(str(result[2]), expected)
times = (
377.6369 | units.Myr,
379.8877 | units.Myr,
382.3112 | units.Myr,
473.4804 | units.Myr,
475.4766 | units.Myr,
476.6182 | units.Myr,
)
for result, expected in zip(results, times):
self.assertAlmostEqual(result[0].value_in(units.Myr), expected.value_in(units.Myr), 0)
masses = (
3.0000 | units.MSun,
3.0000 | units.MSun,
2.9983 | units.MSun,
2.9741 | units.MSun,
0.6710 | units.MSun,
0.6596 | units.MSun,
)
for result, expected in zip(results, masses):
self.assertAlmostEqual(result[1].value_in(units.MSun), expected.value_in(units.MSun), 2)
instance.stop()
def test5(self):
instance = self.new_instance_of_an_optional_code(SeBa)
self.assertAlmostRelativeEquals(instance.parameters.metallicity , 0.02)
instance.parameters.metallicity = 0.04
self.assertAlmostRelativeEquals(instance.parameters.metallicity , 0.04)
def test6(self):
instance = self.new_instance_of_an_optional_code(SeBa)
self.assertFalse(instance.parameters.is_logging_of_evolve_enabled)
instance.parameters.is_logging_of_evolve_enabled = True
self.assertTrue(instance.parameters.is_logging_of_evolve_enabled)
def test7(self):
instance = self.new_instance_of_an_optional_code(SeBa)
instance.commit_parameters()
stars = Particles(2)
stars[0].mass = 3.0 | units.MSun
stars[1].mass = 0.3 | units.MSun
mu = (3.3 | units.MSun) * constants.G
orbital_period = 200.0 | units.day
semi_major_axis = (((orbital_period / (2.0 * numpy.pi))**2)*mu)**(1.0/3.0)
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0.5
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
self.assertAlmostRelativeEquals(instance.binaries[0].child1.mass, 3.0 | units.MSun, 4)
self.assertAlmostRelativeEquals(instance.binaries[0].child2.mass, 0.3 | units.MSun, 4)
def xtest7(self):
instance = self.new_instance_of_an_optional_code(SeBa)
instance.parameters.metallicity = 0.03
p = Particle()
p.mass = 99.1605930967 | units.MSun
p = instance.particles.add_particle(p)
instance.evolve_model(614 | units.Myr)
print p.stellar_type
self.assertEquals(str(p.stellar_type),'Black Hole')
self.assertAlmostRelativeEqual(p.mass, 0.9906 | units.MSun, 4)
def test8(self):
instance = self.new_instance_of_an_optional_code(SeBa)
instance.parameters.supernova_kick_velocity = 0 | units.kms
instance.commit_parameters()
print "v_kick=", instance.parameters.supernova_kick_velocity
stars = Particles(2)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 9 | units.MSun
semi_major_axis = 10000|units.AU
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
instance.evolve_model(30|units.Myr)
print instance.particles
print instance.binaries
self.assertAlmostRelativeEquals(instance.binaries[0].eccentricity, 0.7872, 4)
def test9(self):
instance = self.new_instance_of_an_optional_code(SeBa)
stars = Particles(2)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 9 | units.MSun
instance.particles.add_particles(stars)
instance.evolve_model(30|units.Myr)
self.assertAlmostRelativeEquals(instance.particles.age, [30,30] |units.Myr)
self.assertAlmostRelativeEquals(instance.model_time, 30 | units.Myr)
self.assertAlmostRelativeEquals(instance.particles[0].mass, 1.2263 | units.MSun, 4)
self.assertAlmostRelativeEquals(instance.particles[1].mass, 8.8682 | units.MSun, 4)
stars = Particles(2)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 9 | units.MSun
instance.particles.add_particles(stars)
instance.evolve_model(60|units.Myr)
print instance.particles.age
print instance.particles.mass
self.assertAlmostRelativeEquals(instance.model_time, 60 | units.Myr)
self.assertAlmostRelativeEquals(instance.particles.age, [60,60,30,30] |units.Myr)
self.assertAlmostRelativeEquals(instance.particles[2].mass, 1.2263 | units.MSun, 4)
self.assertAlmostRelativeEquals(instance.particles[3].mass, 8.8682 | units.MSun, 4)
def test10(self):
""" Test supernova stopping condition """
instance = self.new_instance_of_an_optional_code(SeBa)
instance.stopping_conditions.supernova_detection.enable()
p = Particle()
p.mass = 10 | units.MSun
p.metallicity = 0.02
p = instance.particles.add_particle(p)
instance.set_supernova_kick_velocity(0.0|units.kms)
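        # with a zero kick velocity the natal kick recorded for the remnant should be the zero vector (checked below)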
instance.evolve_model(30 | units.Myr)
self.assertEquals(instance.stopping_conditions.supernova_detection.is_set(), True)
self.assertEquals(instance.stopping_conditions.supernova_detection.particles(0)[0].key, p.key)
self.assertAlmostRelativeEqual(p.age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(p.mass, 1.22632 | units.MSun, 4)
self.assertAlmostRelativeEqual(p.natal_kick_velocity, [0,0,0] | units.kms, 4)
def test11(self):
""" Test supernova stopping condition in a binary """
instance = self.new_instance_of_an_optional_code(SeBa)
instance.stopping_conditions.supernova_detection.enable()
stars = Particles(2)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 5.0 | units.MSun
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = 1.e6|units.RSun
binary.eccentricity = 0.1
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
instance.evolve_model(30 | units.Myr)
self.assertEquals(instance.stopping_conditions.supernova_detection.is_set(), True)
self.assertEquals(instance.stopping_conditions.supernova_detection.particles(0)[0].key, instance.binaries[0].child1.key)
self.assertEquals(instance.stopping_conditions.supernova_detection.particles(0)[0].key, instance.particles[0].key)
print instance.parameters
self.assertAlmostRelativeEqual(instance.particles[0].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[1].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[0].mass, 1.22632 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[1].mass, 5.0 | units.MSun, 4)
# self.assertAlmostRelativeEqual(instance.particles[0].natal_kick_velocity, [0,0,0] | units.kms, 4)
self.assertAlmostRelativeEqual(instance.binaries[0].child1.age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.binaries[0].child2.age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.binaries[0].child1.mass, 1.22632 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.binaries[0].child2.mass, 5.0 | units.MSun, 4)
# self.assertAlmostRelativeEqual(instance.binaries[0].child1.natal_kick_velocity, [0,0,0] | units.kms, 4)
def test12(self):
""" Test supernova stopping condition with multiple stars """
instance = self.new_instance_of_an_optional_code(SeBa)
instance.stopping_conditions.supernova_detection.enable()
stars = Particles(3)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 5.0 | units.MSun
stars[2].mass = 0.5 | units.MSun
instance.particles.add_particles(stars)
instance.evolve_model(30 | units.Myr)
self.assertEquals(instance.stopping_conditions.supernova_detection.is_set(), True)
self.assertEquals(instance.stopping_conditions.supernova_detection.particles(0)[0].key, instance.particles[0].key)
self.assertAlmostRelativeEqual(instance.particles[0].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[1].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[2].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[0].mass, 1.22632 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[1].mass, 5.0 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[2].mass, 0.5 | units.MSun, 4)
# self.assertAlmostRelativeEqual(instance.particles[0].natal_kick_velocity, [0,0,0] | units.kms, 4)
def test13(self):
""" Test supernova stopping condition with multiple stars """
instance = self.new_instance_of_an_optional_code(SeBa)
instance.stopping_conditions.supernova_detection.enable()
stars = Particles(3)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 11.0 | units.MSun
stars[2].mass = 0.5 | units.MSun
instance.particles.add_particles(stars)
instance.evolve_model(30 | units.Myr)
self.assertEquals(instance.stopping_conditions.supernova_detection.is_set(), True)
self.assertEquals(instance.stopping_conditions.supernova_detection.particles(0)[0].key, instance.particles[1].key)
self.assertAlmostRelativeEqual(instance.particles[0].age, 23.08688 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[1].age, 23.08688 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[2].age, 23.08688 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[0].mass, 9.92275 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[1].mass, 1.23645 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[2].mass, 0.5 | units.MSun, 4)
# self.assertAlmostRelativeEqual(instance.particles[0].natal_kick_velocity, [0,0,0] | units.kms, 4)
instance.evolve_model(30 | units.Myr)
self.assertEquals(instance.stopping_conditions.supernova_detection.is_set(), True)
self.assertEquals(instance.stopping_conditions.supernova_detection.particles(0)[0].key, instance.particles[0].key)
self.assertAlmostRelativeEqual(instance.particles[0].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[1].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[2].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[0].mass, 1.22632 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[1].mass, 1.23645 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[2].mass, 0.5 | units.MSun, 4)
# self.assertAlmostRelativeEqual(instance.particles[0].natal_kick_velocity, [0,0,0] | units.kms, 4)
def test14(self):
""" Test supernova stopping condition with multiple stars of equal mass """
instance = self.new_instance_of_an_optional_code(SeBa)
instance.stopping_conditions.supernova_detection.enable()
stars = Particles(3)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 10.0 | units.MSun
stars[2].mass = 0.5 | units.MSun
instance.particles.add_particles(stars)
instance.evolve_model(30 | units.Myr)
self.assertEquals(instance.stopping_conditions.supernova_detection.is_set(), True)
self.assertEquals(len(instance.stopping_conditions.supernova_detection.particles(0)), 2)
self.assertEquals(instance.stopping_conditions.supernova_detection.particles(0)[0].key, instance.particles[0].key)
self.assertEquals(instance.stopping_conditions.supernova_detection.particles(0)[1].key, instance.particles[1].key)
self.assertAlmostRelativeEqual(instance.particles[0].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[1].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[2].age, 27.35866 | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[0].mass, 1.22632 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[1].mass, 1.22632 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[2].mass, 0.5 | units.MSun, 4)
# self.assertAlmostRelativeEqual(instance.particles[0].natal_kick_velocity, [0,0,0] | units.kms, 4)
# self.assertAlmostRelativeEqual(instance.particles[1].natal_kick_velocity, [0,0,0] | units.kms, 4)
instance.evolve_model(30 | units.Myr)
self.assertEquals(instance.stopping_conditions.supernova_detection.is_set(), False)
self.assertAlmostRelativeEqual(instance.particles[0].age, 30. | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[1].age, 30. | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[2].age, 30. | units.Myr, 4)
self.assertAlmostRelativeEqual(instance.particles[0].mass, 1.22632 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[1].mass, 1.22632 | units.MSun, 4)
self.assertAlmostRelativeEqual(instance.particles[2].mass, 0.5 | units.MSun, 4)
```
#### File: test/codes_tests/test_twobody.py
```python
import numpy
from amuse.community.twobody import interface
from amuse.test.amusetest import TestWithMPI
from amuse.units import units
from amuse.units import nbody_system
from amuse.units.quantities import zero
from amuse.support.exceptions import AmuseException
from amuse import datamodel
class TwoBodyCodeTests(TestWithMPI):
def test_stumpff(self):
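        # the Stumpff functions C(z) and S(z) are continuous at z = 0, so values just above and below zero should agree with the value at zero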
self.assertAlmostEqual(interface.stumpff_C(0),interface.stumpff_C(0.0001),5)
self.assertAlmostEqual(interface.stumpff_C(0),interface.stumpff_C(-0.0001),5)
self.assertAlmostEqual(interface.stumpff_S(0),interface.stumpff_S(0.0001),5)
self.assertAlmostEqual(interface.stumpff_S(0),interface.stumpff_S(-0.0001),5)
class TwoBodyInterfaceTests(TestWithMPI):
def test1(self):
instance = interface.TwoBodyInterface()
res1 = instance.new_particle(mass = 11.0, radius = 2.0, x = 0.0, y = 0.0, z = 0.0, vx = 0.0, vy = 0.0, vz = 0.0)
res2 = instance.new_particle(mass = 21.0, radius = 5.0, x = 10.0, y = 0.0, z = 0.0, vx = 10.0, vy = 0.0, vz = 0.0)
self.assertEquals(0, res1['index_of_the_particle'])
self.assertEquals(1, res2['index_of_the_particle'])
retrieved_state1 = instance.get_state(0)
retrieved_state2 = instance.get_state(1)
self.assertEquals(11.0, retrieved_state1['mass'])
self.assertEquals(21.0, retrieved_state2['mass'])
self.assertEquals(0.0, retrieved_state1['x'])
self.assertEquals(10.0, retrieved_state2['x'])
instance.stop()
def test2(self):
instance = interface.TwoBodyInterface()
res1 = instance.new_particle(mass = 10.0, radius = 0.0, x = -1.0, y = 0.0, z = 0.0, vx = 0.0, vy = 10.0, vz = 0.0)
res2 = instance.new_particle(mass = 10.0, radius = 0.0, x = 1.0, y = 0.0, z = 0.0, vx = 0.0, vy = -10.0, vz = 0.0)
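        # analytic check: each particle has m = 10 and |v| = 10 and the pair is separated by 2, so E_kin = 2 * (0.5 * 10 * 100) and E_pot = -10 * 10 / 2 (G = 1 in these units)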
ek=0.5*(10*100+10*100)
ep=-(10*10/2)
e,err=instance.get_kinetic_energy()
self.assertEquals(ek,e)
e,err=instance.get_potential_energy()
self.assertEquals(ep,e)
instance.stop()
class TwoBodyTests(TestWithMPI):
def test1(self):
convert_nbody = nbody_system.nbody_to_si(5.9742e24 | units.kg, 1e6| units.m)
instance = interface.TwoBody(convert_nbody)
instance.stop()
def test2(self):
convert_nbody = nbody_system.nbody_to_si(5.9742e24 | units.kg, 1e6| units.m)
instance = interface.TwoBody(convert_nbody)
p = datamodel.Particle()
p.mass = 5.9742e24 | units.kg
p.radius = 6.371e6 | units.m
p.position = [0.,7.e6,-1.2124e7] | units.m
p.velocity = [0.,2.6679e3,4.6210e3] | units.m/units.s
instance.particles.add_particle(p)
instance.evolve_model(3600.0 | units.s)
position = instance.particles[0].position
velocity = instance.particles[0].velocity
self.assertAlmostEqual(position.x.value_in(units.m),0.,7)
self.assertAlmostEqual(position.y.value_in(units.m)/(-3.30647600568e6),1.,7)
self.assertAlmostEqual(position.z.value_in(units.m)/7.40831575351e6,1.,7)
self.assertAlmostEqual(velocity.x.value_in(units.m / units.s),0.,7)
self.assertAlmostEqual(velocity.y.value_in(units.m / units.s)/(-8.29821376206e3),1.,7)
self.assertAlmostEqual(velocity.z.value_in(units.m / units.s)/(-0.972888312209e3),1.,7)
instance.stop()
def test3(self):
convert_nbody = nbody_system.nbody_to_si(5.9742e24 | units.kg, 1e6| units.m)
instance = interface.TwoBody(convert_nbody)
p = datamodel.Particle()
p.mass = 5.9742e24 | units.kg
p.radius = 7.1e6 | units.m
p.position = [0.,7.e6,-1.2124e7] | units.m
p.velocity = [0.,2.6679e3,4.6210e3] | units.m/units.s
instance.particles.add_particle(p)
instance.evolve_model(3600.0 | units.s)
dt = convert_nbody.to_si(instance.model_time)
self.assertAlmostEqual(dt.value_in(units.s)/2583.44780926,1.,7)
position = instance.particles[0].position
self.assertAlmostEqual(((position.x**2+position.y**2+position.z**2)/(7.1e6)**2).value_in(units.m**2),1.,7)
instance.stop()
def test4(self):
convert_nbody = nbody_system.nbody_to_si(5.9742e24 | units.kg, 1e6| units.m)
instance = interface.TwoBody(convert_nbody)
p = datamodel.Particle()
p.mass = 5.9742e24 | units.kg
p.radius = 7.1e6 | units.m
p.position = [0.,7.e6,-1.2124e7] | units.m
p.velocity = [0.,2.6679e3,4.6210e3] | units.m/units.s
instance.particles.add_particle(p)
instance.evolve_model(3600.0 | units.s)
dt = convert_nbody.to_si(instance.model_time)
self.assertEqual(instance.particles[0].mass,5.9742e24 | units.kg)
instance.particles[0].mass=0.8*5.9742e24 | units.kg
instance.evolve_model(4000.0 | units.s)
self.assertEqual(instance.particles.mass[0],0.8*5.9742e24 | units.kg)
instance.stop()
def test5(self):
#from: Fundamentals of Celestial Mechanics, <NAME> 2nd edition
instance = interface.TwoBody()
p = datamodel.Particle()
p.mass = 1.0 | nbody_system.mass
p.radius = 0.001 | nbody_system.length
p.position = [1.0, 0.1, -0.1] | nbody_system.length
p.velocity = [-0.1, 2.0, -0.2] | nbody_system.speed
instance.particles.add_particle(p)
instance.evolve_model(1.0|nbody_system.time)
self.assertAlmostEqual(instance.particles.x, 0.611238439231|nbody_system.length, 7)
self.assertAlmostEqual(instance.particles.y, 1.92873971354574|nbody_system.length, 7)
self.assertAlmostEqual(instance.particles.z, -0.2562478900031234|nbody_system.length, 7)
instance.stop()
def test6(self):
#from: Fundamentals of Celestial Mechanics, <NAME> 2nd edition
instance = interface.TwoBody()
p = datamodel.Particles(2)
p.mass = [1, 0.0] | nbody_system.mass
p.radius = 0.001 | nbody_system.length
p.x = [1.0, 0.0] | nbody_system.length
p.y = [0.1, 0.0] | nbody_system.length
p.z = [-0.1, 0.0] | nbody_system.length
p.vx = [-0.1, 0.0] | nbody_system.speed
p.vy = [2.0, 0.0] | nbody_system.speed
p.vz = [-0.2, 0.0] | nbody_system.speed
instance.particles.add_particles(p)
instance.evolve_model(1.0|nbody_system.time)
self.assertAlmostEqual(instance.particles.x[0] - instance.particles.x[1], 0.611238439231|nbody_system.length, 7)
self.assertAlmostEqual(instance.particles.y[0] - instance.particles.y[1], 1.92873971354574|nbody_system.length, 7)
self.assertAlmostEqual(instance.particles.z[0] - instance.particles.z[1], -0.2562478900031234|nbody_system.length, 7)
instance.stop()
def test7(self):
print "Test 7: get_gravity_at_point"
instance = interface.TwoBody()
p = datamodel.Particles(2)
p.mass = 0.5 | nbody_system.mass
p.radius = 0 | nbody_system.length
p.position = [[1.0, 0, 0], [-1.0, 0, 0]] | nbody_system.length
p.velocity = [[0]*3]*2 | nbody_system.speed
instance.particles.add_particles(p)
zero = [0.0] | nbody_system.length
ax, ay, az = instance.get_gravity_at_point(zero, 0.5 | nbody_system.length, zero, zero)
self.assertAlmostEqual(ax, 16/9.0 | nbody_system.acceleration)
self.assertAlmostEqual(ay, 0 | nbody_system.acceleration)
self.assertAlmostEqual(az, 0 | nbody_system.acceleration)
zero = [0.0]*4 | nbody_system.length
ax, ay, az = instance.get_gravity_at_point(zero, [0, 0.5, 1.5, 2.5] | nbody_system.length, zero, zero)
self.assertAlmostEqual(ax, [0, 16/9.0, -2.08, -0.26303854] | nbody_system.acceleration)
self.assertAlmostEqual(ay, 0 | nbody_system.acceleration)
self.assertAlmostEqual(az, 0 | nbody_system.acceleration)
zero = [0.0] | nbody_system.length
ax, ay, az = instance.get_gravity_at_point(zero, 1.0 | nbody_system.length, zero, zero)
self.assertTrue(numpy.isnan(ax[0].value_in(nbody_system.acceleration)))
self.assertTrue(numpy.isnan(ay[0].value_in(nbody_system.acceleration)))
self.assertTrue(numpy.isnan(az[0].value_in(nbody_system.acceleration)))
zero = [0.0] | nbody_system.length
eps = [0.1] | nbody_system.length
ax, ay, az = instance.get_gravity_at_point(eps, 1.0 | nbody_system.length, zero, zero)
self.assertAlmostEqual(ax, -0.12453271 | nbody_system.acceleration)
self.assertAlmostEqual(ay, 0 | nbody_system.acceleration)
self.assertAlmostEqual(az, 0 | nbody_system.acceleration)
instance.stop()
def test8(self):
print "Test 8: get_potential_at_point"
instance = interface.TwoBody()
p = datamodel.Particles(2)
p.mass = 0.5 | nbody_system.mass
p.radius = 0 | nbody_system.length
p.position = [[1.0, 0, 0], [-1.0, 0, 0]] | nbody_system.length
p.velocity = [[0]*3]*2 | nbody_system.speed
instance.particles.add_particles(p)
zero = [0.0] | nbody_system.length
phi = instance.get_potential_at_point(zero, 0.5 | nbody_system.length, zero, zero)
self.assertAlmostEqual(phi, -4/3.0 | nbody_system.potential)
zero = [0.0]*4 | nbody_system.length
phi = instance.get_potential_at_point(zero, [0, 0.5, 1.5, 2.5] | nbody_system.length, zero, zero)
self.assertAlmostEqual(phi, [-1, -4/3.0, -1.2, -0.47619047] | nbody_system.potential)
zero = [0.0] | nbody_system.length
phi = instance.get_potential_at_point(zero, 1.0 | nbody_system.length, zero, zero)
self.assertTrue(numpy.isinf(phi[0].value_in(nbody_system.potential)))
instance.stop()
def test9(self):
print "Test 9: TwoBody parameters"
instance = interface.TwoBody()
self.assertEqual(instance.parameters.epsilon_squared, zero)
self.assertRaises(AmuseException, setattr, instance.parameters, "epsilon_squared", zero,
expected_message = "Could not set value for parameter 'epsilon_squared' of a 'TwoBody' object, "
"parameter is read-only")
instance.stop()
def test10(self):
convert_nbody = nbody_system.nbody_to_si(1.0 | units.yr, 1.0 | units.AU)
instance = interface.TwoBody(convert_nbody)
value = instance.get_begin_time()
self.assertEquals(0.0| units.yr, value)
self.assertAlmostEquals(0.0 | units.yr, instance.parameters.begin_time, in_units=units.yr)
for x in [1.0, 10.0, 100.0]:
instance.parameters.begin_time = x | units.yr
self.assertAlmostEquals(x | units.yr, instance.parameters.begin_time, in_units=units.yr)
instance.stop()
def test11(self):
instance = interface.TwoBody()
p = datamodel.Particles(2)
p.mass = [1, 0.0] | nbody_system.mass
p.radius = 0.001 | nbody_system.length
p.x = [1.0, 0.0] | nbody_system.length
p.y = [0.1, 0.0] | nbody_system.length
p.z = [-0.1, 0.0] | nbody_system.length
p.vx = [-0.1, 0.0] | nbody_system.speed
p.vy = [2.0, 0.0] | nbody_system.speed
p.vz = [-0.2, 0.0] | nbody_system.speed
instance.particles.add_particles(p)
instance.evolve_model(0.5|nbody_system.time)
particles1 = instance.particles.copy()
instance.stop()
instance = interface.TwoBody()
instance.parameters.begin_time = 0.5 |nbody_system.time
instance.particles.add_particles(particles1)
instance.evolve_model(1.0|nbody_system.time)
self.assertAlmostEqual(instance.particles.x[0] - instance.particles.x[1], 0.611238439231|nbody_system.length, 7)
self.assertAlmostEqual(instance.particles.y[0] - instance.particles.y[1], 1.92873971354574|nbody_system.length, 7)
self.assertAlmostEqual(instance.particles.z[0] - instance.particles.z[1], -0.2562478900031234|nbody_system.length, 7)
instance.stop()
```
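The tests above drive the two-body Kepler solver both through the low-level `TwoBodyInterface` and through the high-level particle API. As a quick orientation, here is a minimal driver sketch that reuses only calls appearing in the tests above; the numerical values are illustrative, not taken from the suite.

```python
# Minimal sketch: evolving a single particle with the TwoBody code, using
# the same high-level API as the tests above (cf. test5).
from amuse.community.twobody import interface
from amuse.units import nbody_system
from amuse import datamodel

instance = interface.TwoBody()
p = datamodel.Particle()
p.mass = 1.0 | nbody_system.mass
p.radius = 0.001 | nbody_system.length
p.position = [1.0, 0.1, -0.1] | nbody_system.length
p.velocity = [-0.1, 2.0, -0.2] | nbody_system.speed
instance.particles.add_particle(p)
instance.evolve_model(1.0 | nbody_system.time)
print(instance.particles.position)  # state after one n-body time unit
instance.stop()
```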
#### File: test/core_tests/test_binaryio.py
```python
from amuse.test import amusetest
from io import BytesIO
from collections import namedtuple
import os.path
import math
from amuse import io
from amuse.io import gadget
from amuse.io import nemobin
from amuse.units import nbody_system
from amuse.datamodel import Particles
class GadgetFileFormatProcessorTests(amusetest.TestCase):
header_parts = (
b'\x00\x01\x00\x00 N\x00\x00 \xa1\x07\x00 N\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf6`Q\xc6#\xcc\x9c>\x8d',
b'\xed\xb5\xa0\xf7\xc6\x90>\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 N\x00\x00 ',
b'\xa1\x07\x00 N\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\x01\x00\x00',
)
def test1(self):
header = b''.join(self.header_parts)
x = gadget.GadgetFileFormatProcessor()
file = BytesIO(header)
x.load_header(file)
print x.header_struct
self.assertEquals(x.header_struct.Npart[0], 20000)
self.assertEquals(x.header_struct.Npart[1], 500000)
self.assertEquals(x.header_struct.Npart[2], 20000)
self.assertEquals(x.header_struct.Npart[3], 0)
self.assertEquals(x.header_struct.Npart[4], 0)
self.assertEquals(x.header_struct.Npart[5], 0)
self.assertEquals(x.header_struct.Massarr[0], 0.0)
self.assertAlmostRelativeEqual(x.header_struct.Massarr[1], 4.2911501e-07, 8)
print x.header_struct.Massarr[2]
self.assertAlmostRelativeEqual(x.header_struct.Massarr[2], 2.5000000e-07, 8)
self.assertEquals(x.header_struct.FlagSfr, 0)
self.assertEquals(x.header_struct.FlagFeedback, 0)
self.assertEquals(x.header_struct.FlagAge, 0)
self.assertEquals(x.header_struct.HubbleParam, 0)
def test2(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'gadget_snapshot')
x = gadget.GadgetFileFormatProcessor()
file = open(filename,'rb')
result = x.load_file(file)
file.close()
self.assertEquals(len(result[0]), 1000)
self.assertEquals(len(result[1]), 10000)
def test3(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'gadget_snapshot')
x = gadget.GadgetFileFormatProcessor()
result = io.read_set_from_file(filename, format='gadget')
self.assertEquals(len(result[0]), 1000)
self.assertEquals(len(result[1]), 10000)
def test4(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'gassphere_littleendian.dat')
x = gadget.GadgetFileFormatProcessor()
result = io.read_set_from_file(filename, format='gadget')
self.assertEquals(len(result[0]), 1472)
self.assertEquals(len(result[1]), 0)
def test5(self):
options = io.get_options_for_format('gadget')
found_has_acceleration = False
for name, description, defaultval in options:
if name == 'has_acceleration':
found_has_acceleration = True
self.assertTrue(found_has_acceleration)
def test6(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'gassphere_littleendian.dat')
x = gadget.GadgetFileFormatProcessor()
gas, halo, disk, bulge, stars, bndry = io.read_set_from_file(filename, format='gadget')
self.assertEquals(len(gas), 1472)
self.assertEquals(len(halo), 0)
self.assertEquals(gas[0].key,1)
self.assertEquals(gas[1].key,2)
self.assertEquals(gas[2].key,3)
self.assertEquals(gas[1471].key,1472)
def test7(self):
"""test returned ids from gadget file
for ticket #245.
All the 'uneven' particles have key "1" and identical velocities/positions, which is incorrect;
upon further inspection, the test file itself is incorrect.
"""
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'ticket245.dat')
gas, halo, disk, bulge, stars, bndry = io.read_set_from_file(filename, format='gadget')
self.assertEquals(len(gas), 0)
self.assertEquals(len(halo),1324)
self.assertEquals(len(disk), 0)
self.assertEquals(len(bulge), 0)
self.assertEquals(len(stars), 0)
self.assertEquals(len(bndry), 0)
self.assertEquals(halo[0].key,544418538)
self.assertEquals(halo[1].key,544511335)
self.assertEquals(halo[2].key,544511457)
self.assertAlmostRelativeEquals(halo[0].velocity[0], -24.785614 | nbody_system.speed, 7)
print halo[1].velocity
self.assertAlmostRelativeEquals(halo[1].velocity[0], -25.346375 | nbody_system.speed, 7)
self.assertAlmostRelativeEquals(halo[2].velocity[0], -25.394440 | nbody_system.speed, 7)
def test8(self):
"""test returned ids from gadget file
for ticket #245.
added an option to not use the ids as keys, which should fix the problem
of incorrect ids.
"""
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'ticket245.dat')
gas, halo, disk, bulge, stars, bndry = io.read_set_from_file(filename, format='gadget', ids_are_keys = False)
self.assertEquals(len(gas), 0)
self.assertEquals(len(halo),1324)
self.assertEquals(len(disk), 0)
self.assertEquals(len(bulge), 0)
self.assertEquals(len(stars), 0)
self.assertEquals(len(bndry), 0)
self.assertEquals(halo[0].id,544418538)
self.assertEquals(halo[1].id,544511335)
self.assertEquals(halo[2].id,544511457)
self.assertAlmostRelativeEquals(halo[0].velocity[0], -24.785614 | nbody_system.speed, 7)
self.assertAlmostRelativeEquals(halo[1].velocity[0], -25.346375 | nbody_system.speed, 7)
self.assertAlmostRelativeEquals(halo[2].velocity[0], -25.394440 | nbody_system.speed, 7)
def test9(self):
class FakeList(object):
def __init__(self, _len):
self._len = _len
def __len__(self):
return self._len
set = (FakeList(20000), FakeList(500000), FakeList(20000), (), (), ())
x = gadget.GadgetFileFormatProcessor(set = set)
x.equal_mass_array=(0.0,4.291150104743886e-07, 2.5e-07,0.0,0.0,0.0) |nbody_system.mass
file = BytesIO()
x.store_header(file)
print x.header_struct
self.assertEquals(x.header_struct.Npart[0], 20000)
self.assertEquals(x.header_struct.Npart[1], 500000)
self.assertEquals(x.header_struct.Npart[2], 20000)
self.assertEquals(x.header_struct.Npart[3], 0)
self.assertEquals(x.header_struct.Npart[4], 0)
self.assertEquals(x.header_struct.Npart[5], 0)
print repr(file.getvalue())
print repr(b''.join(self.header_parts))
self.assertEquals(repr(file.getvalue()[0:30]), repr(b''.join(self.header_parts)[0:30]))
def test10(self):
p = Particles(2)
p[0].position = [1.0, 2.0, 3.0] | nbody_system.length
p[1].position = [4.0, 5.0, 6.0] | nbody_system.length
p[0].velocity = [7.0, 8.0, 10.0] | nbody_system.length / nbody_system.time
p[1].velocity = [11.0, 12.0, 13.0] | nbody_system.length / nbody_system.time
p.u = [3,4] | nbody_system.potential
p.rho = [5,6] | nbody_system.density
p.mass = [5,6] | nbody_system.mass
x = gadget.GadgetFileFormatProcessor(set = p)
file = BytesIO()
x.store_body(file)
input = BytesIO(file.getvalue())
positions = x.read_fortran_block_float_vectors(input)
self.assertEquals(positions[0] , [1.0, 2.0, 3.0])
self.assertEquals(positions[1] , [4.0, 5.0, 6.0])
velocities = x.read_fortran_block_float_vectors(input)
self.assertEquals(velocities[0] , [7.0, 8.0, 10.0])
self.assertEquals(velocities[1] , [11.0, 12.0, 13.0])
ids = x.read_fortran_block_ulongs(input)
self.assertEquals(ids[0], p[0].key)
self.assertEquals(ids[1], p[1].key)
masses = x.read_fortran_block_floats(input)
self.assertEquals(masses[0], 5)
self.assertEquals(masses[1], 6)
u = x.read_fortran_block_floats(input)
self.assertEquals(u[0], 3)
self.assertEquals(u[1], 4)
def test11(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'gassphere_littleendian.dat')
gas, halo, disk, bulge, stars, bndry = io.read_set_from_file(filename, format='gadget')
self.assertEquals(len(gas), 1472)
self.assertEquals(len(halo), 0)
self.assertEquals(gas[0].key,1)
self.assertEquals(gas[1].key,2)
self.assertEquals(gas[2].key,3)
self.assertEquals(gas[1471].key,1472)
self.assertAlmostRelativeEquals(gas[0:5].x,[-0.0713372901082, 0.0713372901082, -0.21178227663, -0.0698266476393, 0.0698266476393] | nbody_system.length, 7)
self.assertAlmostRelativeEquals(gas[0:5].u, [0.0500000007451, 0.0500000007451, 0.0500000007451, 0.0500000007451, 0.0500000007451] | (nbody_system.length / nbody_system.time)**2, 7 )
outputfilename = 'gadgettest.output'
try:
io.write_set_to_file((gas, halo, disk, bulge, stars, bndry), outputfilename, format='gadget', ids_are_long = False)
gas, halo, disk, bulge, stars, bndry = io.read_set_from_file(outputfilename, format='gadget')
self.assertEquals(len(gas), 1472)
self.assertEquals(len(halo), 0)
self.assertEquals(gas[0].key,1)
self.assertEquals(gas[1].key,2)
self.assertEquals(gas[2].key,3)
self.assertEquals(gas[1471].key,1472)
finally:
if os.path.exists(outputfilename):
os.remove(outputfilename)
def test12(self):
print "Test return_header for Gadget read_set_from_file"
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'gassphere_littleendian.dat')
data = io.read_set_from_file(filename, format='gadget', return_header=False) # (default)
self.assertTrue(isinstance(data, tuple))
self.assertEquals(data.__doc__, "GadgetData(gas, halo, disk, bulge, stars, bndry)")
data = io.read_set_from_file(filename, format='gadget', return_header=True)
self.assertTrue(isinstance(data, tuple))
self.assertEquals(data.__doc__, "GadgetData(gas, halo, disk, bulge, stars, bndry, "
"Npart, Massarr, Time, Redshift, FlagSfr, FlagFeedback, Nall, FlagCooling, "
"NumFiles, BoxSize, Omega0, OmegaLambda, HubbleParam, FlagAge, FlagMetals, "
"NallHW, flag_entr_ics)")
self.assertEquals(len(data.gas), 1472)
self.assertEquals(len(data.halo), 0)
self.assertEquals(data.gas[0].key,1)
self.assertEquals(data.gas[1].key,2)
self.assertEquals(data.gas[2].key,3)
self.assertEquals(data.gas[1471].key,1472)
self.assertAlmostRelativeEquals(data.gas[0:5].x,[-0.0713372901082, 0.0713372901082, -0.21178227663, -0.0698266476393, 0.0698266476393] | nbody_system.length, 7)
self.assertAlmostRelativeEquals(data.gas[0:5].u, [0.0500000007451, 0.0500000007451, 0.0500000007451, 0.0500000007451, 0.0500000007451] | (nbody_system.length / nbody_system.time)**2, 7 )
self.assertEquals(data.Npart, (1472, 0, 0, 0, 0, 0))
self.assertEquals(data.Time, 0.0)
self.assertEquals(data.Redshift, 0.0)
def test13(self):
print "Test convert_gadget_w_to_velocity and return_header for Gadget read_set_from_file"
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'tiny_lcdm_data_littleendian.dat')
data = io.read_set_from_file(filename, format='gadget', return_header=False, convert_gadget_w_to_velocity=False) # (default)
self.assertTrue(isinstance(data, tuple))
self.assertEquals(data.__doc__, "GadgetData(gas, halo, disk, bulge, stars, bndry)")
self.assertEquals(len(data.gas), 32)
self.assertEquals(len(data.halo), 32)
self.assertEquals(data.gas[0].key, 1)
self.assertEquals(data.halo[0].key, 32**3 + 1)
self.assertAlmostRelativeEquals(data.gas[:3].position, [[395.23443604, 395.75210571, 1244.31152344],
[310.17266846, 440.21728516, 2817.06396484], [191.95669556, 465.57223511, 4430.20068359]] | nbody_system.length, 7)
data_converted = io.read_set_from_file(filename, format='gadget', return_header=True, convert_gadget_w_to_velocity=True)
self.assertTrue(isinstance(data_converted, tuple))
self.assertEquals(data_converted.__doc__, "GadgetData(gas, halo, disk, bulge, stars, bndry, "
"Npart, Massarr, Time, Redshift, FlagSfr, FlagFeedback, Nall, FlagCooling, "
"NumFiles, BoxSize, Omega0, OmegaLambda, HubbleParam, FlagAge, FlagMetals, "
"NallHW, flag_entr_ics)")
self.assertEquals(len(data_converted.gas), 32)
self.assertEquals(len(data_converted.halo), 32)
self.assertEquals(data_converted.gas[0].key, 1)
self.assertEquals(data_converted.halo[0].key, 32**3 + 1)
self.assertEquals(data_converted.Npart, (32, 32, 0, 0, 0, 0))
self.assertEquals(data_converted.Time, 1/11.0)
self.assertEquals(data_converted.Redshift, 10.0)
self.assertEquals(data.gas.position, data_converted.gas.position)
self.assertAlmostRelativeEquals(data.gas.velocity, math.sqrt(data_converted.Time) * data_converted.gas.velocity, 7)
class NemoBinaryFileFormatProcessorTests(amusetest.TestCase):
def test1(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'plummer128.nemo')
file = open(filename, 'rb')
nemofile = nemobin.NemoBinaryFile(file)
tagcharacter, tagstring, dim, mustswap = nemofile.get_item_header()
self.assertEquals(tagcharacter, 'c')
self.assertEquals(tagstring, 'Headline')
self.assertEquals(len(dim), 1)
self.assertEquals(dim[0], 28)
file.close()
def test2(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'plummer128.nemo')
file = open(filename, 'rb')
nemofile = nemobin.NemoBinaryFile(file)
item = nemofile.read_item()
self.assertEquals(item.data, "init_xrandom: seed used 123")
file.close()
def test3(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'plummer128.nemo')
file = open(filename, 'rb')
nemofile = nemobin.NemoBinaryFile(file)
data = nemofile.read()
file.close()
self.assertEquals(len(data), 3)
tags = list(data.keys())
self.assertEquals(tags[0], 'Headline')
self.assertEquals(tags[1], 'History')
self.assertEquals(tags[2], 'SnapShot')
self.assertEquals(data['History'][0].data, 'mkplummer out=plummer128.nemo nbody=128 seed=123 VERSION=2.8b')
self.assertEquals(len(data['SnapShot'][0].data), 2)
tags = list(data['SnapShot'][0].data.keys())
self.assertEquals(tags[0], 'Parameters')
self.assertEquals(tags[1], 'Particles')
def test4(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'plummer128.nemo')
file = open(filename, 'rb')
x = nemobin.NemoBinaryFileFormatProcessor()
set = x.load_file(file).previous_state()
file.close()
self.assertEquals(len(set), 128)
self.assertEquals(set.get_timestamp(), 0.0 | nbody_system.time)
self.assertAlmostRelativeEquals(set.kinetic_energy(), 0.230214395174 | nbody_system.energy, 8)
self.assertAlmostRelativeEquals(set.potential_energy(G=nbody_system.G), -0.473503040144 | nbody_system.energy, 8)
def test5(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'plummer128.nemo')
file = open(filename, 'rb')
nemofile = nemobin.NemoBinaryFile(file)
data = nemofile.read()
file.close()
outputfile = BytesIO()
nemooutputfile = nemobin.NemoBinaryFile(outputfile)
nemooutputfile.write(data)
string = outputfile.getvalue()
outputfile.close()
inputfile = BytesIO(string)
nemoinputfile = nemobin.NemoBinaryFile(inputfile)
tagcharacter, tagstring, dim, mustswap = nemoinputfile.get_item_header()
self.assertEquals(tagcharacter, 'c')
self.assertEquals(tagstring, 'Headline')
self.assertEquals(len(dim), 1)
self.assertEquals(dim[0], 28)
inputfile.close()
def test6(self):
inputfile = BytesIO()
nemoinputfile = nemobin.NemoBinaryFile(inputfile)
data = nemoinputfile.read()
self.assertEquals(len(data), 0)
def test7(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'plummer128.nemo')
file = open(filename, 'rb')
nemofile = nemobin.NemoBinaryFile(file)
data = nemofile.read()
file.close()
outputfile = BytesIO()
nemooutputfile = nemobin.NemoBinaryFile(outputfile)
nemooutputfile.write(data)
string = outputfile.getvalue()
outputfile.close()
file = open(filename, 'rb')
original = file.read()
file.close()
self.assertEquals(len(original), len(string))
self.assertEquals(original, string)
def test8(self):
directory_name = os.path.dirname(__file__)
filename = os.path.join(directory_name, 'plummer128.nemo')
file = open(filename, 'rb')
x = nemobin.NemoBinaryFileFormatProcessor()
set = x.load_file(file)
file.close()
outputfile = BytesIO()
y = nemobin.NemoBinaryFileFormatProcessor()
y.set = set
y.store_file(outputfile)
string = outputfile.getvalue()
outputfile.close()
inputfile = BytesIO(string)
x = nemobin.NemoBinaryFileFormatProcessor()
set = x.load_file(inputfile)
inputfile.close()
self.assertEquals(len(set), 128)
self.assertAlmostRelativeEquals(set.kinetic_energy(), 0.230214395174 | nbody_system.energy, 8)
self.assertAlmostRelativeEquals(set.potential_energy(G=nbody_system.G), -0.473503040144 | nbody_system.energy, 8)
def test9(self):
filename = os.path.join(os.path.dirname(__file__), 'plummer128.nemo')
particles = io.read_set_from_file(filename, format="nemobin")
self.assertEquals(len(particles), 128)
self.assertAlmostEquals(particles.total_mass(), 1.0 | nbody_system.mass)
self.assertAlmostEquals(particles.center_of_mass(), 0.0 | nbody_system.length)
self.assertAlmostEquals(particles.center_of_mass_velocity(), 0.0 | nbody_system.speed)
self.assertAlmostEquals(particles.kinetic_energy(), 0.230214395174 | nbody_system.energy)
```
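The binary-I/O tests above read and write Gadget and NEMO files through both the low-level processors and the `amuse.io` front end. A minimal read sketch, reusing only the calls exercised above; the filename is the test fixture used above, so substitute your own snapshot path.

```python
# Sketch: reading a Gadget snapshot via the amuse.io front end, as in the
# tests above. 'gassphere_littleendian.dat' is the test fixture; replace it
# with your own snapshot path.
from amuse import io

gas, halo, disk, bulge, stars, bndry = io.read_set_from_file(
    'gassphere_littleendian.dat', format='gadget')
print(len(gas), len(halo))

# With return_header=True a namedtuple including the header fields is
# returned (see test12/test13 above).
data = io.read_set_from_file(
    'gassphere_littleendian.dat', format='gadget', return_header=True)
print(data.Npart, data.Time, data.Redshift)
```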
#### File: test/core_tests/test_optparse.py
```python
from amuse.test import amusetest
import numpy
import sys
from amuse.support.exceptions import AmuseException
from amuse.units.quantities import *
from amuse.units import si
from amuse.units import units
from amuse.units import nbody_system
from amuse import datamodel
from amuse.units import optparse
class TestQuantities(amusetest.TestCase):
def test1(self):
x = optparse.OptionParser()
x.add_option('-m', unit = units.MSun, dest = "mass", type = float)
options, args = x.parse_args(['-m', '2.0'])
self.assertAlmostRelativeEquals(options.mass, 2.0 | units.MSun)
def test2(self):
x = optparse.OptionParser()
x.add_option('-m', unit = units.MSun, default = 1 | units.MSun, dest = "mass", type = float)
options, args = x.parse_args(['bla'])
self.assertAlmostRelativeEquals(options.mass, 1.0 | units.MSun)
self.assertEquals(args[0], 'bla')
def test3(self):
x = optparse.OptionParser()
x.add_option('-m', unit = units.MSun, default = '1.5', dest = "mass", type = float)
options, args = x.parse_args(['bla'])
self.assertAlmostRelativeEquals(options.mass, 1.5 | units.MSun)
self.assertEquals(args[0], 'bla')
def test4(self):
x = optparse.OptionParser()
x.add_option('-m', unit = units.MSun, help = "(unit: %unit)", default = '1.5', dest = "mass", type = float)
helpstr = x.format_help()
print helpstr
self.assertTrue('(unit: MSun)' in helpstr)
x = optparse.OptionParser()
x.add_option('-m', unit = nbody_system.mass, help = "(unit: %unit)", default = '1.5', dest = "mass", type = float)
helpstr = x.format_help()
print helpstr
self.assertTrue('(unit: mass)' in helpstr)
def test5(self):
x = optparse.OptionParser()
x.add_option('-m', unit = units.MSun, default = 1.5, dest = "mass", type = float)
options, args = x.parse_args(['bla'])
self.assertAlmostRelativeEquals(options.mass, 1.5 | units.MSun)
self.assertEquals(args[0], 'bla')
def test6(self):
x = optparse.OptionParser()
x.add_option('-m', unit = units.MSun, help = "(default: %default, unit: %unit)", default = '1.5', dest = "mass", type = float)
helpstr = x.format_help()
print helpstr
self.assertTrue('unit: MSun)' in helpstr)
self.assertTrue('(default: 1' in helpstr)
```
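These tests show that `amuse.units.optparse.OptionParser` accepts a `unit` argument per option and expands `%unit` and `%default` in help strings. A minimal script-level sketch based solely on the usage above:

```python
# Sketch: a unit-aware command-line option, mirroring the tests above.
from amuse.units import units
from amuse.units import optparse

parser = optparse.OptionParser()
parser.add_option('-m', unit=units.MSun, dest='mass', type=float, default=1.5,
                  help='stellar mass (default: %default, unit: %unit)')
options, args = parser.parse_args(['-m', '2.0'])
print(options.mass)  # a quantity with units attached: 2.0 MSun
```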
#### File: test/core_tests/test_particles_properties.py
```python
import numpy
import time
import sys
import pickle
from amuse.test import amusetest
from amuse.units import units
from amuse.units import constants
from amuse.units import nbody_system
from amuse.support.exceptions import AmuseException
from amuse.support.interface import InCodeComponentImplementation
from amuse import datamodel
class TestParticlesProperties(amusetest.TestCase):
def test1(self):
particles = datamodel.Particles(2)
particles.mass = 10 | units.kg
self.assertTrue(hasattr(particles, 'collection_attributes'))
particles.collection_attributes.timestamp = 1 | units.yr
self.assertEquals(particles.collection_attributes.timestamp, 1 | units.yr)
particles.collection_attributes.a = 2
self.assertEquals(particles.collection_attributes.a, 2)
def test2(self):
particles = datamodel.Particles(2)
particles.collection_attributes.timestamp = 1 | units.yr
self.assertEquals(str(particles.collection_attributes), "timestamp: 1 yr")
particles.collection_attributes.a = 2
self.assertEquals(str(particles.collection_attributes), "timestamp: 1 yr\na: 2")
def test3(self):
particles1 = datamodel.Particles(2)
particles1.collection_attributes.timestamp = 1 | units.yr
particles1.collection_attributes.a = 2
particles2 = particles1.copy()
self.assertEquals(particles2.collection_attributes.timestamp, 1 | units.yr)
self.assertEquals(particles2.collection_attributes.a, 2)
self.assertEquals(str(particles2.collection_attributes), "timestamp: 1 yr\na: 2")
def test4(self):
particles1 = datamodel.Particles(2)
particles1.collection_attributes.timestamp = 1 | units.yr
particles1.collection_attributes.a = 2
pickled_string = pickle.dumps(particles1)
particles2 = pickle.loads(pickled_string)
self.assertEquals(particles2.collection_attributes.timestamp, 1 | units.yr)
self.assertEquals(particles2.collection_attributes.a, 2)
```
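The tests above verify that set-level metadata stored in `collection_attributes` survives `copy()` and pickling. A minimal usage sketch reusing the same calls:

```python
# Sketch: attaching collection-level metadata to a particle set, as above.
from amuse import datamodel
from amuse.units import units

particles = datamodel.Particles(2)
particles.mass = 10 | units.kg
particles.collection_attributes.timestamp = 1 | units.yr
particles.collection_attributes.a = 2

clone = particles.copy()
print(clone.collection_attributes.timestamp)  # 1 yr
print(str(clone.collection_attributes))       # prints "timestamp: 1 yr" and "a: 2"
```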
#### File: test/core_tests/test_stopping_conditions.py
```python
from amuse.test import amusetest
from amuse.support.exceptions import AmuseException
from amuse.community.interface.stopping_conditions import StoppingConditions
from amuse import datamodel
from amuse.units import units
from amuse.support import interface
class TestStoppingCondition(amusetest.TestCase):
def test1(self):
class AllEnabled(object):
def is_stopping_condition_enabled(self, sc_type):
return 1
def has_stopping_condition(self, sc_type):
return 1
instance = StoppingConditions(AllEnabled())
self.assertTrue(instance.collision_detection.is_supported())
self.assertTrue(instance.collision_detection.is_enabled())
self.assertTrue(instance.escaper_detection.is_supported())
self.assertTrue(instance.escaper_detection.is_enabled())
self.assertTrue(instance.timeout_detection.is_supported())
self.assertTrue(instance.timeout_detection.is_enabled())
def test2(self):
class OneEnabled(object):
def is_stopping_condition_enabled(self, sc_type):
return 1 if sc_type == 0 else 0
def has_stopping_condition(self, sc_type):
return 1 if sc_type == 0 else 0
instance = StoppingConditions(OneEnabled())
self.assertTrue(instance.collision_detection.is_supported())
self.assertTrue(instance.collision_detection.is_enabled())
self.assertFalse(instance.escaper_detection.is_supported())
self.assertFalse(instance.escaper_detection.is_enabled())
self.assertFalse(instance.timeout_detection.is_supported())
self.assertFalse(instance.timeout_detection.is_enabled())
def test3(self):
class OneSettable(object):
is_enabled = 0
def is_stopping_condition_enabled(self, sc_type):
print sc_type, self.is_enabled
return self.is_enabled if sc_type == 0 else 0
def has_stopping_condition(self, sc_type):
return 1 if sc_type == 0 else 0
def enable_stopping_condition(self, sc_type):
if sc_type == 0:
self.is_enabled = 1
instance = StoppingConditions(OneSettable())
self.assertTrue(instance.collision_detection.is_supported())
self.assertFalse(instance.collision_detection.is_enabled())
instance.collision_detection.enable()
self.assertTrue(instance.collision_detection.is_enabled())
def test4(self):
class OneSettable(object):
is_enabled = 0
def is_stopping_condition_enabled(self, sc_type):
print sc_type, self.is_enabled
return self.is_enabled if sc_type == 0 else 0
def has_stopping_condition(self, sc_type):
return 1 if sc_type == 0 else 0
def enable_stopping_condition(self, sc_type):
if sc_type == 0:
self.is_enabled = 1
def disable_stopping_condition(self, sc_type):
if sc_type == 0:
self.is_enabled = 0
instance = StoppingConditions(OneSettable())
self.assertTrue(instance.collision_detection.is_supported())
self.assertFalse(instance.collision_detection.is_enabled())
instance.collision_detection.enable()
self.assertTrue(instance.collision_detection.is_enabled())
instance.collision_detection.disable()
self.assertFalse(instance.collision_detection.is_enabled())
def test5(self):
class OneEnabled(object):
def is_stopping_condition_enabled(self, sc_type):
return 1 if sc_type == 0 else 0
def has_stopping_condition(self, sc_type):
return 1 if sc_type == 0 else 0
instance = StoppingConditions(OneEnabled())
self.assertFalse(instance.escaper_detection.is_supported())
self.assertFalse(instance.escaper_detection.is_enabled())
self.assertRaises(AmuseException, instance.escaper_detection.enable)
self.assertRaises(AmuseException, instance.escaper_detection.disable)
def test6(self):
class Collision(object):
def is_stopping_condition_enabled(self, sc_type):
return 1 if sc_type == 0 else 0
def has_stopping_condition(self, sc_type):
return 1 if sc_type == 0 else 0
def is_stopping_condition_set(self, sc_type):
return 1 if sc_type == 0 else 0
def get_number_of_stopping_conditions_set(self):
return 1
def get_stopping_condition_info(self, indices):
return [0],[1]
instance = StoppingConditions(Collision())
instance.code.particles = datamodel.Particles(3)
instance.code.particles.mass = (1,2,3) | units.kg
instance.code.particles.add_function_attribute(
"get_stopping_condition_particle_index",
lambda particles, indices, sc_type : particles[indices]
)
self.assertTrue(instance.collision_detection.is_set())
particles = instance.collision_detection.particles(0)
self.assertEquals(len(particles),1)
print particles[0]
self.assertAlmostRelativeEqual(particles[0].mass, 1|units.kg)
def test7(self):
class Collision(object):
def is_stopping_condition_enabled(self, sc_type):
return 1 if sc_type == 0 else 0
def has_stopping_condition(self, sc_type):
return 1 if sc_type == 0 else 0
def is_stopping_condition_set(self, sc_type):
return 1 if sc_type == 0 else 0
def get_number_of_stopping_conditions_set(self):
return 1
def get_stopping_condition_info(self, indices):
return [0],[3]
instance = StoppingConditions(Collision())
instance.code.particles = datamodel.Particles(3)
instance.code.particles.mass = (1,2,3) | units.kg
instance.code.particles.add_function_attribute(
"get_stopping_condition_particle_index",
lambda particles, indices, sc_type : particles[indices]
)
self.assertTrue(instance.collision_detection.is_set())
particles = instance.collision_detection.particles(0)
self.assertEquals(len(particles),1)
particles = instance.collision_detection.particles(1)
self.assertEquals(len(particles),1)
particles = instance.collision_detection.particles(2)
self.assertEquals(len(particles),1)
particles = instance.collision_detection.particles(3)
self.assertEquals(len(particles),0)
```
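The stopping-condition tests rely on stub code objects that implement only the low-level query functions; `StoppingConditions` builds the `*_detection` accessors on top of them. A condensed sketch of that pattern, taken directly from the tests above:

```python
# Sketch: wrapping a (stub) code object with StoppingConditions, as in the
# tests above. Real community codes expose these low-level functions
# through their interfaces.
from amuse.community.interface.stopping_conditions import StoppingConditions

class OnlyCollisionSupported(object):
    def is_stopping_condition_enabled(self, sc_type):
        return 1 if sc_type == 0 else 0
    def has_stopping_condition(self, sc_type):
        return 1 if sc_type == 0 else 0

sc = StoppingConditions(OnlyCollisionSupported())
print(sc.collision_detection.is_supported())  # True
print(sc.escaper_detection.is_supported())    # False
```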
#### File: test/ext_tests/test_galactic_potentials.py
```python
from amuse.test import amusetest
from amuse.units import units, nbody_system, constants
from amuse.ext.galactic_potentials import NFW_profile, MiyamotoNagai_profile, Plummer_profile, \
PowerLawCutoff_profile, MWpotentialBovy2015, scipy_imported
import numpy
class TestNFWProfile(amusetest.TestCase):
def test1(self):
"""
See the textbook by L.Aguilar, chapter 3:
ftp://ftp.crya.unam.mx/pub/luisfr/laguilar/GH05_Aguilar.pdf
"""
rho0 = 1.e3|units.MSun/units.parsec**3
rs = 6.6|units.kpc
nfw_profile = NFW_profile(rho0, rs)
m0 = 4.*numpy.pi*rho0*rs**3
phi0 = -4.*numpy.pi*constants.G*rho0*rs**2
ar0 = -phi0/rs
# relative mass enclosed at rs: m_at_rs / m0 ~ 0.193147
m_at_rs = nfw_profile.enclosed_mass(rs)
self.assertAlmostEqual(m_at_rs/m0, 0.193147, 5)
# relative mass enclosed at 5.3054*rs ~ m0
m_eq_one = nfw_profile.enclosed_mass(5.3054*rs,)
self.assertAlmostEqual(m_eq_one/m0, 1.0, 5)
# relative mass density, rho(rs) ~ rho0/4
rho_rs = nfw_profile.mass_density(rs)
self.assertAlmostEqual(rho0/rho_rs, 4.0, 5)
# relative gravitational potential at (r/rs)->0 is approaching 1
phi_at_0 = nfw_profile.get_potential_at_point(0.|units.m,0.|units.m,0.|units.m,1.e-10|units.kpc)
self.assertAlmostEqual(phi_at_0/phi0, 1.0, 4)
# relative force at (r/rs)->0 is approaching -1/2
ar_at_0 = nfw_profile.radial_force(1.e-5|units.kpc)
self.assertAlmostEqual(ar_at_0/ar0, -0.5, 4)
# relative force at (r/rs)->inf is approaching 0
ar_at_inf = nfw_profile.radial_force(1.e10|units.kpc)
self.assertAlmostEqual(ar_at_inf/ar0, 0.0, 5)
# relative circular velocity has maximum at r/r_s=2.16258
vc_eq_max = nfw_profile.circular_velocity(2.16258*rs)
vc_at_r_lt_max = nfw_profile.circular_velocity(2.16248*rs)
vc_at_r_gt_max = nfw_profile.circular_velocity(2.16268*rs)
self.assertTrue(vc_at_r_lt_max < vc_eq_max)
self.assertTrue(vc_at_r_gt_max < vc_eq_max)
class TestPlummerProfile(amusetest.TestCase):
def test1(self):
mass = 6.e6|units.MSun
a = 6.|units.parsec
plummer_profile = Plummer_profile(mass,a)
rho0 = mass/(4./3.*numpy.pi*a**3)
phi0 = -constants.G*mass/a
# enclosed mass at R>>a is total mass
m_tot = plummer_profile.enclosed_mass(a*1.e5)
self.assertAlmostEqual(m_tot/mass, 1.0, 5)
# mass density at the center
rho_cen = plummer_profile.mass_density(0.|units.m)
self.assertAlmostEqual(rho_cen/rho0, 1.0, 5)
# potential at r=a is phi0/sqrt(2)
phi_at_a = plummer_profile.get_potential_at_point(0.|units.m,0.|units.m,0.|units.m,a)
self.assertAlmostEqual(phi_at_a/phi0*numpy.sqrt(2.), 1.0, 5)
class TestMiyamotoNagaiProfile(amusetest.TestCase):
def test1(self):
mass = 6.6e10|units.MSun
a = 3.33|units.kpc
b = 0.666|units.kpc
profile = MiyamotoNagai_profile(mass,a,b)
r_force = profile.force_R(0.1*a,0.2*b,1.2*a).in_(units.parsec/units.Myr**2)
z_force = profile.force_z(0.1*a,0.2*b,1.2*a).in_(units.parsec/units.Myr**2)
potential = profile.get_potential_at_point(0.|units.m,a*0.1,a*5.,b*0.5).in_(units.kpc**2/units.Myr**2)
ax,ay,az = profile.get_gravity_at_point(0.|units.m,a*0.1,a*5.,b*0.5)
density = profile.mass_density(0.1*a,-0.5*b,1.2*a).in_(units.MSun/units.kpc**3)
vc = profile.circular_velocity_at_z0(1.0*a).in_(units.kms)
m_r = profile.equivalent_enclosed_mass_in_plane(100.*a).in_(units.MSun)
self.assertAlmostEqual(r_force, -0.263920797645|units.parsec/units.Myr**2, 12)
self.assertAlmostEqual(z_force, -5.35763387945 |units.parsec/units.Myr**2, 11)
self.assertAlmostEqual(potential, -0.017321166943|units.kpc**2/units.Myr**2, 12)
self.assertAlmostEqual(ax, -0.0196231550925|units.parsec/units.Myr**2, 12)
self.assertAlmostEqual(ay, -0.981157754625|units.parsec/units.Myr**2, 12)
self.assertAlmostEqual(az, -0.107380572532 |units.parsec/units.Myr**2, 12)
self.assertAlmostEqual(density, 1336672.32264|units.MSun/units.kpc**3, 5)
self.assertAlmostEqual(vc, 149.569512197|units.kms, 9)
self.assertAlmostEqual(m_r, 65.9857465656|1.e9*units.MSun, 9)
def test2(self):
mass = 1.984e4|units.MSun
a = 0.0|units.parsec
b = 6.66|units.parsec
nm_profile = MiyamotoNagai_profile(mass,a,b)
plummer_profile = Plummer_profile(mass,b)
pot_nm = nm_profile.get_potential_at_point(0.|units.m,b*0.1,b*5.,b*0.2)
pot_p = plummer_profile.get_potential_at_point(0.|units.m,b*0.1,b*5.,b*0.2)
self.assertEqual(pot_nm,pot_p)
ax_nm,ay_nm,az_nm = nm_profile.get_gravity_at_point(0.|units.m,b*0.1,b*5.,b*0.1)
ax_p,ay_p,az_p = plummer_profile.get_gravity_at_point(0.|units.m,b*0.1,b*5.,b*0.1)
print ax_nm.in_(units.parsec/units.Myr**2), ax_p.in_(units.parsec/units.Myr**2)
self.assertAlmostEqual(ax_nm.in_(units.parsec/units.Myr**2),ax_p.in_(units.parsec/units.Myr**2), 12)
self.assertAlmostEqual(ay_nm.in_(units.parsec/units.Myr**2),ay_p.in_(units.parsec/units.Myr**2), 12)
self.assertAlmostEqual(az_nm.in_(units.parsec/units.Myr**2),az_p.in_(units.parsec/units.Myr**2), 12)
rho_nm = nm_profile.mass_density(b*0.,b*0.,b*6.6)
rho_p = plummer_profile.mass_density(b*6.6)
self.assertEqual(rho_nm,rho_p)
class TestPowerLawCutoff_profile(amusetest.TestCase):
def test1(self):
rho0 = 12.|units.MSun/units.parsec**3
r0 = 1.6|units.parsec
alpha = 1.6
rc = 0.66|units.kpc
power_law = PowerLawCutoff_profile(rho0,r0,alpha,rc)
r_force = power_law.radial_force(0.1*r0).in_(units.parsec/units.Myr**2)
potential = power_law.get_potential_at_point(0.|units.m,r0*0.1,r0*5.,r0*0.5).in_(units.kpc**2/units.Myr**2)
ax,ay,az = power_law.get_gravity_at_point(0.|units.m,r0*0.1,r0*5.,r0*0.5)
density = power_law.mass_density(6.6*r0).in_(units.MSun/units.parsec**3)
vc = power_law.circular_velocity(r0).in_(units.kms)
m_r = power_law.enclosed_mass(100.*r0).in_(units.MSun)
self.assertAlmostEqual(r_force, -3.08704194743 |units.parsec/units.Myr**2, 10)
self.assertAlmostEqual(potential, -3.8425981846|1.e-5*units.kpc**2/units.Myr**2, 10)
self.assertAlmostEqual(ax, -0.00585557189412|units.parsec/units.Myr**2, 10)
self.assertAlmostEqual(ay, -0.292778594706|units.parsec/units.Myr**2, 10)
self.assertAlmostEqual(az, -0.0292778594706|units.parsec/units.Myr**2, 10)
self.assertAlmostEqual(density, 0.585867989506 |units.MSun/units.parsec**3, 10)
self.assertAlmostEqual(vc, 1.0891472277|units.kms, 10)
self.assertAlmostEqual(m_r, 2.71756907682 |1.e5*units.MSun, 9)
def setUp(self):
if not scipy_imported:
self.skip("scipy not installed")
class TestMWpotentialBovy2015(amusetest.TestCase):
def test1(self):
"""
See Table 1 of Bovy 2015, http://adsabs.harvard.edu/abs/2015ApJS..216...29B
"""
mw = MWpotentialBovy2015()
r0 = 8.|units.kpc
v0 = 220.|units.kms
# total mass density at r=r0,z=0
rho_r0_z0 = mw.mass_density(r0,0.|units.m,0.|units.m)
self.assertAlmostEqual(rho_r0_z0, 0.10|units.MSun/units.parsec**3, 2)
# halo mass density at r0
rho_halo_at_r0 = mw.halo.mass_density(r0)
self.assertAlmostEqual(rho_halo_at_r0, 0.008|units.MSun/units.parsec**3, 3)
# mass enclosed in 60kpc
mass_in_60 = mw.enclosed_mass(60.|units.kpc)
print mass_in_60.in_(units.MSun)
self.assertAlmostEqual((mass_in_60/1.e11).in_(units.MSun), 4.08|units.MSun, 2)
# normalization factor for bulge
fr_r0_v0 = v0**2 / r0
fr_r0_bulge = -mw.bulge.radial_force(r0)
self.assertAlmostEqual(fr_r0_bulge/fr_r0_v0, 0.05, 3)
# normalization factor for disk
fr_r0_disk = -mw.disk.force_R(r0,0.|units.m,0.|units.m)
self.assertAlmostEqual(fr_r0_disk/fr_r0_v0, 0.6, 3)
# normalization factor for halo
fr_r0_halo = -mw.halo.radial_force(r0)
print fr_r0_halo/fr_r0_v0
self.assertAlmostEqual(fr_r0_halo/fr_r0_v0, 0.35, 3)
def setUp(self):
if not scipy_imported:
self.skip("scipy not installed")
```
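The galactic-potential tests evaluate analytic profiles at given radii and compare against published values. A short sketch for the Bovy (2015) Milky Way model, using only methods exercised above (scipy is required, as the `setUp` checks indicate); the expected magnitudes in the comments come from the assertions in `TestMWpotentialBovy2015`.

```python
# Sketch: evaluating the Bovy (2015) Milky Way potential at the solar radius,
# reusing methods from the tests above. Requires scipy.
from amuse.units import units
from amuse.ext.galactic_potentials import MWpotentialBovy2015

mw = MWpotentialBovy2015()
r0 = 8. | units.kpc
rho_r0 = mw.mass_density(r0, 0. | units.m, 0. | units.m)
print(rho_r0.in_(units.MSun / units.parsec**3))           # ~0.10 MSun/pc^3
print(mw.enclosed_mass(60. | units.kpc).in_(units.MSun))  # ~4.08e11 MSun
```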
#### File: test/ext_tests/test_grid_to_sph.py
```python
import os.path
import numpy
from amuse.test.amusetest import get_path_to_results, TestWithMPI
try:
from matplotlib import pyplot
HAS_MATPLOTLIB = True
from amuse.plot import plot, semilogy, xlabel, ylabel, loglog
except ImportError:
HAS_MATPLOTLIB = False
from amuse.support.exceptions import AmuseException
from amuse.ext.grid_to_sph import Grid2SPH, convert_grid_to_SPH
from amuse.units import units
from amuse.units import generic_unit_system
from amuse.units import nbody_system
from amuse.units import constants
from amuse.units.generic_unit_converter import ConvertBetweenGenericAndSiUnits
from amuse.datamodel import Particles
from amuse.datamodel import Particle
from amuse.datamodel import ParticlesSuperset
from amuse.datamodel import Grid
def create_grid(*arg):
grid=Grid.create(*arg)
grid.add_vector_attribute("momentum", ["rhovx","rhovy","rhovz"])
return grid
class TestGrid2SPH(TestWithMPI):
def setup_simple_grid(self):
test_grid = create_grid((4,3,2), [1.0, 1.0, 1.0] | units.m)
test_grid.rho = numpy.linspace(1.0, 2.0, num=24).reshape(test_grid.shape) | units.kg/units.m**3
test_grid.rhovx = test_grid.rho * (3.0 | units.m/units.s)
test_grid.rhovy = test_grid.rho * (4.0 | units.m/units.s)
test_grid.rhovz = test_grid.rho * (0.0 | units.m/units.s)
test_grid.energy = test_grid.rho * ((1.0 | (units.m/units.s)**2) + 0.5 * (5.0 | units.m/units.s)**2)
return test_grid
def test0(self):
print "Testing the simple example grid"
test_grid = self.setup_simple_grid()
self.assertEqual(test_grid.position[ 0][ 0][ 0], [1.0/8.0, 1.0/6.0, 1.0/4.0] | units.m)
self.assertEqual(test_grid.position[-1][-1][-1], [7.0/8.0, 5.0/6.0, 3.0/4.0] | units.m)
self.assertEqual(test_grid.momentum[ 0][ 0][ 0], [3.0, 4.0, 0.0] | (units.kg/units.m**3) * (units.m/units.s))
self.assertEqual(test_grid.momentum[-1][-1][-1], [6.0, 8.0, 0.0] | (units.kg/units.m**3) * (units.m/units.s))
self.assertEqual(test_grid.energy[ 0][ 0][ 0], 13.5 | (units.J/units.m**3))
self.assertEqual(test_grid.energy[-1][-1][-1], 27.0 | (units.J/units.m**3))
def test1(self):
print "Testing the converter"
number_of_particles = 10000
test_grid = self.setup_simple_grid()
converter = Grid2SPH(test_grid, number_of_particles)
self.assertTrue(converter.grid is test_grid)
self.assertEqual(converter.shape, (4,3,2))
self.assertEqual(converter.number_of_sph_particles, number_of_particles)
self.assertEqual(converter.base_distribution_type, "uniform")
converter.setup_lookup_tables()
converter.setup_variates()
self.assertEqual(converter.cumulative_weight[0], 1.0/(1.5*4*3*2))
self.assertEqual(converter.cumulative_weight[-1], 1.0)
self.assertEqual(converter.position_lookup_table[0], [1.0/8.0, 1.0/6.0, 1.0/4.0] | units.m)
self.assertEqual(converter.position_lookup_table[-1], [7.0/8.0, 5.0/6.0, 3.0/4.0] | units.m)
self.assertEqual(converter.position_lookup_table[9], [3.0/8.0, 3.0/6.0, 3.0/4.0] | units.m)
self.assertAlmostEqual(converter.velocity_lookup_table, [3.0, 4.0, 0.0] | units.m/units.s)
self.assertAlmostEqual(converter.specific_internal_energy_lookup_table, 1.0 | units.J/units.kg)
self.assertEqual(converter.cellsize_unit, units.m)
self.assertTrue(converter.cellsize_unit is units.m)
self.assertAlmostEqual(converter.cellsize_number, [0.25, 1/3.0, 0.5])
self.assertAlmostEqual(converter.mass, 1.5 | units.kg)
# The number of particles in a cell should scale with the amount of mass in the cell:
self.assertAlmostRelativeEqual(
converter.mass * numpy.histogram(converter.indices, bins=4*3*2)[0] * 1.0/number_of_particles,
test_grid.rho.flatten()*test_grid.cellsize().prod(),
places = 2
)
def test2(self):
print "Testing the user interface"
number_of_particles = 10000
test_grid = self.setup_simple_grid()
sph_particles = convert_grid_to_SPH(test_grid, number_of_particles)
self.assertEqual(len(sph_particles), number_of_particles)
self.assertAlmostEqual(sph_particles.mass.sum(), 1.5 | units.kg)
self.assertAlmostEqual(sph_particles.velocity, [3.0, 4.0, 0.0] | units.m/units.s)
self.assertAlmostEqual(sph_particles.u, 1.0 | (units.m/units.s)**2)
# The number of particles in a cell should scale with the amount of mass in the cell:
self.assertAlmostRelativeEqual(
(1.5 | units.kg)/number_of_particles * numpy.histogramdd(
sph_particles.position.value_in(units.m), bins=(4,3,2))[0],
test_grid.rho*test_grid.cellsize().prod(),
places = 2
)
self.assertAlmostEqual(sph_particles.h_smooth, (50.0/number_of_particles)**(1.0/3) | units.m)
def test3(self):
print "Testing the user interface, random base_distribution_type"
number_of_particles = 10000
test_grid = self.setup_simple_grid()
sph_particles = convert_grid_to_SPH(test_grid, number_of_particles,
base_distribution_type = "random", seed = 12345)
self.assertEqual(len(sph_particles), number_of_particles)
self.assertAlmostEqual(sph_particles.mass.sum(), 1.5 | units.kg)
self.assertAlmostEqual(sph_particles.velocity, [3.0, 4.0, 0.0] | units.m/units.s)
self.assertAlmostEqual(sph_particles.u, 1.0 | (units.m/units.s)**2)
# For 'random', the number of particles in a cell should scale only on average
# with the amount of mass in the cell:
self.assertAlmostRelativeEqual(
((1.5 | units.kg)/number_of_particles * numpy.histogramdd(
sph_particles.position.value_in(units.m), bins=(4,3,2))[0]).sum(),
(test_grid.rho*test_grid.cellsize().prod()).sum(),
places = 2
)
self.assertRaises(AssertionError,
self.assertAlmostRelativeEqual,
(1.5 | units.kg)/number_of_particles * numpy.histogramdd(sph_particles.position.value_in(units.m), bins=(4,3,2))[0],
test_grid.rho*test_grid.cellsize().prod(),
places = 2,
)
self.assertAlmostEqual(sph_particles.h_smooth, (50.0/number_of_particles)**(1.0/3) | units.m)
def test4(self):
print "Testing exceptions"
number_of_particles = 10000
test_grid = self.setup_simple_grid()
self.assertEqual(test_grid[0].number_of_dimensions(), 2)
self.assertRaises(AmuseException, convert_grid_to_SPH, test_grid[0], number_of_particles,
expected_message = "Grid must be 3D")
self.assertRaises(AmuseException, convert_grid_to_SPH, test_grid,
number_of_particles, base_distribution_type = "bogus",
expected_message = "Unknown base_distribution_type: bogus. Possible "
"options are: 'random' or 'uniform'.")
```
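The grid-to-SPH tests construct a small uniform grid and check that the particle masses sampled per cell track the cell masses. A compact sketch of the conversion step, reusing the grid setup from `setup_simple_grid` above:

```python
# Sketch: converting a 3D hydro grid to an SPH particle set, following the
# simple-grid setup used in the tests above.
import numpy
from amuse.units import units
from amuse.datamodel import Grid
from amuse.ext.grid_to_sph import convert_grid_to_SPH

grid = Grid.create((4, 3, 2), [1.0, 1.0, 1.0] | units.m)
grid.add_vector_attribute("momentum", ["rhovx", "rhovy", "rhovz"])
grid.rho = numpy.linspace(1.0, 2.0, num=24).reshape(grid.shape) | units.kg / units.m**3
grid.rhovx = grid.rho * (3.0 | units.m / units.s)
grid.rhovy = grid.rho * (4.0 | units.m / units.s)
grid.rhovz = grid.rho * (0.0 | units.m / units.s)
grid.energy = grid.rho * ((1.0 | (units.m / units.s)**2) + 0.5 * (5.0 | units.m / units.s)**2)

sph_particles = convert_grid_to_SPH(grid, 10000)
print(len(sph_particles), sph_particles.mass.sum().in_(units.kg))  # 10000, ~1.5 kg
```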
#### File: test/reports/plot_speed_report.py
```python
import sys
import numpy
from optparse import OptionParser
try:
from matplotlib import pyplot
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
def select(row, cols_spec):
subspecs = cols_spec.split(',')
subspecs = map(str.strip, subspecs)
cols = []
for subspec in subspecs:
parts = subspec.split('-')
if len(parts) == 1:
cols.append(int(parts[0]))
else:
if len(parts[1]) == 0:
end = len(row)
else:
end = int(parts[1])
if end < 0:
end = len(row) + end
cols.extend(range(int(parts[0]), end))
for index in cols:
yield row[index]
def plot_speed_report(input_filename = None, output_filename = None, cols = '0-'):
with open(input_filename, 'r') as stream:
lines = stream.readlines()
header = None
x = []
data = []
for line in lines:
if line.startswith('#'):
header_for_next_line = line[1:].split(',')
header_for_next_line = list(select(header_for_next_line[2:], cols))
if not header is None:
if not header == header_for_next_line:
raise Exception("data does not have same header")
header = header_for_next_line
else:
parts = map(str.strip, line.split(','))
if parts[0]== '':
continue
x.append(int(parts[0]))
numbers = map(lambda x : float(x), parts[2:])
data.append(list(select(numbers, cols)))
x = numpy.asarray(x)
data = numpy.asarray(data)
print data.shape
figure = pyplot.figure(figsize=(9, 4))
subplot = pyplot.subplot(1,2,1)
handles = subplot.plot(x,data)
subplot.plot(x,1e-5 * x)
subplot.legend(
handles,
header,
loc='center left',
bbox_to_anchor=(1.05, 0.5),
ncol=1,
fancybox=False,
shadow=False)
pyplot.loglog()
if output_filename is None:
pyplot.show()
else:
pyplot.savefig(output_filename)
def new_option_parser():
result = OptionParser()
result.add_option(
"-o",
default = None,
dest="output_filename",
help="save figure to output, by default it will display it",
type="string"
)
result.add_option(
"-i",
default = 'report.csv',
dest="input_filename",
help="name of the file to load the data from",
type="string"
)
result.add_option(
"--cols",
default = '0-',
dest="cols",
help="columns to plot, can by 1,2,3 or 0-3 or 0-5, 6, 3",
type="string"
)
return result
if __name__ == '__main__':
options, arguments = new_option_parser().parse_args()
plot_speed_report(**options.__dict__)
``` |
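The `--cols` option of the plotting script is parsed by the `select` generator above: comma-separated entries are either single indices or `start-end` ranges, where the end index is exclusive and an empty end (`N-`) runs to the end of the row. A quick interactive check of those semantics, assuming the script is importable as a module under Python 2 (the dialect the file above is written in):

```python
# Quick check of the column-spec semantics of select() in the script above.
# Assumes Python 2 and that the script is importable as plot_speed_report.
from plot_speed_report import select

row = ['a', 'b', 'c', 'd', 'e', 'f']
print(list(select(row, '0-')))   # ['a', 'b', 'c', 'd', 'e', 'f'] -- open-ended range
print(list(select(row, '1,3')))  # ['b', 'd']                     -- single indices
print(list(select(row, '0-3')))  # ['a', 'b', 'c']                -- end index is exclusive
```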
{
"source": "joshuawassink/telemanom",
"score": 3
} |
#### File: telemanom/telemanom/errors.py
```python
import numpy as np
import pandas as pd
import more_itertools as mit
import os
import logging
logger = logging.getLogger('telemanom')
class Errors:
def __init__(self, channel, config, run_id):
"""
Batch processing of errors between actual and predicted values
for a channel.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
config (obj): Config object containing parameters for processing
run_id (str): Datetime referencing set of predictions in use
Attributes:
config (obj): see Args
window_size (int): number of trailing batches to use in error
calculation
n_windows (int): number of windows in test values for channel
i_anom (arr): indices of anomalies in channel test values
E_seq (arr of tuples): array of (start, end) indices for each
continuous anomaly sequence in test values
anom_scores (arr): score indicating relative severity of each
anomaly sequence in E_seq
e (arr): errors in prediction (predicted - actual)
e_s (arr): exponentially-smoothed errors in prediction
normalized (arr): prediction errors as a percentage of the range
of the channel values
"""
self.config = config
self.window_size = self.config.window_size
self.n_windows = int((channel.y_test.shape[0] -
(self.config.batch_size * self.window_size))
/ self.config.batch_size)
self.i_anom = np.array([])
self.E_seq = []
self.anom_scores = []
# raw prediction error
self.e = [abs(y_h-y_t[0]) for y_h, y_t in
zip(channel.y_hat, channel.y_test)]
smoothing_window = int(self.config.batch_size * self.config.window_size
* self.config.smoothing_perc)
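# For example, with illustrative parameter values batch_size=70,
# window_size=30 and smoothing_perc=0.05 (hypothetical, not taken from any
# particular config), the EWMA span would be int(70 * 30 * 0.05) = 105
# trailing error values.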
if not len(channel.y_hat) == len(channel.y_test):
raise ValueError('len(y_hat) != len(y_test): {}, {}'
.format(len(channel.y_hat), len(channel.y_test)))
# smoothed prediction error
self.e_s = pd.DataFrame(self.e).ewm(span=smoothing_window)\
.mean().values.flatten()
# for values at beginning < sequence length, just use avg
if not channel.id == 'C-2': # anomaly occurs early in window
self.e_s[:self.config.l_s] = \
[np.mean(self.e_s[:self.config.l_s * 2])] * self.config.l_s
np.save(os.path.join('data', run_id, 'smoothed_errors', '{}.npy'
.format(channel.id)),
np.array(self.e_s))
self.normalized = np.mean(self.e / np.ptp(channel.y_test))
logger.info("normalized prediction error: {0:.2f}"
.format(self.normalized))
def adjust_window_size(self, channel):
"""
Decrease the historical error window size (h) if number of test
values is limited.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
"""
while self.n_windows < 0:
self.window_size -= 1
self.n_windows = int((channel.y_test.shape[0]
- (self.config.batch_size * self.window_size))
/ self.config.batch_size)
if self.window_size == 1 and self.n_windows < 0:
raise ValueError('Batch_size ({}) larger than y_test (len={}). '
'Adjust in config.yaml.'
.format(self.config.batch_size,
channel.y_test.shape[0]))
def merge_scores(self):
"""
If anomalous sequences from subsequent batches are adjacent they
will automatically be combined. This combines the scores for these
initial adjacent sequences (scores are calculated as each batch is
processed) where applicable.
"""
merged_scores = []
score_end_indices = []
for i, score in enumerate(self.anom_scores):
if not score['start_idx']-1 in score_end_indices:
merged_scores.append(score['score'])
score_end_indices.append(score['end_idx'])
def process_batches(self, channel):
"""
Top-level function for the Error class that loops through batches
of values for a channel.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
"""
self.adjust_window_size(channel)
for i in range(0, self.n_windows+1):
prior_idx = i * self.config.batch_size
idx = (self.config.window_size * self.config.batch_size) \
+ (i * self.config.batch_size)
if i == self.n_windows:
idx = channel.y_test.shape[0]
window = ErrorWindow(channel, self.config, prior_idx, idx, self, i)
window.find_epsilon()
window.find_epsilon(inverse=True)
window.compare_to_epsilon(self)
window.compare_to_epsilon(self, inverse=True)
if len(window.i_anom) == 0 and len(window.i_anom_inv) == 0:
continue
window.prune_anoms()
window.prune_anoms(inverse=True)
if len(window.i_anom) == 0 and len(window.i_anom_inv) == 0:
continue
window.i_anom = np.sort(np.unique(
np.append(window.i_anom, window.i_anom_inv))).astype('int')
window.score_anomalies(prior_idx)
# update indices to reflect true indices in full set of values
self.i_anom = np.append(self.i_anom, window.i_anom + prior_idx)
self.anom_scores = self.anom_scores + window.anom_scores
if len(self.i_anom) > 0:
# group anomalous indices into continuous sequences
groups = [list(group) for group in
mit.consecutive_groups(self.i_anom)]
self.E_seq = [(int(g[0]), int(g[-1])) for g in groups
if not g[0] == g[-1]]
# additional shift is applied to indices so that they represent the
# position in the original data array, obtained from the .npy files,
# and not the position on y_test (See PR #27).
self.E_seq = [(e_seq[0] + self.config.l_s,
e_seq[1] + self.config.l_s) for e_seq in self.E_seq]
self.merge_scores()
class ErrorWindow:
def __init__(self, channel, config, start_idx, end_idx, errors, window_num):
"""
Data and calculations for a specific window of prediction errors.
Includes finding thresholds, pruning, and scoring anomalous sequences
for errors and inverted errors (flipped around mean) - significant drops
in values can also be anomalous.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
config (obj): Config object containing parameters for processing
start_idx (int): Starting index for window within full set of
channel test values
end_idx (int): Ending index for window within full set of channel
test values
errors (arr): Errors class object
window_num (int): Current window number within channel test values
Attributes:
i_anom (arr): indices of anomalies in window
i_anom_inv (arr): indices of anomalies in window of inverted
telemetry values
E_seq (arr of tuples): array of (start, end) indices for each
continuous anomaly sequence in window
E_seq_inv (arr of tuples): array of (start, end) indices for each
continuous anomaly sequence in window of inverted telemetry
values
non_anom_max (float): highest smoothed error value below epsilon
non_anom_max_inv (float): highest smoothed error value below
epsilon_inv
config (obj): see Args
anom_scores (arr): score indicating relative severity of each
anomaly sequence in E_seq within a window
window_num (int): see Args
sd_lim (int): default number of standard deviations to use for
threshold if no winner or too many anomalous ranges when scoring
candidate thresholds
sd_threshold (float): number of standard deviations for calculation
of best anomaly threshold
sd_threshold_inv (float): same as above for inverted channel values
e_s (arr): exponentially-smoothed prediction errors in window
e_s_inv (arr): inverted e_s
sd_e_s (float): standard deviation of e_s
mean_e_s (float): mean of e_s
epsilon (float): threshold for e_s above which an error is
considered anomalous
epsilon_inv (float): threshold for inverted e_s above which an error
is considered anomalous
y_test (arr): Actual telemetry values for window
sd_values (float): st dev of y_test
perc_high (float): the 95th percentile of y_test values
perc_low (float): the 5th percentile of y_test values
inter_range (float): the range between perc_high - perc_low
num_to_ignore (int): number of values to ignore initially when
looking for anomalies
"""
self.i_anom = np.array([])
self.E_seq = np.array([])
self.non_anom_max = -1000000
self.i_anom_inv = np.array([])
self.E_seq_inv = np.array([])
self.non_anom_max_inv = -1000000
self.config = config
self.anom_scores = []
self.window_num = window_num
self.sd_lim = 12.0
self.sd_threshold = self.sd_lim
self.sd_threshold_inv = self.sd_lim
self.e_s = errors.e_s[start_idx:end_idx]
self.mean_e_s = np.mean(self.e_s)
self.sd_e_s = np.std(self.e_s)
self.e_s_inv = np.array([self.mean_e_s + (self.mean_e_s - e)
for e in self.e_s])
self.epsilon = self.mean_e_s + self.sd_lim * self.sd_e_s
self.epsilon_inv = self.mean_e_s + self.sd_lim * self.sd_e_s
self.y_test = channel.y_test[start_idx:end_idx]
self.sd_values = np.std(self.y_test)
self.perc_high, self.perc_low = np.percentile(self.y_test, [95, 5])
self.inter_range = self.perc_high - self.perc_low
# ignore initial error values until enough history for processing
self.num_to_ignore = self.config.l_s * 2
# if y_test is small, ignore fewer
if len(channel.y_test) < 2500:
self.num_to_ignore = self.config.l_s
if len(channel.y_test) < 1800:
self.num_to_ignore = 0
def find_epsilon(self, inverse=False):
"""
Find the anomaly threshold that maximizes function representing
tradeoff between:
a) number of anomalies and anomalous ranges
b) the reduction in mean and st dev if anomalous points are removed
from errors
(see https://arxiv.org/pdf/1802.04431.pdf)
Args:
inverse (bool): If true, epsilon is calculated for inverted errors
"""
e_s = self.e_s if not inverse else self.e_s_inv
max_score = -10000000
for z in np.arange(2.5, self.sd_lim, 0.5):
epsilon = self.mean_e_s + (self.sd_e_s * z)
pruned_e_s = e_s[e_s < epsilon]
i_anom = np.argwhere(e_s >= epsilon).reshape(-1,)
buffer = np.arange(1, self.config.error_buffer)
i_anom = np.sort(np.concatenate((i_anom,
np.array([i+buffer for i in i_anom])
.flatten(),
np.array([i-buffer for i in i_anom])
.flatten())))
i_anom = i_anom[(i_anom < len(e_s)) & (i_anom >= 0)]
i_anom = np.sort(np.unique(i_anom))
if len(i_anom) > 0:
# group anomalous indices into continuous sequences
groups = [list(group) for group
in mit.consecutive_groups(i_anom)]
E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]
mean_perc_decrease = (self.mean_e_s - np.mean(pruned_e_s)) \
/ self.mean_e_s
sd_perc_decrease = (self.sd_e_s - np.std(pruned_e_s)) \
/ self.sd_e_s
score = (mean_perc_decrease + sd_perc_decrease) \
/ (len(E_seq) ** 2 + len(i_anom))
# sanity checks / guardrails
if score >= max_score and len(E_seq) <= 5 and \
len(i_anom) < (len(e_s) * 0.5):
max_score = score
if not inverse:
self.sd_threshold = z
self.epsilon = self.mean_e_s + z * self.sd_e_s
else:
self.sd_threshold_inv = z
self.epsilon_inv = self.mean_e_s + z * self.sd_e_s
def compare_to_epsilon(self, errors_all, inverse=False):
"""
Compare smoothed error values to epsilon (error threshold) and group
consecutive errors together into sequences.
Args:
errors_all (obj): Errors class object containing list of all
previously identified anomalies in test set
"""
e_s = self.e_s if not inverse else self.e_s_inv
epsilon = self.epsilon if not inverse else self.epsilon_inv
# Check: scale of errors compared to values too small?
if not (self.sd_e_s > (.05 * self.sd_values) or max(self.e_s)
> (.05 * self.inter_range)) or not max(self.e_s) > 0.05:
return
i_anom = np.argwhere((e_s >= epsilon) &
(e_s > 0.05 * self.inter_range)).reshape(-1,)
if len(i_anom) == 0:
return
buffer = np.arange(1, self.config.error_buffer+1)
i_anom = np.sort(np.concatenate((i_anom,
np.array([i + buffer for i in i_anom])
.flatten(),
np.array([i - buffer for i in i_anom])
.flatten())))
i_anom = i_anom[(i_anom < len(e_s)) & (i_anom >= 0)]
# if it is first window, ignore initial errors (need some history)
if self.window_num == 0:
i_anom = i_anom[i_anom >= self.num_to_ignore]
else:
i_anom = i_anom[i_anom >= len(e_s) - self.config.batch_size]
i_anom = np.sort(np.unique(i_anom))
# capture max of non-anomalous values below the threshold
# (used in filtering process)
batch_position = self.window_num * self.config.batch_size
window_indices = np.arange(0, len(e_s)) + batch_position
adj_i_anom = i_anom + batch_position
window_indices = np.setdiff1d(window_indices,
np.append(errors_all.i_anom, adj_i_anom))
candidate_indices = np.unique(window_indices - batch_position)
non_anom_max = np.max(np.take(e_s, candidate_indices))
# group anomalous indices into continuous sequences
groups = [list(group) for group in mit.consecutive_groups(i_anom)]
E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]
if inverse:
self.i_anom_inv = i_anom
self.E_seq_inv = E_seq
self.non_anom_max_inv = non_anom_max
else:
self.i_anom = i_anom
self.E_seq = E_seq
self.non_anom_max = non_anom_max
def prune_anoms(self, inverse=False):
"""
Remove anomalies that don't meet minimum separation from the next
closest anomaly or error value
Args:
inverse (bool): If true, epsilon is calculated for inverted errors
"""
E_seq = self.E_seq if not inverse else self.E_seq_inv
e_s = self.e_s if not inverse else self.e_s_inv
non_anom_max = self.non_anom_max if not inverse \
else self.non_anom_max_inv
if len(E_seq) == 0:
return
E_seq_max = np.array([max(e_s[e[0]:e[1]+1]) for e in E_seq])
E_seq_max_sorted = np.sort(E_seq_max)[::-1]
E_seq_max_sorted = np.append(E_seq_max_sorted, [non_anom_max])
i_to_remove = np.array([])
for i in range(0, len(E_seq_max_sorted)-1):
if (E_seq_max_sorted[i] - E_seq_max_sorted[i+1]) \
/ E_seq_max_sorted[i] < self.config.p:
i_to_remove = np.append(i_to_remove, np.argwhere(
E_seq_max == E_seq_max_sorted[i]))
else:
i_to_remove = np.array([])
i_to_remove[::-1].sort()
i_to_remove = [int(i) for i in i_to_remove]
if len(i_to_remove) > 0:
E_seq = np.delete(E_seq, i_to_remove, axis=0)
if len(E_seq) == 0 and inverse:
self.i_anom_inv = np.array([])
return
elif len(E_seq) == 0 and not inverse:
self.i_anom = np.array([])
return
indices_to_keep = np.concatenate([range(e_seq[0], e_seq[-1]+1)
for e_seq in E_seq])
if not inverse:
mask = np.isin(self.i_anom, indices_to_keep)
self.i_anom = self.i_anom[mask]
else:
mask_inv = np.isin(self.i_anom_inv, indices_to_keep)
self.i_anom_inv = self.i_anom_inv[mask_inv]
def score_anomalies(self, prior_idx):
"""
Calculate anomaly scores based on max distance from epsilon
for each anomalous sequence.
Args:
prior_idx (int): starting index of window within full set of test
values for channel
"""
groups = [list(group) for group in mit.consecutive_groups(self.i_anom)]
for e_seq in groups:
score_dict = {
"start_idx": e_seq[0] + prior_idx,
"end_idx": e_seq[-1] + prior_idx,
"score": 0
}
score = max([abs(self.e_s[i] - self.epsilon)
/ (self.mean_e_s + self.sd_e_s) for i in
range(e_seq[0], e_seq[-1] + 1)])
inv_score = max([abs(self.e_s_inv[i] - self.epsilon_inv)
/ (self.mean_e_s + self.sd_e_s) for i in
range(e_seq[0], e_seq[-1] + 1)])
# the max score indicates whether anomaly was from regular
# or inverted errors
score_dict['score'] = max([score, inv_score])
self.anom_scores.append(score_dict)
```
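The `find_epsilon` docstring above describes a score that trades off how much the mean and standard deviation shrink when candidate anomalies are removed against how many anomalous points and ranges that threshold produces. A minimal, self-contained sketch of that scoring loop on made-up errors (not part of the repository) follows:

```python
# Standalone sketch of the find_epsilon scoring idea; the error series is synthetic.
import numpy as np
import more_itertools as mit

e_s = np.abs(np.random.normal(0, 1, 500))
e_s[100:105] += 8                       # inject an anomalous bump
mean_e_s, sd_e_s = np.mean(e_s), np.std(e_s)

best_z, best_score = None, -np.inf
for z in np.arange(2.5, 12.0, 0.5):
    epsilon = mean_e_s + z * sd_e_s
    pruned = e_s[e_s < epsilon]
    i_anom = np.argwhere(e_s >= epsilon).reshape(-1,)
    if len(i_anom) == 0:
        continue
    groups = [list(g) for g in mit.consecutive_groups(i_anom)]
    E_seq = [(g[0], g[-1]) for g in groups if g[0] != g[-1]]
    # reward shrinking the mean/std, penalise many anomalous points and ranges
    score = ((mean_e_s - np.mean(pruned)) / mean_e_s
             + (sd_e_s - np.std(pruned)) / sd_e_s) / (len(E_seq) ** 2 + len(i_anom))
    if score > best_score:
        best_z, best_score = z, score

print('best z:', best_z, 'score:', best_score)
```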
#### File: telemanom/telemanom/preprocessor.py
```python
import logging
from sklearn.preprocessing import MinMaxScaler, StandardScaler
logger = logging.getLogger('telemanom')
class Preprocessor:
def __init__(self, config, chan_id, train, test):
"""
Preprocess data in preparation for modeling.
Args:
config (obj): Config object containing parameters for processing
chan_id (str): channel id
train (arr): numpy array containing raw train data
test (arr): numpy array containing raw test data
Attributes:
id (str): channel id
config (obj): see Args
train (arr): train data loaded from .npy file
test(arr): test data loaded from .npy file
"""
self.id = chan_id
self.config = config
        self.train = train
        self.test = test
self.scaler = None
if self.config.scale:
self.scale()
def scale(self):
"""Min/Max or Standard Scale
Remove outliers, etc.
"""
if self.config.scaler == 'min_max':
self.scaler = MinMaxScaler()
else:
self.scaler = StandardScaler()
self.train = self.scaler.fit_transform(self.train)
self.test = self.scaler.transform(self.test)
``` |
{
"source": "Joshua-Weaver1/ECM1400-Continuous-Assessment",
"score": 3
} |
#### File: Joshua-Weaver1/ECM1400-Continuous-Assessment/user_interface.py
```python
import json
import logging
from flask import Flask
from flask import render_template
import logging_formatting
import test_covid_news_handling as tcnh
import covid_data_handler as cdh
import covid_news_handling as cnh
import test_covid_data_handler as tcdh
#Initialise app
app = Flask(__name__)
#Create Logger for this module
logger = logging.getLogger(__name__)
#Opening JSON file
j = open('config.json')
#Returns JSON object as a dictionary
data_configuration_json = json.load(j)
#Closing file
j.close()
#Define Global Variables
news = cnh.news_API_request(covid_terms = "Covid COVID-19 coronavirus")
covid_data_exeter = cdh.covid_API_request()
covid_data_england = cdh.covid_API_request(location = data_configuration_json["location"],
location_type = data_configuration_json["location_type"])
@app.route('/')
def redirect():
"""Redirects user to the /index app route"""
#Logging
logging.info("The redirect function has been called")
string = "Please enter the url http://127.0.0.1:5000/index to access the dashboard."
return string
@app.route('/index')
def run_application():
"""Main function which is responsible for the events produced by the client"""
#Logging
logging.info("The run_application function has been called")
    #Perform Tests
#Covid Data Handler Tests
tcdh.test_parse_csv_data()
tcdh.test_process_covid_csv_data()
tcdh.test_covid_API_request()
#Covid News Handling Tests
tcnh.test_news_API_request()
news_articles = news['articles']
cnh.delete_news_article(news_articles)
return render_template('index.html',
title = 'Coronavirus Daily Update',
image = 'coronavirus1.jpg',
news_articles = news['articles'],
location = covid_data_exeter['data'][0]['areaName'],
local_7day_infections =
covid_data_exeter['data'][0]['newCasesByPublishDateRollingSum'],
nation_location =
covid_data_england['data'][0]['areaName'],
national_7day_infections =
covid_data_england['data'][0]['newCasesByPublishDateRollingSum'],
hospital_cases =
"Hospital Cases: " + str(covid_data_england['data'][0]['hospitalCases']),
deaths_total =
"Total Deaths: " + str(covid_data_england['data'][0]['cumDeaths28DaysByPublishDate']))
if __name__ == '__main__':
app.run()
``` |
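The dashboard above reads `config.json` at import time and expects at least the `location` and `location_type` keys. A hypothetical setup sketch (the key values are assumptions, not taken from the repository):

```python
# Write a minimal config.json before starting the dashboard; values are examples only.
import json

with open('config.json', 'w') as f:
    json.dump({"location": "England", "location_type": "nation"}, f)

# Then run the module and browse to http://127.0.0.1:5000/index
#   python user_interface.py
```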
{
"source": "joshuaword2alt/ENUNU",
"score": 2
} |
#### File: ENUNU/synthesis/enunu.py
```python
from datetime import datetime
from os import chdir, makedirs, startfile
from os.path import basename, dirname, exists, join, splitext
from sys import argv
from tempfile import mkdtemp
import colored_traceback.always # pylint: disable=unused-import
import utaupy
from omegaconf import DictConfig, OmegaConf
from utaupy.utils import hts2json, ustobj2songobj
try:
from hts2wav import hts2wav
except ModuleNotFoundError:
print('----------------------------------------------------------')
print('初回起動ですね。')
print('PC環境に合わせてPyTorchを自動インストールします。')
print('インストール完了までしばらくお待ちください。')
print('----------------------------------------------------------')
from install_torch import pip_install_torch
pip_install_torch(join('.', 'python-3.8.10-embed-amd64', 'python.exe'))
print('----------------------------------------------------------')
print('インストール成功しました。歌声合成を始めます。')
print('----------------------------------------------------------\n')
from hts2wav import hts2wav # pylint: disable=ungrouped-imports
def get_project_path(utauplugin: utaupy.utauplugin.UtauPlugin):
"""
    Get the cache path and the project path.
"""
setting = utauplugin.setting
    # path to the UST file
path_ust = setting.get('Project')
    # voicebank folder
voice_dir = setting['VoiceDir']
    # voice cache folder (where the LAB and JSON files are placed)
cache_dir = setting['CacheDir']
return path_ust, voice_dir, cache_dir
def utauplugin2hts(path_plugin_in, path_table, path_full_out, path_mono_out=None,
strict_sinsy_style=False):
"""
    Optimised for UTAU plugin input rather than a plain UST file.
    A modified version of ust2hts from ust2hts.py that also
    handles the [#PREV] and [#NEXT] notes.
"""
    # read the temporary plugin file
plugin = utaupy.utauplugin.load(path_plugin_in)
    # read the conversion table
table = utaupy.table.load(path_table, encoding='utf-8')
    # check that at least two notes are selected
if len(plugin.notes) < 2:
raise Exception('ENUNU requires at least 2 notes. / ENUNUを使うときは2ノート以上選択してください。')
    # turn notes whose lyric is empty or blank into rests
for note in plugin.notes:
if note.lyric.strip(' ') == '':
note.lyric = 'R'
    # check whether [#PREV] or [#NEXT] notes are present
prev_exists = plugin.previous_note is not None
next_exists = plugin.next_note is not None
if prev_exists:
plugin.notes.insert(0, plugin.previous_note)
if next_exists:
plugin.notes.append(plugin.next_note)
# Ust → HTSFullLabel
song = ustobj2songobj(plugin, table)
full_label = utaupy.hts.HTSFullLabel()
full_label.song = song
full_label.fill_contexts_from_songobj()
    # adjust the contexts around rests before [#PREV] and [#NEXT] are removed
if prev_exists or next_exists:
full_label = utaupy.hts.adjust_pau_contexts(full_label, strict=strict_sinsy_style)
    # remove the lines that belong to the [#PREV] note
if prev_exists:
target_note = full_label[0].note
while full_label[0].note is target_note:
del full_label[0]
        # even after removing PREV, times are shifted by the previous note; make the first phoneme start at 0
        # get the offset
offset = full_label[0].start
        # shift the start and end time of every phoneme
for oneline in full_label:
oneline.start -= offset
oneline.end -= offset
    # remove the lines that belong to the [#NEXT] note
if next_exists:
target_note = full_label[-1].note
while full_label[-1].note is target_note:
del full_label[-1]
    # write the output files
s = '\n'.join(list(map(str, full_label)))
with open(path_full_out, mode='w', encoding='utf-8') as f:
f.write(s)
if path_mono_out is not None:
full_label.as_mono().write(path_mono_out)
def main_as_plugin(path_plugin: str) -> str:
"""
    Generate an audio file from a UtauPlugin object.
"""
print(f'{datetime.now()} : reading setting in ust')
    # read the settings written in UTAU's temporary file
plugin = utaupy.utauplugin.load(path_plugin)
path_ust, voice_dir, _ = get_project_path(plugin)
path_enuconfig = join(voice_dir, 'enuconfig.yaml')
if not exists(path_enuconfig):
raise Exception(
'音源フォルダに enuconfig.yaml が見つかりません。'
'UTAU音源選択でENUNU用モデルを指定してください。'
)
    # change the current directory to the voicebank folder
chdir(voice_dir)
    # read the config file
print(f'{datetime.now()} : reading enuconfig')
config = DictConfig(OmegaConf.load(path_enuconfig))
    # set the input/output paths
if path_ust is not None:
songname = f"{splitext(basename(path_ust))[0]}__{datetime.now().strftime('%Y%m%d%H%M%S')}"
out_dir = join(dirname(path_ust), songname)
    # if the UST has not been saved yet
else:
print('USTが保存されていないので一時フォルダにWAV出力します。')
songname = f"temp__{datetime.now().strftime('%Y%m%d%H%M%S')}"
out_dir = mkdtemp(prefix='enunu-')
    # create the output folder if it does not exist
makedirs(out_dir, exist_ok=True)
    # set the paths of the output files
path_full_score_lab = join(out_dir, f'{songname}_full_score.lab')
path_mono_score_lab = join(out_dir, f'{songname}_mono_score.lab')
path_json = join(out_dir, f'{songname}_full_score.json')
path_wav = join(out_dir, f'{songname}.wav')
path_ust_out = join(out_dir, f'{songname}.ust')
    # generate the full-context label
print(f'{datetime.now()} : converting TMP to LAB')
utauplugin2hts(
path_plugin,
config.table_path,
path_full_score_lab,
path_mono_out=path_mono_score_lab,
strict_sinsy_style=(not config.trained_for_enunu)
)
    # file handling
    # export the selected range as a UST (for musicxml)
print(f'{datetime.now()} : exporting UST')
new_ust = plugin.as_ust()
for note in new_ust.notes:
        # remove everything except the basic note information
note.suppin()
        # turn notes without lyrics into rests
if note.lyric.strip(' ') == '':
note.lyric = 'R'
new_ust.write(path_ust_out)
print(f'{datetime.now()} : converting LAB to JSON')
hts2json(path_full_score_lab, path_json)
print(f'{datetime.now()} : converting LAB to WAV')
hts2wav(config, path_full_score_lab, path_wav)
print(f'{datetime.now()} : generating WAV ({path_wav})')
    # on Windows, play the generated audio
startfile(path_wav)
return path_wav
def main(path: str):
"""
    Branch the processing depending on the input file type.
"""
# logging.basicConfig(level=logging.INFO)
if path.endswith('.tmp'):
main_as_plugin(path)
else:
raise ValueError('Input file must be TMP(plugin).')
if __name__ == '__main__':
print('_____ξ ・ヮ・)ξ < ENUNU v0.2.5 ________')
print(f'argv: {argv}')
if len(argv) == 2:
path_utauplugin = argv[1]
elif len(argv) == 1:
path_utauplugin = \
input('Input file path of TMP(plugin)\n>>> ').strip('"')
main(path_utauplugin)
``` |
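ENUNU is normally launched by UTAU, which passes a temporary plugin file (`.tmp`) as `argv[1]`. A small inspection sketch using the same `utaupy` calls as `get_project_path` (the file name is a placeholder and `utaupy` must be installed):

```python
import utaupy

# Load a UTAU plugin temp file and look at the settings main_as_plugin() relies on.
plugin = utaupy.utauplugin.load('script.tmp')
print(plugin.setting.get('Project'))   # UST path, may be None if the UST is unsaved
print(plugin.setting['VoiceDir'])      # voicebank folder; must contain enuconfig.yaml
print(plugin.setting['CacheDir'])      # cache folder for LAB/JSON files
```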
{
"source": "joshua-wu/easy_word_cloud",
"score": 4
} |
#### File: easy_word_cloud/easywordcloud/layout_cloud.py
```python
import random
import os
import numpy as np
import math
import imp
try:
imp.find_module('PIL')
found = True
except ImportError:
found = False
if found:
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
else:
import Image
import ImageDraw
import ImageFont
debug = 0
float_min = 1e-300
from sys import platform as _platform
def get_default_font():
"""return the default font of different operator system,
highly recommend you prepare the fonts youself instead of using this function"""
if _platform == "linux" or _platform == "linux2":
# linux, sorry, i dont know much
FONT_PATH = '/usr/share/fonts/truetype/droid/DroidSansMono.ttf'
elif _platform == "darwin":
# OS X
FONT_PATH = "/Library/Fonts/hei.ttf"
elif _platform == "win32":
# Windows...
FONT_PATH = "c:/windows/fonts/msyh.ttf"
return FONT_PATH
def draw_word_cloud(words, width=800, height=600, output_file_name=None, font_path=None):
"""
Generate the word cloud.
Parameters
----------
words : array of tuples
        Each tuple contains a word and its weight. The weight must be greater than 0 and can be any positive number.
width : int (default=800)
Width of the canvas.
height : int (default=600)
Height of the canvas.
output_file_name: string (default=None)
the path to the output figure name.
if the output_file_name is not None, a picture with path output_file_name will be saved.
        if output_file_name is None, an image is displayed.
        On Unix platforms it saves the image to a temporary PPM file and calls the xv utility.
        On Windows it saves the image to a temporary BMP file and uses the standard BMP display utility to show it.
font_path : string
        Font path to the font that will be used (OTF or TTF). If font_path is None,
        get_default_font is called to pick a platform default.
Notes
-----
"""
best_score = 0
best_elements = None
for font_scale in [0.1, 0.5, 1, 2, 5, 7, 10, 15, 20, 30, 50]:
elements, score, fill_rate, show_rate = fit_words(
words, width=width, height=height, margin=2, scale=font_scale)
if debug >= 1:
            print('scale:', font_scale, 'score:', score, 'show_rate:', show_rate, 'fill_rate:', fill_rate)
if score > best_score:
best_elements, best_score = elements, score
if score == 0.0:
break
draw(best_elements, output_file_name, width=width, height=height, scale=1)
def random_color_func(word, font_size, position, orientation):
return "hsl(%d" % random.randint(0, 255) + ", 80%, 50%)"
def select_orintation(font_size, font_path, canvas_size, word, margin, draw, font):
""""choice the orintation for each word"""
width, height = canvas_size
draw.setfont(font)
nontransposed_box_size = draw.textsize(word)
transposed_font = ImageFont.TransposedFont(font, orientation=Image.ROTATE_90)
draw.setfont(transposed_font)
transposed_box_size = draw.textsize(word)
box_size = None
orientation = None
if not check_in_bound((width, height), (transposed_box_size[1] + margin, transposed_box_size[0] + margin)):
box_size = nontransposed_box_size
orientation = None
elif not check_in_bound((width, height), (nontransposed_box_size[1] + margin, nontransposed_box_size[0] + margin)):
box_size = transposed_box_size
orientation = Image.ROTATE_90
if debug >= 1:
print('trans:', transposed_box_size, 'nontrans:', nontransposed_box_size, orientation, box_size)
# transpose font optionally
if box_size is None:
box_size, orientation = random.choice([(nontransposed_box_size, None)]*9 + [(transposed_box_size, Image.ROTATE_90)])
return box_size, orientation
def fit_words(words, font_path=None, width=80, height=40,
margin=2, prefer_horiz=0.90, scale=5, file_name=None):
"""Generate the positions for words.
Parameters
----------
words : array of tuples
A tuple contains the word and its frequency.
font_path : string
Font path to the font that will be used (OTF or TTF).
Defaults to DroidSansMono path, but you might not have it.
    width : int (default=80)
        Width of the canvas.
    height : int (default=40)
        Height of the canvas.
margin: int(default=2)
prefer_horiz : float (default=0.90)
The ratio of times to try horizontal fitting as opposed to vertical.
    scale : int (default=5)
        this number is used to scale the font size in case the font is too small.
Notes
-----
"""
if len(words) <= 0:
print("We need at least 1 word to plot a word cloud, got %d."
% len(words))
if font_path is None:
font_path = get_default_font()
if not os.path.exists(font_path):
raise ValueError("The font %s does not exist." % font_path)
# create image
img_grey = Image.new("L", (width, height))
draw = ImageDraw.Draw(img_grey)
valid_words, font_sizes, positions, orientations = [], [], [], []
#sort the words by weight
sum_weight = sum(weight for word, weight in words)
words = [(word, weight * 1.0 / sum_weight) for word, weight in words]
# start drawing grey image
for word, weight in sorted(words, key=lambda x: x[1], reverse=True):
# alternative way to set the font size
integral = np.asarray(img_grey)
font_size = int((weight * height * scale))
font = ImageFont.truetype(font_path, font_size)
box_size, orientation = select_orintation(font_size, font_path, (width, height), word, margin, draw, font)
# find possible places using integral image:
result = query_integral_image(integral, (box_size[0] + margin,
box_size[1] + margin))
if result is None:
break
if debug >= 1:
print('font_size', font_size, word, weight, 'orientation:', orientation, 'pos:', result, 'box_size:', box_size)
x, y = np.array(result) + margin // 2
#need to reset the font
transposed_font = ImageFont.TransposedFont(font, orientation=orientation)
draw.setfont(transposed_font)
draw.text((y, x), word, fill="white")
# store the information
valid_words.append((word, weight))
positions.append((x, y))
orientations.append(orientation)
font_sizes.append(font_size)
fill_rate = 1.0 * (integral != 0).sum() / (integral.shape[0] * integral.shape[1])
show_rate = len(valid_words) * 1.0 / len(words)
score = show_rate * fill_rate
if debug >= 3:
print(zip(valid_words, font_sizes, positions, orientations))
print('size:', len(valid_words), 'all:', len(words))
if debug >= 1:
        print('integral sum:', (integral != 0).sum(), 'show_rate:', show_rate, 'fill_rate:', fill_rate, 'score:', score)
return zip(valid_words, font_sizes, positions, orientations), score, fill_rate, show_rate
def draw(elements, file_name=None, font_path=None, width=80, height=40, scale=1,
color_func=random_color_func):
if font_path is None:
font_path = get_default_font()
img = Image.new("RGB", (width, height))
draw = ImageDraw.Draw(img)
for (word, weight), font_size, position, orientation in elements:
font = ImageFont.truetype(font_path, font_size)
transposed_font = ImageFont.TransposedFont(font,
orientation=orientation)
draw.setfont(transposed_font)
color = random_color_func(word, font_size * scale, position, orientation)
pos = (position[1], position[0])
draw.text(pos, word, fill=color)
if file_name is not None:
img.save(file_name)
else:
img.show()
if debug >= 3:
a = np.asarray(img)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
print(1 if a[i, j].any() else 0),
print('\n'),
def collision_detect(integral_image, pos, box_size):
height, width = integral_image.shape
x, y = pos
box_width, box_height = box_size
#out of the bound
if x + box_height >= height or y + box_width >= width:
return True
if integral_image[x: x + box_height, y: y + box_width].any():
return True
return False
def get_spiral_function(size):
width, height = size
e = width * 1.0 / height
return lambda t: (t * 1.0 * math.cos(t), e * t * math.sin(t))
def euclid_distance(pos1, pos2):
return math.sqrt((pos1[0]-pos2[0])**2 + (pos1[1]-pos2[1])**2)
def check_in_bound(size, pos_current):
"""check the pos_current in the bound or not """
pos_x, pos_y = pos_current[0], pos_current[1]
width, height = size
if pos_x >= 0 and pos_x < height and pos_y >= 0 and pos_y < width:
return True
return False
def query_integral_image(integral_image, box_size):
#print('sum:', integral_image.sum())
height = integral_image.shape[0]
width = integral_image.shape[1]
box_width, box_height = box_size
#area, i, j
spiral = get_spiral_function((width, height))
delta = random.choice([1, -1])
t = 0
#pos_begin_x, pos_begin_y = random.randint(0, height-1), random.randint(0, width-1)
pos_begin_x, pos_begin_y = \
int((height - box_height) * random.uniform(0.25, 0.75)), int((width - box_width) * random.uniform(0.25, 0.75))
#print('begin:x:y:', pos_begin_x, pos_begin_y, box_size, (width, height), height - box_height)
max_distance = euclid_distance((height, width), (0, 0))
while True:
        #first generate a random point on the horizon
pos_x, pos_y = spiral(t)
pos_x, pos_y = int(pos_x + pos_begin_x + 0.5), int(pos_y + pos_begin_y + 0.5)
t += delta
        #then move it along the spiral
if euclid_distance((pos_x, pos_y), (pos_begin_x, pos_begin_y)) >= max_distance:
break
if not check_in_bound((width, height), (pos_x, pos_y)):
continue
if not collision_detect(integral_image, (pos_x, pos_y), box_size):
if debug >= 3:
for i in range(integral_image.shape[0]):
for j in range(integral_image.shape[1]):
print(1 if integral_image[i, j] != 0 else 0),
print('\n'),
return pos_x, pos_y
return None
``` |
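A short usage sketch for `draw_word_cloud` based on the docstring above; the word list, weights and output path are invented, and the import path assumes the package layout shown in the file header:

```python
from easywordcloud.layout_cloud import draw_word_cloud

# Weights can be any positive numbers; they are normalised internally.
words = [('python', 10.0), ('word', 6.0), ('cloud', 6.0), ('layout', 3.0), ('spiral', 1.5)]
draw_word_cloud(words, width=800, height=600, output_file_name='cloud.png')
```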
{
"source": "joshua-wx/openradartools",
"score": 3
} |
#### File: openradartools/openradartools/physical.py
```python
import os
import numpy as np
import wradlib as wrl
import pyart
#from wradlib
def open_dem(dem_fn='australia_250m_dem.tif', invalid_terrain=-9999):
"""
Open a DEM file for generating masks. returns variables required for wradlib processing
"""
rasterfile = wrl.util.get_wradlib_data_file(dem_fn)
ds = wrl.io.open_raster(rasterfile)
rastervalues, rastercoords, proj = wrl.georef.extract_raster_dataset(ds, nodata=invalid_terrain)
return (rastervalues, rastercoords, proj)
#from wradlib
def beam_blocking(radar, srtm_ffn, bb_ffn=None):
"""
Apply the wradlib beam blocking library for the target volume.
Parameters
----------
radar : Radar
Py-ART radar object.
srtm_ffn : string
Full path to SRTM geotiff file.
bb_ffn : string
full path to output npz file for CBB field. Use None to skip saving to file.
Returns
-------
ccb_dict : dict
Dictionary containing the cumulative beam blocking (CBB) for
every pixel in the radar object.
"""
# site parameters
radar_lat = radar.latitude['data'][0]
radar_lon = radar.longitude['data'][0]
radar_alt = radar.altitude['data'][0]
sitecoords = (radar_lon, radar_lat, radar_alt)
nsweeps = radar.nsweeps
nrays = int(radar.nrays / nsweeps)
nbins = int(radar.ngates)
el_list = radar.fixed_angle['data']
range_res = radar.range['meters_between_gates']
try:
bw = radar.instrument_parameters['radar_beam_width_h']['data']
except:
        print('beamwidth info missing from volume, using default of 1deg')
bw = 1
# grid arrays
r = np.arange(nbins) * range_res
beamradius = wrl.util.half_power_radius(r, bw)
# read geotiff
ds = wrl.io.open_raster(srtm_ffn)
rastervalues, rastercoords, proj = wrl.georef.extract_raster_dataset(ds, nodata=-32768)
    #build coordinates
coord = None
for el in el_list:
#calculat spherical coordiantes for a sweep
sweep_coord = wrl.georef.sweep_centroids(nrays, range_res, nbins, el)
if coord is None:
coord = sweep_coord
else:
            #append spherical coordinates for a sweep
coord = np.append(coord, sweep_coord, axis=0)
#calculate geographical coordinates of spherical space
coords = wrl.georef.spherical_to_proj(coord[..., 0],
coord[..., 1],
coord[..., 2], sitecoords)
lon = coords[..., 0]
lat = coords[..., 1]
alt = coords[..., 2]
    #polar coordinates for mapping terrain (no altitude)
polcoords = coords[..., :2]
# Clip the region inside our bounding box
rlimits = (lon.min(), lat.min(), lon.max(), lat.max())
ind = wrl.util.find_bbox_indices(rastercoords, rlimits)
rastercoords_clip = rastercoords.copy()[ind[1]:ind[3], ind[0]:ind[2], ...]
rastervalues_clip = rastervalues.copy()[ind[1]:ind[3], ind[0]:ind[2]]
# Map rastervalues to polar grid points
polarvalues = wrl.ipol.cart_to_irregular_interp(rastercoords_clip, rastervalues_clip,
polcoords, method='nearest')
# calculate beam blocking for each bin
PBB = wrl.qual.beam_block_frac(polarvalues, alt, beamradius)
PBB = np.ma.masked_invalid(PBB)
# calculate beam blocking along each ray
CBB = wrl.qual.cum_beam_block_frac(PBB)
# save to npz file
if bb_ffn is not None:
np.savez(bb_ffn, CBB=CBB, PBB=PBB)
# generate meta
cbb_dict = {'data': CBB, 'units': '%',
'long_name': 'cumulative beam blocking percentage',
'description': "cumulative beam blocking implemented by wradlib: https://docs.wradlib.org/en/stable/notebooks/beamblockage/wradlib_beamblock.html",
                'comment': "Derived from a 500m horizontal resolution DEM generated by the Shuttle Radar Topography Mission (SRTM)"}
return cbb_dict
def build_masks(vol_ffn, dem_info, bw_peaks=3.0, invalid_terrain = -9999, terrain_offset = 2000):
#build radar info
radar = pyart.aux_io.read_odim_h5(vol_ffn)
sitecoords = (radar.longitude['data'][0], radar.latitude['data'][0], radar.altitude['data'][0])
nrays = int(radar.nrays/radar.nsweeps) # number of rays
nbins = radar.ngates # number of range bins
el_list = radar.fixed_angle['data'] # vertical antenna pointing angle (deg)
range_res = radar.range['data'][2] - radar.range['data'][1]# range resolution (meters)
#unpack DEM
rastervalues, rastercoords, proj = dem_info
    #build coordinates
coord = None
for el in el_list:
        #calculate spherical coordinates for a sweep
sweep_coord = wrl.georef.sweep_centroids(nrays, range_res, nbins, el)
if coord is None:
coord = sweep_coord
else:
            #append spherical coordinates for a sweep
coord = np.append(coord, sweep_coord, axis=0)
#calculate geographical coordinates of spherical space
coords = wrl.georef.spherical_to_proj(coord[..., 0],
coord[..., 1],
coord[..., 2], sitecoords)
lon = coords[..., 0]
lat = coords[..., 1]
alt = coords[..., 2]
    #polar coordinates for mapping terrain (no altitude)
polcoords = coords[..., :2]
# Clip the region inside our bounding box
rlimits = (lon.min(), lat.min(), lon.max(), lat.max())
ind = wrl.util.find_bbox_indices(rastercoords, rlimits)
rastercoords_clip = rastercoords.copy()[ind[1]:ind[3], ind[0]:ind[2], ...]
rastervalues_clip = rastervalues.copy()[ind[1]:ind[3], ind[0]:ind[2]]
# Map rastervalues to polar grid points
polarvalues = wrl.ipol.cart_to_irregular_interp(rastercoords_clip, rastervalues_clip,
polcoords, method='nearest')
#calculate sea mask using invalid terrain value
sea_mask = polarvalues == invalid_terrain
    #calculate clutter mask
    #where the beam bottom (centre minus the half-power radius) is at or below the terrain + terrain_offset
r = np.arange(nbins) * range_res
beamradius = wrl.util.half_power_radius(r, bw_peaks)
beam_bottom = alt - beamradius
clutter_mask = beam_bottom <= (polarvalues + terrain_offset)
return sea_mask, clutter_mask
``` |
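A hedged usage sketch for `beam_blocking`: the file paths are placeholders, `pyart` and `wradlib` must be installed, and the import path is an assumption based on the repository layout.

```python
import pyart
from openradartools import physical

# Read an ODIM_H5 volume, compute cumulative beam blocking against a DEM,
# and attach it to the radar object as a new field.
radar = pyart.aux_io.read_odim_h5('volume.h5')
cbb = physical.beam_blocking(radar, 'srtm_dem.tif', bb_ffn=None)
radar.add_field('cumulative_beam_blockage', cbb, replace_existing=True)
```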
{
"source": "joshuayap98/py_crawler",
"score": 2
} |
#### File: joshuayap98/py_crawler/main.py
```python
from postscrawl.run_scraper import Scraper
def main():
scraper = Scraper()
scraper.start_AAPL_posts_crawl()
if __name__ == '__main__':
main()
```
#### File: postscrawl/postscrawl/pipelines.py
```python
import json
class PostscrawlPipeline:
def process_item(self, item, spider):
item = json.dumps(item, sort_keys=True)
print(item)
return item
``` |
{
"source": "JoshuaYu-crash/C4EP2-2021",
"score": 2
} |
#### File: Host/dockerdata/dockerdata.py
```python
import docker
cli = docker.APIClient(base_url='unix://var/run/docker.sock')
def getDockerData():
datalist = cli.containers()
retData = []
for i in cli.containers():
i["stats"] = cli.stats(i["Id"], stream=False)
retData.append(i)
return retData
def getDockerStats(containID):
return cli.stats(containID, stream=False)
if __name__ == '__main__':
getDockerData()
```
#### File: C4EP2-2021/Ryu/ip_ban.py
```python
from transinfo_server import ban
def add_danger_ip(saddr):
ban(saddr, banned=True)
def add_doubt_ip(saddr):
ban(saddr, banned=False)
```
#### File: C4EP2-2021/Ryu/topology.py
```python
from flask import Flask, render_template, request, jsonify
from pyecharts import options as opts
from pyecharts.charts import Graph
import json
import redis
from flask_cors import *
r = redis.Redis(host="127.0.0.1", port=6379)
app = Flask(__name__)
CORS(app, supports_credentials=True)
@app.route("/dockermsg", methods=["POST"])
def dockerMsg():
data = request.json
host = data["host"]
datalist = data["data"]
# print(datalist)
r.set(host, json.dumps(datalist))
return "ok"
@app.route("/getdockermsg", methods=["GET"])
def getDockerMsg():
host = request.args.get("host")
docker = request.args.get("dockerdata")
dockers = json.loads(r.get(host))
tar = None
# print(dockers)
for doc in dockers:
print(doc["NetworkSettings"]["Networks"]["bridge"]["IPAddress"], docker)
if docker == doc["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]:
tar = doc
break
print(tar)
return jsonify(tar)
def graph_base() -> Graph:
nodes = []
links = []
categories = [
{"symbol": "circle", 'name': 'ryu'},
{"symbol": "diamond", 'name': 'host'},
{"symbol": "roundRect", 'name': 'dockerdata'},
]
ryu = opts.GraphNode(name="RYU", symbol_size=40, category=0) # symbol='roundRect'
nodes.append(ryu)
doc_id = 1
for key in r.keys():
host = opts.GraphNode(name=key, symbol_size=30, category=1) # symbol='diamond'
nodes.append(host)
ryuHostLink = opts.GraphLink(source="RYU", target=key)
links.append(ryuHostLink)
dockerlist = json.loads(r.get(key))
for doc in dockerlist:
docName = doc["Names"][0]
docInfo = str(key, encoding='utf-8') + '/' + doc["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
new_node = opts.GraphNode(name=str(doc_id) + docName, symbol_size=20, category=2, value=docInfo)
nodes.append(new_node)
hostDocLink = opts.GraphLink(source=key, target=str(doc_id) + docName)
links.append(hostDocLink)
doc_id += 1
linestyle_opts = opts.LineStyleOpts(is_show=True,
width=2,
curve=0.1,
type_="solid",
color="orange",
)
g = (
Graph()
.add("", nodes, links, repulsion=1000, categories=categories,
label_opts=opts.LabelOpts(is_show=True, position="left", color='white'),
linestyle_opts=linestyle_opts)
.set_global_opts(title_opts=opts.TitleOpts(title=""))
)
return g
@app.route("/graphchart", methods=["GET"])
def get_bar_chart():
c = graph_base()
return c.dump_options_with_quotes()
if __name__ == '__main__':
app.run(host="127.0.0.1", port=5000, debug=True)
``` |
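A sketch of how a host agent might feed the topology service: `topology.py` expects a JSON body with `host` and `data` keys on `/dockermsg` and listens on 127.0.0.1:5000. The host name and import path below are assumptions, and a local Docker daemon is required.

```python
import requests
from dockerdata import getDockerData   # assumed import path for Host/dockerdata/dockerdata.py

payload = {"host": "host-1", "data": getDockerData()}
requests.post("http://127.0.0.1:5000/dockermsg", json=payload)
```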
{
"source": "JoshuaYu-crash/CCodeAnalyze",
"score": 3
} |
#### File: CCodeAnalyze/test/test_keyCount.py
```python
import unittest
from keyCount import *
class MyTestCase(unittest.TestCase):
def test_readfile(self):
filepath = "./test_read.c"
ans = "int main() { printf(__STRING__); }"
self.assertEqual(readfile(filepath).strip(), ans)
def test_findRightIndex(self):
str1 = "{{{}}}"
str2 = "{{}{}{}}"
str3 = "{123321{} {}{{}}123321}"
ans1 = 4
ans2 = 2
ans3 = 23
self.assertEqual(findRightIndex(1, len(str1), str1), ans1)
self.assertEqual(findRightIndex(1, len(str2), str2), ans2)
self.assertEqual(findRightIndex(0, len(str3), str3), ans3)
def test_countKeys(self):
code = readfile("./test_matchCode.c")
self.assertEqual(countKeysByRE(code), 53)
def test_countSwitch(self):
code = readfile("./test_matchCode.c")
self.assertEqual(countSwitch(code), [2, [3, 2]])
def test_matchIf(self):
code1 = " if() ;"
code2 = " if() {}"
code3 = " if() if() ;"
ans1 = 6
ans2 = 7
ans3 = 11
self.assertEqual(matchIf(code1), ans1)
self.assertEqual(matchIf(code2), ans2)
self.assertEqual(matchIf(code3), ans3)
def test_matchElseIf(self):
code1 = " elseif() ;"
code2 = " elseif() {}"
code3 = " elseif() if() ;"
ans1 = 10
ans2 = 11
ans3 = 15
self.assertEqual(matchElseIf(code1), ans1)
self.assertEqual(matchElseIf(code2), ans2)
self.assertEqual(matchElseIf(code3), ans3)
def test_matchElse(self):
code1 = " else {}"
code2 = " else ;"
ans1 = 7
ans2 = 6
self.assertEqual(matchElse(code1), ans1)
self.assertEqual(matchElse(code2), ans2)
def test_matchCode(self):
code = readfile("./test_matchCode.c")
# print(code)
# exchange else if to elseif
code = re.sub("else\s+if", "elseif", code)
# exchange else{ to else {
code = re.sub("else{", "else {", code)
ie, iei = matchCode(code)
self.assertEqual(ie, 4)
self.assertEqual(iei, 4)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JoshuaYu-crash/LearnPyBackend",
"score": 2
} |
#### File: app/user/code.py
```python
import MySQLdb
from app.user import user
from flask import request, jsonify, current_app
from app.model.response import *
from app.model.dbmodel import *
from app.utils.jwtutils import *
import os
from app.setting import UPLOAD_PATH
from app.utils.codeutils import *
import datetime
@user.route("/code/run", methods=["POST"])
def userRunCode():
user = User.query.get(getUserId())
codeFile = request.json.get("codefile")
    if codeFile is None or not os.path.exists(os.path.join(UPLOAD_PATH, codeFile)):
return jsonify(Error1002())
# codeCommitCount = Code.query.filter(
# Code.user_id == user.user_id,
# datetime.datetime.now() - Code.dt <= datetime.timedelta(1/24/60)
# ).count()
# if codeCommitCount > 3:
# return jsonify(Error1002())
code = Code(codepath=codeFile, user_id=user.user_id)
db.session.add(code)
db.session.commit()
codeHandler = CodeHandler(codeFile)
codeHandler.run()
return jsonify(OK(
codeid=code.code_id,
res=codeHandler.res
))
@user.route("/code/commit", methods=["POST"])
def userCommitCode():
user = User.query.get(getUserId())
codeId = request.json.get("codeid")
courseId = request.json.get("courseid")
problemId = request.json.get("problemid")
describe = request.json.get("describe")
username = request.json.get("username")
avatar = request.json.get("avatar")
code = Code.query.get(codeId)
course = Course.query.get(courseId)
problem = Problem.query.get(problemId)
if code is None or (course is None and problem is None) or (course is not None and problem is not None)\
or username is None or avatar is None:
return jsonify(Error1002())
if course:
code.course_id = course.course_id
if problem:
code.problem_id = problem.problem_id
code.is_commit = True
code.describe = describe
user.avatar = avatar
user.user_name = username
db.session.commit()
return jsonify(OK(codeid=code.code_id))
@user.route("/code/getcode")
def userGetCode():
codeId = request.args.get("codeid")
code = Code.query.get(codeId)
if code is None:
return jsonify(Error1002())
return jsonify(OK(
code={
"codeid": code.code_id,
"codefile": "http://" + current_app.config['HOST'] + "/file/download/" + code.codepath,
"describe": code.describe,
"is_commit": code.is_commit,
"is_show": code.is_show,
"dt": code.dt
}
))
``` |
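A hypothetical client call for the `/code/run` endpoint above. The base URL, the `/user` blueprint prefix, the auth header format and the uploaded file name are all assumptions; only the JSON body key (`codefile`) and the response keys (`codeid`, `res`) come from the handler itself.

```python
import requests

resp = requests.post(
    "http://localhost:5000/user/code/run",            # assumed host and blueprint prefix
    json={"codefile": "hello.c"},                     # must already exist under UPLOAD_PATH
    headers={"Authorization": "Bearer <jwt>"},        # assumed auth scheme
)
print(resp.json())                                    # expected keys: codeid, res
```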
{
"source": "JoshuaZero/awesome_python_test",
"score": 2
} |
#### File: awesome_python/library/config.py
```python
import os
conf_dict = {
'kafka': {
'bootstrap.servers': '172.16.31.10:30091,192.168.3.11:30092,192.168.127.12:30093',
'group.id': 'online_recognition_cg',
'auto.offset.reset': 'earliest',
'security.protocol': 'sasl_plaintext',
'sasl.mechanisms': 'PLAIN',
'sasl.username': 'account1',
'sasl.password': '<PASSWORD>'
},
'baidu_kafka': {
'bootstrap.servers': 'kafka.su.baidubce.com:9091',
'group.id': 'online_recognition_cg',
'auto.offset.reset': 'earliest',
'security.protocol': 'ssl',
'ssl.ca.location': '/work/dependency/kafka-cert/ca.pem',
'ssl.certificate.location': '/work/dependency/kafka-cert/client.pem',
'ssl.key.location': '/work/dependency/kafka-cert/client.key'
},
'influxdb': {
'server': 'bj-influxdb.aibee.cn',
'port': 80,
'user': '',
'password': '',
'db': 'face_data_pipeline'
},
}
def get(*args):
"""
    Get a configuration value, preferring environment variables over conf_dict.
:param args:
:return:
"""
env_name = ('_'.join(args)).upper()
val = os.getenv(env_name)
if val is None:
_conf_dict = conf_dict
for idx in range(len(args)):
k = args[idx]
if type(_conf_dict[k]) is dict:
if idx == len(args) - 1:
return _conf_dict[k]
else:
_conf_dict = _conf_dict[k]
return _conf_dict[k]
return val
```
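A usage sketch for `get()`: values come from `conf_dict` unless an environment variable, named by upper-casing and underscore-joining the arguments, is set. The import path is an assumption based on the file location.

```python
import os
from library import config   # assumed import path

print(config.get('influxdb', 'db'))        # -> 'face_data_pipeline' (from conf_dict)
os.environ['INFLUXDB_DB'] = 'override_db'
print(config.get('influxdb', 'db'))        # -> 'override_db' (environment wins)
```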
#### File: awesome_python/library/hdfs_op.py
```python
from retrying import retry
import src.repository.global_params as global_params
from src.library.shell import run_system_command, run_system_command_with_res
from src.repository import constant
auth_status = False
def hdfs_auth(*dargs):
def wrap_simple(f):
def wrapped_f(*args, **kw):
global auth_status
if not auth_status:
cmd = "hdfscli initkrb5 -k {}/hadoop_configs/keytab/sjyw.keytab sjyw".format(constant.CONFIG_DIR)
run_system_command(cmd)
auth_status = True
return f(*args, **kw)
return wrapped_f
return wrap_simple(dargs[0])
@hdfs_auth
def hdfs_get(remote_file, local_file):
cmd = "hdfscli download /{}{} {}".format(global_params.get("hdfs_idc", "bj"), remote_file, local_file)
run_system_command(cmd)
@hdfs_auth
@retry(stop_max_attempt_number=3)
def hdfs_put(local_file, remote_file, force=True):
    # hdfscli upload -f has some issues
full_path = "/{}{}".format(global_params.get("hdfs_idc", "bj"), remote_file)
temp_full_path = "{}_backup".format(full_path)
temp_remote_file = "{}_backup".format(remote_file)
if force and hdfs_file_exist(remote_file):
if hdfs_file_exist(temp_remote_file):
del_cmd = "hdfscli delete -f {}".format(temp_full_path)
run_system_command(del_cmd)
rename_cmd = "hdfscli rename {} {}".format(full_path, temp_full_path)
run_system_command(rename_cmd)
cmd = "hdfscli upload {} {}".format(local_file, full_path)
run_system_command(cmd)
cmd = "hdfscli setacl 'group:supergroup:rw-' {}".format(full_path)
run_system_command(cmd)
@hdfs_auth
def hdfs_file_exist(file_path):
cmd = "hdfscli list /{}{}".format(global_params.get("hdfs_idc", "bj"), file_path)
status, _ = run_system_command_with_res(cmd, ignore_err=True)
if status != 0:
return False
return True
@hdfs_auth
def hdfs_mkdir(dir_path):
cmd = "hdfscli mkdir /{}{}".format(global_params.get("hdfs_idc", "bj"), dir_path)
run_system_command(cmd)
cmd = "hdfscli setacl 'group:supergroup:rw-' /{}{}".format(global_params.get("hdfs_idc", "bj"), dir_path)
run_system_command(cmd)
@hdfs_auth
def hdfs_copy(source_path, dst_path):
cmd = "hdfscli copy -f {} {}".format(source_path, dst_path)
run_system_command(cmd)
@hdfs_auth
def hdfs_delete(dst_path):
cmd = "hdfscli delete -f /{}{}".format(global_params.get("hdfs_idc", "bj"), dst_path)
run_system_command(cmd)
```
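A usage sketch for the decorated helpers above (paths are placeholders): the `hdfs_auth` decorator runs `hdfscli initkrb5` once per process before the first HDFS call, so the keytab, the `hdfscli` binary and the repository's `global_params` must be available.

```python
from src.library.hdfs_op import hdfs_put, hdfs_file_exist   # assumed import path

if not hdfs_file_exist('/tmp/example/model.tar'):
    hdfs_put('model.tar', '/tmp/example/model.tar', force=True)
```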
#### File: awesome_python/library/multiprocess.py
```python
import concurrent
import math
import threading
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Pool, cpu_count, Process
import src.repository.global_params as global_params
from src.library.logger import logger
from src.library.shell import run_system_command_with_res
class MultiProcess(object):
def __init__(self, work_num: int = 0):
if not work_num:
work_num = cpu_count()
self.work_num = work_num
self.pool = Pool(self.work_num)
self.params = []
self.func = None
self.res = None
def add_params(self, params):
self.params = params
def add_func(self, func):
self.func = func
def deal(self):
logger.info("generate {} worker pool for {}".format(self.work_num, self.func))
self.res = self.pool.starmap_async(self.func, self.params)
def wait(self):
logger.info("wait process finish")
if self.res:
self.res.get()
if self.pool:
self.pool.close()
def multiprocess_deal(func, deal_list, work_num: int = 0):
if not work_num:
work_num = cpu_count()
work_num = min(work_num, len(deal_list), 80)
logger.info("generate {} worker pool for {}".format(work_num, func))
pool = Pool(work_num)
res = pool.starmap(func, deal_list)
pool.close()
return res
def multiprocess_run(func, deal_list, work_num: int = 0):
if not work_num:
work_num = cpu_count()
work_num = min(work_num, 80)
logger.info("generate {} worker pool for {}".format(work_num, func))
pool = Pool(work_num)
res = pool.map(func, deal_list)
pool.close()
return res
def chunk(data_list: list, chunk_num):
item_num = len(data_list)
if item_num <= chunk_num:
return [data_list]
step = int(math.ceil(item_num / chunk_num))
res = []
if step <= 0:
return res
for i in range(0, item_num, step):
res.append(data_list[i:i + step])
return res
def multiprocess_exe(func, deal_list, work_num: int = 0):
if not work_num:
work_num = cpu_count()
process_list = []
deal_list = chunk(deal_list, work_num)
logger.info("generate {} worker pool for {}".format(work_num, func))
for i in range(work_num):
process_list.append(Process(target=func, args=(deal_list[i],)))
for process in process_list:
process.start()
for process in process_list:
process.join()
def get_process_num() -> int:
process_num = global_params.get("process_num", cpu_count())
process_num = int(process_num)
return min(process_num, cpu_count())
def get_gpu_num() -> int:
gpu_config_num = len(global_params.get("gpu_config", "0 1 2 3 4 5 6 7").split(" "))
gpu_num = gpu_config_num
try:
_, online_num = run_system_command_with_res("nvidia-smi -L |wc -l")
gpu_num = int(online_num)
except Exception as ex:
logger.error('get nvidia-smi num error: {}'.format(ex))
return min(gpu_config_num, gpu_num)
def multithread_run(func, deal_list, work_num: int = 0, max_execute_time=10):
if not work_num:
work_num = cpu_count()
work_num = min(work_num, 200)
logger.info("generate {} thread worker pool for {}".format(work_num, func))
res = []
with concurrent.futures.ThreadPoolExecutor(max_workers=work_num) as executor:
thread_tasks = {executor.submit(func, *params): params for params in deal_list}
for task in concurrent.futures.as_completed(thread_tasks):
try:
data = task.result(timeout=max_execute_time)
res.append(data)
except Exception as exc:
logger.error('generated an exception: {}'.format(exc))
return res
class Thread(threading.Thread):
def __init__(self, target, *args):
super().__init__()
self._target = target
self._args = args
self._result = None
def run(self):
self._result = self._target(*self._args)
def get_result(self):
return self._result
```
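A small usage sketch for `chunk` and `multiprocess_deal`, assuming the repository's `src` package is importable so that the module-level logger and global_params imports resolve:

```python
from src.library.multiprocess import chunk, multiprocess_deal

def add(a, b):
    return a + b

if __name__ == '__main__':
    print(chunk(list(range(10)), 3))                 # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
    print(multiprocess_deal(add, [(1, 2), (3, 4)]))  # [3, 7]
```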
#### File: awesome_python/operator/magic_method.py
```python
import collections as clst
from random import choice
Card = clst.namedtuple('Card', ['rank','suit'])
class FrenchDeck:
ranks = [str(n) for n in range(2,11)] + list('JQKA')
suits = "spades diamonds clubs hearts".split()
def __init__(self):
self._cards = [Card(rank,suit) for suit in self.suits for rank in self.ranks]
def __len__(self):
return len(self._cards)
def __getitem__(self,position):
return self._cards[position]
suits_values = dict(spades=3, hearts=2, diamonds=1, clubs=0)
def spades_high(card):
rank_value = FrenchDeck.ranks.index(card.rank)
print("rank_value {}".format(rank_value))
print("lenth: {}".format(len(suits_values)))
print("current card suit: {}".format(suits_values[card.suit]))
return rank_value*len(suits_values) + suits_values[card.suit]
if __name__ == "__main__":
beer_card = Card('7','diamonds')
print(beer_card)
fd_op = FrenchDeck()
print(len(fd_op))
print(choice(fd_op))
for k in fd_op:
print(k)
print("-------------------------\n")
for f in reversed(fd_op):
print(f)
print("=======\n")
for i in sorted(fd_op, key=spades_high):
print(i)
``` |
{
"source": "joshuisken/domoradio",
"score": 4
} |
#### File: joshuisken/domoradio/collect_iradio_links.py
```python
import json
import os
import re
from html.parser import HTMLParser
from urllib.request import urlopen
urls = {
'https://www.hendrikjansen.nl/henk/streaming.html':
('NL', [
'NPO [3R].*',
]),
'https://www.hendrikjansen.nl/henk/streaming1.html':
('BE LU', [
'Radio [12]$',
'.*Klara.*',
]),
}
class MyHTMLParser(HTMLParser):
""" Collect internet radio links (<a> tags) from html
"""
url = ''
data = ''
def __init__(self, wanted, stations):
super().__init__()
# Make a regex that matches if any of our regexes match.
self.combined = "(" + ")|(".join(wanted) + ")"
self.stations = stations
def handle_starttag(self, tag, attrs):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
if name == "href":
self.url = value
def handle_endtag(self, tag):
if tag == "a":
if 'http' in self.url and re.match(self.combined, self.data):
# print("%-30s: %s" % (self.data, self.url))
self.stations[self.data] = self.url
def handle_data(self, data):
'Cleanup the name of the radio station'
self.data = data.lstrip(' —-+').strip()
def collect_stations(urls):
""" Parse the html files and slect radio station using a HTML parser
"""
radio_stations = {}
for url, (countries, wanted) in urls.items():
print('== %-10s' % countries, end=': ')
radio_stations[countries] = {}
response = urlopen(url)
text = response.read()
with open(os.path.basename(url), 'wb') as f:
f.write(text)
parser = MyHTMLParser(wanted, radio_stations[countries])
parser.feed(text.decode('utf-8'))
print(len(radio_stations[countries]))
return radio_stations
def main():
radio_stations = collect_stations(urls)
with open('radio_stations.json', 'w') as f:
print(json.dumps(radio_stations, sort_keys=True, indent=4), file=f)
if __name__ == '__main__':
main()
``` |
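A minimal sketch of `MyHTMLParser` on an in-memory HTML fragment; the anchor text and URL are placeholders:

```python
from collect_iradio_links import MyHTMLParser

stations = {}
parser = MyHTMLParser([r'NPO [3R].*'], stations)
parser.feed('<a href="http://example.com/stream">NPO Radio 1</a>')
print(stations)   # {'NPO Radio 1': 'http://example.com/stream'}
```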
{
"source": "joshuisken/opentitan",
"score": 3
} |
#### File: util/reggen/gen_cheader.py
```python
import io
import logging as log
import re
import sys
import textwrap
import warnings
def genout(outfile, msg):
outfile.write(msg)
def as_define(s):
s = s.upper()
r = ''
for i in range(0, len(s)):
r += s[i] if s[i].isalnum() else '_'
return r
def first_line(s):
"""Returns the first line of a multi-line string"""
return s.splitlines()[0]
def format_comment(s):
"""Formats a string to comment wrapped to an 80 character line width
Returns wrapped string including newline and // comment characters.
"""
return '\n'.join(
textwrap.wrap(
s, width=77, initial_indent='// ', subsequent_indent='// ')) + '\n'
def gen_define(name, args, body, existing_defines, indent=' '):
r"""Produces a #define string, will split into two lines if a single line
has a width greater than 80 characters. Result includes newline.
Arguments:
name - Name of the #define
args - List of arguments for the define, provide an empty list if there are
none
body - Body of the #define
existing_defines - set of already generated define names. Error if `name` is in `existing_defines`.
indent - Gives string to prepend on any new lines produced by
wrapping (default ' ')
Example result:
name = 'A_MACRO'
args = ['arg1', 'arg2'],
body = 'arg1 + arg2 + 10'
#define A_MACRO(arg1, arg2) arg1 + arg2 + 10
When the macro is wrapped the break happens after the argument list (or
macro name if there is no argument list
#define A_MACRO(arg1, arg2) \
arg1 + arg2 + 10
"""
if name in existing_defines:
log.error("Duplicate #define for " + name)
sys.exit(1)
if len(args) != 0:
define_declare = '#define ' + name + '(' + ', '.join(args) + ')'
else:
define_declare = '#define ' + name
oneline_define = define_declare + ' ' + body
existing_defines.add(name)
if len(oneline_define) <= 80:
return oneline_define + '\n'
return define_declare + ' \\\n' + indent + body + '\n'
def gen_cdefine_register(outstr, reg, comp, width, rnames, existing_defines):
rname = reg['name']
offset = reg['genoffset']
genout(outstr, format_comment(first_line(reg['desc'])))
defname = as_define(comp + '_' + rname)
genout(
outstr,
gen_define(
defname, ['id'],
'(' + as_define(comp) + '##id##_BASE_ADDR + ' + hex(offset) + ')',
existing_defines))
genout(
outstr,
gen_define(defname + '_REG_OFFSET', [], hex(offset), existing_defines))
for field in reg['fields']:
fieldlsb = field['bitinfo'][2]
fname = field['name']
dname = defname + '_' + as_define(fname)
if field['bitinfo'][1] == 1:
# single bit
genout(outstr,
gen_define(dname, [], str(fieldlsb), existing_defines))
else:
# multiple bits (unless it is the whole register)
if field['bitinfo'][1] != width:
mask = field['bitinfo'][0] >> fieldlsb
genout(
outstr,
gen_define(dname + '_MASK', [], hex(mask),
existing_defines))
genout(
outstr,
gen_define(dname + '_OFFSET', [], str(fieldlsb),
existing_defines))
if 'enum' in field:
for enum in field['enum']:
ename = as_define(enum['name'])
genout(
outstr,
gen_define(
defname + '_' + as_define(field['name']) + '_' +
ename, [], enum['value'], existing_defines))
genout(outstr, '\n')
return
def gen_cdefine_window(outstr, win, comp, regwidth, rnames, existing_defines):
wname = win['name']
offset = win['genoffset']
genout(outstr, format_comment('Memory area: ' + first_line(win['desc'])))
defname = as_define(comp + '_' + wname)
genout(
outstr,
gen_define(
defname, ['id'],
'(' + as_define(comp) + '##id##_BASE_ADDR + ' + hex(offset) + ')',
existing_defines))
genout(
outstr,
gen_define(defname + '_REG_OFFSET', [], hex(offset), existing_defines))
items = int(win['items'])
genout(
outstr,
gen_define(defname + '_SIZE_WORDS', [], str(items), existing_defines))
items = items * (regwidth // 8)
genout(
outstr,
gen_define(defname + '_SIZE_BYTES', [], str(items), existing_defines))
wid = win['genvalidbits']
if (wid != regwidth):
mask = (1 << wid) - 1
genout(outstr,
gen_define(defname + '_MASK ', [], hex(mask), existing_defines))
def gen_cdefines_module_param(outstr, param, module_name, existing_defines):
param_type = param['type']
# Presently there is only one type (int), however if the new types are
# added, they potentially need to be handled differently.
known_types = ["int"]
if param_type not in known_types:
warnings.warn(
"Cannot generate a module define of type {}".format(param_type))
return
genout(outstr, format_comment(first_line(param['desc'])))
define_name = as_define(module_name + '_PARAM_' + param['name'])
if param_type == "int":
define = gen_define(define_name, [], param['default'],
existing_defines)
genout(outstr, define)
genout(outstr, '\n')
def gen_cdefines_module_params(outstr, module_data, module_name,
register_width, existing_defines):
module_params = set()
if 'param_list' in module_data:
module_params = module_data['param_list']
for param in module_params:
gen_cdefines_module_param(outstr, param, module_name, existing_defines)
genout(outstr, format_comment(first_line("Register width")))
define_name = as_define(module_name + '_PARAM_REG_WIDTH')
define = gen_define(define_name, [], str(register_width), existing_defines)
genout(outstr, define)
genout(outstr, '\n')
def gen_multireg_field_defines(outstr, regname, field, subreg_num, regwidth,
existing_defines):
field_width = field['bitinfo'][1]
fields_per_reg = regwidth // field_width
define_name = regname + '_' + as_define(field['name'] + "_FIELD_WIDTH")
define = gen_define(define_name, [], str(field_width), existing_defines)
genout(outstr, define)
define_name = regname + '_' + as_define(field['name'] + "_FIELDS_PER_REG")
define = gen_define(define_name, [], str(fields_per_reg), existing_defines)
genout(outstr, define)
define_name = regname + "_MULTIREG_COUNT"
define = gen_define(define_name, [], str(subreg_num), existing_defines)
genout(outstr, define)
genout(outstr, '\n')
def gen_cdefine_multireg(outstr, register, component, regwidth, rnames,
existing_defines):
multireg = register['multireg']
subregs = multireg['genregs']
comment = multireg['desc'] + " (common parameters)"
genout(outstr, format_comment(first_line(comment)))
if len(multireg['fields']) == 1:
regname = as_define(component + '_' + multireg['name'])
gen_multireg_field_defines(outstr, regname, multireg['fields'][0],
len(subregs), regwidth, existing_defines)
else:
log.warn("Non-homogeneous multireg " + multireg['name'] +
" skip multireg specific data generation.")
for subreg in subregs:
gen_cdefine_register(outstr, subreg, component, regwidth, rnames,
existing_defines)
# Must have called validate, so should have no errors
def gen_cdefines(regs, outfile, src_lic, src_copy):
component = regs['name']
registers = regs['registers']
rnames = regs['genrnames']
outstr = io.StringIO()
# This tracks the defines that have been generated so far, so we
# can error if we attempt to duplicate a definition
existing_defines = set()
if 'regwidth' in regs:
regwidth = int(regs['regwidth'], 0)
else:
regwidth = 32
gen_cdefines_module_params(outstr, regs, component, regwidth,
existing_defines)
for x in registers:
if 'reserved' in x:
continue
if 'skipto' in x:
continue
if 'sameaddr' in x:
for sareg in x['sameaddr']:
gen_cdefine_register(outstr, sareg, component, regwidth,
rnames, existing_defines)
continue
if 'window' in x:
gen_cdefine_window(outstr, x['window'], component, regwidth,
rnames, existing_defines)
continue
if 'multireg' in x:
gen_cdefine_multireg(outstr, x, component, regwidth, rnames,
existing_defines)
continue
gen_cdefine_register(outstr, x, component, regwidth, rnames,
existing_defines)
generated = outstr.getvalue()
outstr.close()
genout(outfile, '// Generated register defines for ' + component + '\n\n')
if src_copy != '':
genout(outfile, '// Copyright information found in source file:\n')
genout(outfile, '// ' + src_copy + '\n\n')
if src_lic != None:
genout(outfile, '// Licensing information found in source file:\n')
for line in src_lic.splitlines():
genout(outfile, '// ' + line + '\n')
genout(outfile, '\n')
genout(outfile, '#ifndef _' + as_define(component) + '_REG_DEFS_\n')
genout(outfile, '#define _' + as_define(component) + '_REG_DEFS_\n\n')
genout(outfile, generated)
genout(outfile, '#endif // _' + as_define(component) + '_REG_DEFS_\n')
genout(outfile, '// End generated register defines for ' + component)
return
def test_gen_define():
basic_oneline = '#define MACRO_NAME body\n'
assert gen_define('MACRO_NAME', [], 'body', set()) == basic_oneline
basic_oneline_with_args = '#define MACRO_NAME(arg1, arg2) arg1 + arg2\n'
assert (gen_define('MACRO_NAME', ['arg1', 'arg2'], 'arg1 + arg2',
set()) == basic_oneline_with_args)
long_macro_name = 'A_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_LONG_MACRO_NAME'
multiline = ('#define ' + long_macro_name + ' \\\n' +
' a_fairly_long_body + something_else + 10\n')
assert (gen_define(long_macro_name, [],
'a_fairly_long_body + something_else + 10',
set()) == multiline)
multiline_with_args = ('#define ' + long_macro_name +
'(arg1, arg2, arg3) \\\n' +
' a_fairly_long_body + arg1 + arg2 + arg3\n')
assert (gen_define(long_macro_name, ['arg1', 'arg2', 'arg3'],
'a_fairly_long_body + arg1 + arg2 + arg3',
set()) == multiline_with_args)
multiline_with_args_big_indent = (
'#define ' + long_macro_name + '(arg1, arg2, arg3) \\\n' +
' a_fairly_long_body + arg1 + arg2 + arg3\n')
assert (gen_define(long_macro_name, ['arg1', 'arg2', 'arg3'],
'a_fairly_long_body + arg1 + arg2 + arg3',
set(),
indent=' ') == multiline_with_args_big_indent)
``` |
{
"source": "JoshunCox/NasdaqCloudDataService-SDK-Python",
"score": 3
} |
#### File: ncdsclient/internal/AvroDeserializer.py
```python
import io
from avro.io import DatumReader, BinaryDecoder
import json
import logging
class AvroDeserializer():
"""
    Decodes Avro-encoded messages against the given schema and returns the decoded data.
Attributes:
schema (Schema): the schema loaded from a schema file
"""
def __init__(self, schema):
self.schema = schema
self.logger = logging.getLogger(__name__)
def decode(self, msg_value, ctx):
reader = DatumReader(self.schema)
message_bytes = io.BytesIO(msg_value)
decoder = BinaryDecoder(message_bytes)
try:
event_dict = reader.read(decoder)
except Exception as e:
            self.logger.exception(e)
            raise
union_schema = True
try:
# Get the union index to get the schema and schema name
schema_message_bytes = io.BytesIO(msg_value)
schema_decoder = BinaryDecoder(schema_message_bytes)
schema_index = int(schema_decoder.read_long())
schema_name = reader.readers_schema.schemas[schema_index].name
        except Exception:
            union_schema = False
for key in event_dict:
if type(event_dict[key]) == str:
event_dict[key] = event_dict[key].strip()
# Initialize schema name in the message based on type of schema
if union_schema:
event_dict["schema_name"] = schema_name
else:
event_dict["schema_name"] = reader.readers_schema.name
return event_dict
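# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of using the deserializer on its own, assuming `schema` was
# loaded with the avro package's schema parser and `raw_bytes` holds one
# Avro-encoded message (both names are placeholders):
#
#     deserializer = AvroDeserializer(schema)
#     record = deserializer.decode(raw_bytes, ctx=None)
#     print(record["schema_name"], record)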
```
#### File: ncdsclient/internal/BasicKafkaConsumer.py
```python
from confluent_kafka import DeserializingConsumer
from confluent_kafka.error import (KeyDeserializationError,
ValueDeserializationError)
from confluent_kafka.serialization import (SerializationContext,
MessageField)
import logging
class BasicKafkaConsumer(DeserializingConsumer):
"""
This is the base class for all Kafka consumers.
It expands the confluent-kafka Python `DeserializingConsumer <https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html#confluent_kafka.DeserializingConsumer>`_ class by adding some utility methods.
Attributes:
config (dict): stores dict that stores configuration properties for the confluent-kafka Python `DeserializingConsumer <https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html#confluent_kafka.DeserializingConsumer>`_
key_deserializer (Deserializer): deserializer used for message keys
value_deserializer (func): decode function used to deserialize message values
"""
def __init__(self, config, key_deserializer, value_deserializer):
config["key.deserializer"] = key_deserializer
config["value.deserializer"] = value_deserializer.decode
self.logger = logging.getLogger(__name__)
super(BasicKafkaConsumer, self).__init__(config)
def ensure_assignment(self):
"""
        Ensures that the consumer has been assigned its topic partitions.
Returns:
a list of TopicPartitions that the consumer has been assigned to
:rtype: list(`TopicPartitions <https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html#pythonclient-topicpartition>`_)
"""
super(BasicKafkaConsumer, self).poll(0)
return super(BasicKafkaConsumer, self).assignment()
def consume(self, num_messages=1, timeout=-1):
"""
Consume up to the number of messages specified with a timeout for each request
Args:
num_messages (int): The maximum number of messages to wait for.
            timeout (float): Maximum time to block waiting for messages (seconds).
        Returns:
            A list of :py:class:`Message` objects; empty if the timeout expires with no messages
Raises:
KeyDeserializationError: If an error occurs during key deserialization.
ValueDeserializationError: If an error occurs during value deserialization.
RuntimeError: if the number of messages is less than 1
"""
if num_messages < 1:
raise RuntimeError(
"The maximum number of messages must be greater than or equal to 1.")
messages = super(DeserializingConsumer, self).consume(
num_messages, timeout)
if messages is None:
return []
deserialized_messages = []
for message in messages:
deserialized_messages.append(
self._parse_deserialize_message(message))
return deserialized_messages
def _parse_deserialize_message(self, message):
"""
        Internal helper that deserializes a raw message so that poll and consume return consistent results.
        Takes a raw serialized message (from cimpl) and returns the deserialized message.
Args:
message (cimpl.Message): The serialized message returned from the base consumer class.
Returns:
            :py:class:`Message` on successful deserialization
Raises:
KeyDeserializationError: If an error occurs during key deserialization.
ValueDeserializationError: If an error occurs during value deserialization.
"""
ctx = SerializationContext(message.topic(), MessageField.VALUE)
value = message.value()
if self._value_deserializer is not None:
try:
value = self._value_deserializer(value, ctx)
except Exception as se:
raise ValueDeserializationError(
exception=se, kafka_message=message)
key = message.key()
ctx.field = MessageField.KEY
if self._key_deserializer is not None:
try:
key = self._key_deserializer(key, ctx)
except Exception as se:
raise KeyDeserializationError(
exception=se, kafka_message=message)
message.set_key(key)
message.set_value(value)
return message
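# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of wiring this consumer up with an AvroDeserializer instance
# (`avro_deserializer` below); broker address, group id and topic are placeholders:
#
#     from confluent_kafka.serialization import StringDeserializer
#     config = {"bootstrap.servers": "localhost:9092", "group.id": "example-group"}
#     consumer = BasicKafkaConsumer(config, StringDeserializer(), avro_deserializer)
#     consumer.subscribe(["example-topic"])
#     for msg in consumer.consume(num_messages=10, timeout=5):
#         print(msg.key(), msg.value())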
```
#### File: internal/utils/SeekToMidnight.py
```python
import logging
import sys
from confluent_kafka import OFFSET_BEGINNING, OFFSET_INVALID
from datetime import datetime, date, timedelta, time
logger = logging.getLogger(__name__)
def seek_to_midnight_at_past_day(kafka_avro_consumer, topic_partition, num_days_ago=0):
topic_partition.offset = get_timestamp_at_midnight(num_days_ago)
logger.debug(
f"Num days ago: {num_days_ago}. Setting partition offset to timestamp: {topic_partition.offset}")
try:
logger.debug(f"topic partition: {topic_partition}")
offsets_for_times = kafka_avro_consumer.offsets_for_times(
[topic_partition], timeout=5)
except Exception as e:
logger.exception(e)
sys.exit(0)
logger.debug(f"{offsets_for_times[0]}: offsets for times")
partition_offset = offsets_for_times[0].offset
    if offsets_for_times and partition_offset != OFFSET_INVALID:
topic_partition.offset = partition_offset
logger.debug(f"Seeking to topic partition: {topic_partition}")
logger.debug(
f"making sure partition is assigned before seeking: {kafka_avro_consumer.ensure_assignment()}")
kafka_avro_consumer.seek(topic_partition)
else:
topic_partition.offset = OFFSET_BEGINNING
logger.debug(
f"No available offset. Continuing to seek from OFFSET_BEGINNING: {topic_partition}")
kafka_avro_consumer.seek(topic_partition)
return kafka_avro_consumer
def get_timestamp_at_midnight(num_days_ago=0):
    past_day = date.today() - timedelta(days=num_days_ago)
midnight = datetime.combine(past_day, time.min)
return int(midnight.timestamp() * 1000)
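# Minimal sanity check (added for illustration; not part of the original module):
# prints the epoch-millisecond timestamp of local midnight three days ago.
if __name__ == "__main__":
    print(get_timestamp_at_midnight(num_days_ago=3))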
```
#### File: src/tests/NCDSSDKPyTest.py
```python
from ncdssdk.src.tests.utils.NCDSTestUtil import NCDSTestUtil
from ncdssdk import NCDSClient
import json
import pytest
ncds_test_util = NCDSTestUtil()
def test_NCDS_client():
ncds_client = NCDSClient(None, None)
assert ncds_client is not None
def test_list_topics_for_the_client():
ncds_client = NCDSClient(None, None)
topics = ncds_client.list_topics_for_client()
added_topics = ncds_test_util.get_added_topics()
    assert sorted(topics) == sorted(added_topics)
def test_get_schema_for_the_topic():
ncds_client = NCDSClient(None, None)
topic = "GIDS"
schema_from_sdk = ncds_client.get_schema_for_topic(topic)
schema_file = "testGIDS.avsc"
schema_from_file = ncds_test_util.get_schema_for_topic(schema_file)
assert schema_from_sdk == schema_from_file
def test_top_messages_with_timestamp():
topic = "MOCK"
mock_records = ncds_test_util.get_mock_messages()
for mock_record in mock_records:
mock_record["schema_name"] = "SeqEtpIpvValue"
mock_records_from_kafka = []
timestamp = ncds_test_util.timestamp_to_seek_from
ncds_client = NCDSClient(None, None)
records = ncds_client.top_messages(topic, timestamp)
for record in records:
print(record.offset(), record.timestamp())
mock_records_from_kafka.append(record.value())
assert len(mock_records_from_kafka) == 8
assert mock_records[2:] == mock_records_from_kafka
def test_insertion():
mock_records = ncds_test_util.get_mock_messages()
for mock_record in mock_records:
mock_record["schema_name"] = "SeqEtpIpvValue"
mock_records_from_kafka = []
topic = "MOCK"
ncds_client = NCDSClient(None, None)
records = ncds_client.top_messages(topic)
for record in records:
mock_records_from_kafka.append(record.value())
assert mock_records == mock_records_from_kafka
def test_get_sample_message():
mock_records = ncds_test_util.get_mock_messages()
mock_msg = mock_records[-1]
mock_msg["schema_name"] = "SeqEtpIpvValue"
topic = "MOCK"
msg_name = "SeqEtpIpvValue"
ncds_client = NCDSClient(None, None)
record = ncds_client.get_sample_messages(topic, msg_name, False)
assert str(mock_msg) == record
def test_get_all_sample_messages():
GIDS_records = ncds_test_util.get_GIDS_messages()
for i, record in enumerate(GIDS_records):
if i < 5:
record["schema_name"] = "SeqEquitiesSummary"
else:
record["schema_name"] = "SeqEtpIpvValue"
topic = "GIDS"
msg_name = "SeqEtpIpvValue"
ncds_client = NCDSClient(None, None)
records = ncds_client.get_sample_messages(topic, msg_name, True)
print("mock records: ", GIDS_records)
print("records from ncdsclient: ", records)
assert len(records) == 5
assert GIDS_records[5:] == records
def test_get_sample_message_incorrect_topic():
mock_records = ncds_test_util.get_mock_messages()
mock_msg = mock_records[0]
mock_msg["schema_name"] = "SeqEtpIpvValue"
topic = "MUCK"
msg_name = "SeqEtpIpvValue"
ncds_client = NCDSClient(None, None)
with pytest.raises(Exception):
ncds_client.get_sample_messages(topic, msg_name, False)
def test_get_schema_for_the_incorrect_topic():
ncds_client = NCDSClient(None, None)
topic = "MOCK"
schema_from_sdk = ncds_client.get_schema_for_topic(topic)
schema_file = "testGIDS.avsc"
schema_from_file = ncds_test_util.get_schema_for_topic(schema_file)
assert schema_from_sdk != schema_from_file
# with pytest.raises(Exception):
# schema_from_file = ncds_test_util.get_schema_for_topic(schema_file)
``` |
{
"source": "joshunrau/CognitiveSubtypes",
"score": 2
} |
#### File: src/models/classify.py
```python
import warnings
from abc import abstractmethod
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import accuracy_score, balanced_accuracy_score, roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.utils.validation import check_array, check_is_fitted, NotFittedError
from skopt import BayesSearchCV
from skopt.space import Categorical, Integer, Real
from .base import BaseModel
class BaseClassifier(BaseModel):
available_metrics = {
'accuracy': accuracy_score,
'balanced_accuracy': balanced_accuracy_score,
'roc_auc': roc_auc_score
}
prob_metrics = ['roc_auc']
fs_param_grid = {}
def __init__(self, score_method: str = 'accuracy') -> None:
super().__init__()
self._pipeline = Pipeline([
('scaler', StandardScaler()),
("clf", self.sklearn_estimator())
])
self._score_method = score_method
def __str__(self) -> str:
return str(self.sklearn_estimator.__name__)
@property
@abstractmethod
def clf_param_grid(self) -> dict:
pass
@property
def param_grid(self) -> dict:
return self.fs_param_grid | self.clf_param_grid
@property
@abstractmethod
def n_iter(self) -> int:
pass
@property
def pipeline(self) -> Pipeline:
return self._pipeline
@pipeline.setter
def pipeline(self, pipeline: Pipeline) -> None:
if not isinstance(pipeline, Pipeline):
raise TypeError
self._pipeline = pipeline
def is_fitted(self) -> bool:
try:
check_is_fitted(self.grid_)
except (AttributeError, NotFittedError):
return False
return True
def fit(self, X: np.ndarray, y: np.ndarray, surpress_warnings = True, **kwargs) -> None:
super().fit(X, y)
if self._score_method not in self.available_metrics.keys():
raise ValueError(f"Scoring must be one of: {self.available_metrics.keys()}")
self.grid_ = BayesSearchCV(
self.pipeline,
self.param_grid,
n_jobs=-1,
scoring=self._score_method,
n_iter=self.n_iter,
cv=5,
**kwargs)
print("Begin fitting best classifier for model: " + str(self))
if surpress_warnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.grid_.fit(X, y)
else:
self.grid_.fit(X, y)
self.n_targets_ = len(np.unique(y))
self.best_estimator_ = self.grid_.best_estimator_
self.best_params_ = self.grid_.best_params_
self.best_score_ = self.grid_.best_score_
self.classes_ = self.grid_.classes_
self.n_features_in_ = self.grid_.n_features_in_
print("Done!")
print(f"{self._score_method}: {self.best_score_}")
def predict(self, X: np.ndarray) -> None:
self.check_is_fitted()
check_array(X)
return self.grid_.predict(X)
def get_y_score(self, X: np.ndarray):
self.check_is_fitted()
check_array(X)
if len(self.classes_) != 2:
raise ValueError("This method was created for binary classes")
try:
return self.grid_.predict_proba(X)[:, 1]
except AttributeError:
return self.grid_.decision_function(X)
def score(self, X: np.ndarray, y: np.ndarray):
return self.grid_.score(X, y)
class BestKNeighborsClassifier(BaseClassifier):
sklearn_estimator = KNeighborsClassifier
clf_param_grid = {
'clf__n_neighbors': Integer(1, 20),
'clf__weights': Categorical(['uniform', 'distance']),
'clf__metric': Categorical(['euclidean', 'manhattan', 'minkowski']),
}
n_iter = 25
class BestSVC(BaseClassifier):
sklearn_estimator = SVC
clf_param_grid = {
'clf__C': Real(1e-2, 1e+3, 'log-uniform'),
'clf__gamma': Real(1e-4, 1e+1, 'log-uniform'),
'clf__degree': Integer(1, 3),
'clf__kernel': Categorical(['linear', 'poly', 'rbf']),
'clf__probability': Categorical([True])
}
n_iter = 50
class BestRidgeClassifier(BaseClassifier):
sklearn_estimator = RidgeClassifier
clf_param_grid = {
'clf__alpha': Real(1e-4, 1e+0, 'log-uniform'),
'clf__class_weight': Categorical(['balanced'])
}
n_iter = 15
class BestRandomForestClassifier(BaseClassifier):
sklearn_estimator = RandomForestClassifier
clf_param_grid = {
'clf__n_estimators': Integer(50, 500),
'clf__max_depth': Integer(5, 50),
'clf__max_features': Real(1e-2, 1e+0, 'log-uniform'),
'clf__min_samples_split': Integer(2, 5),
'clf__min_samples_leaf': Integer(1, 5),
'clf__class_weight': Categorical(['balanced'])
}
n_iter = 40
class BestGradientBoostingClassifier(BaseClassifier):
sklearn_estimator = GradientBoostingClassifier
clf_param_grid = {
"clf__loss": Categorical(["deviance"]),
"clf__learning_rate": Real(1e-3, 5e-1, 'log-uniform'),
"clf__min_samples_split": Real(0.1, 0.9, 'log-uniform'),
"clf__min_samples_leaf": Real(0.1, 0.5, 'log-uniform'),
"clf__max_depth": Integer(2, 10),
"clf__max_features": Categorical(["log2","sqrt"]),
"clf__criterion": Categorical(["friedman_mse", "squared_error"]),
"clf__subsample": Real(0.5, 1, 'log-uniform')
}
n_iter = 50
class ClassifierSearch:
def __init__(self):
self.classifiers = []
for clf in [BestKNeighborsClassifier, BestRidgeClassifier, BestRandomForestClassifier]:
self.classifiers.append(clf(score_method='roc_auc'))
self.model_names = ['KNN', 'RDG', 'RFC']
self.roc_auc_scores = None
def fit(self, data):
self.roc_auc_scores = {"Train": [], "Test": []}
for clf in self.classifiers:
x_train = data.train.df[data.imaging_feature_names].to_numpy()
x_test = data.test.df[data.imaging_feature_names].to_numpy()
clf.fit(x_train, data.train.target, surpress_warnings=True, verbose=False)
self.roc_auc_scores["Train"].append(clf.best_score_)
self.roc_auc_scores["Test"].append(clf.score(x_test, data.test.target))
self.roc_auc_scores = pd.DataFrame(self.roc_auc_scores, index=self.model_names)
self.best_classifier = self.classifiers[self.model_names.index(self.roc_auc_scores["Test"].idxmax())]
def get_feature_importances(best_random_forest, feature_names):
forest = best_random_forest.best_estimator_.named_steps['clf']
if len(feature_names) != len(forest.feature_importances_):
raise ValueError
forest_importances = {k:v for k, v in zip(feature_names, forest.feature_importances_)}
regional_importances = {}
for feature in forest_importances:
for measure in ['area', 'thickness', 'volume']:
if feature.startswith(measure):
try:
regional_importances[feature.replace(measure, '')][measure] = forest_importances[feature]
except KeyError:
regional_importances[feature.replace(measure, '')] = {}
regional_importances[feature.replace(measure, '')][measure] = forest_importances[feature]
regional_importances = pd.DataFrame.from_dict(regional_importances).T
regional_importances.index = regional_importances.index.map(lambda x: x.lower().replace("left", "lh_").replace("right", "rh_"))
regional_importances = regional_importances.rename(lambda x: x.capitalize(), axis=1)
return regional_importances
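# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of fitting one of the wrapped classifiers on synthetic data;
# the dataset and classifier choice are arbitrary, and the Bayesian search
# (n_iter rounds of 5-fold CV) can take a little while:
#
#     from sklearn.datasets import make_classification
#     X, y = make_classification(n_samples=300, n_features=20, random_state=0)
#     clf = BestRidgeClassifier(score_method='roc_auc')
#     clf.fit(X[:250], y[:250])
#     print(clf.best_params_, clf.score(X[250:], y[250:]))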
```
#### File: src/models/cluster.py
```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import calinski_harabasz_score, davies_bouldin_score, silhouette_score
from .base import BaseModel
from ..utils import get_array_counts
class BestKMeans(BaseModel):
sklearn_estimator = KMeans
available_metrics = {
"calinski_harabasz": calinski_harabasz_score,
"davies_bouldin": davies_bouldin_score,
"silhouette": silhouette_score
}
def __init__(self, k_min: int = 2, k_max: int = 6):
super().__init__()
self._estimator = self.sklearn_estimator
self._k_min = k_min
self._k_max = k_max
def fit(self, X: np.ndarray, y: None = None) -> None:
super().fit(X, y)
self.models_ = {}
self.scores_ = {}
for k in range(self._k_min, self._k_max + 1):
model, model_name = self._estimator(n_clusters=k), k
y_pred = model.fit_predict(X)
self.models_[model_name] = model
self.scores_[model_name] = {
name: metric(X, y_pred) for name, metric in self.available_metrics.items()
}
    def is_fitted(self) -> bool:
        # getattr keeps this check safe even before fit() has created these attributes
        if getattr(self, "models_", None) is None or getattr(self, "scores_", None) is None:
            return False
        return True
def predict(self, X: np.array, k: int, return_counts: bool = False):
super().predict(X)
y_pred = self.models_[k].predict(X)
if return_counts:
return get_array_counts(y_pred)
return y_pred
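# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of fitting KMeans over the configured range of k and
# inspecting the cluster-validity scores; `X` is a placeholder feature array:
#
#     km = BestKMeans(k_min=2, k_max=4)
#     km.fit(X)                       # X: (n_samples, n_features) array
#     for k, metrics in km.scores_.items():
#         print(k, metrics)           # calinski_harabasz / davies_bouldin / silhouette per k
#     labels = km.predict(X, k=3)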
``` |
{
"source": "joshunrau/qcivet",
"score": 3
} |
#### File: qcivet/civetqc/app.py
```python
import argparse
from pathlib import Path
from .resources import config
class App:
name = config['app']['name']
description = config['app']['description']
version = config['app']['version']
def __init__(self) -> None:
self.parser = argparse.ArgumentParser(
prog = self.name,
description = self.description,
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
self.parser.add_argument(
"-v", "--version",
action="version",
version=f"%(prog)s {self.version}"
)
self.parser.add_argument(
"filepath",
help="path to csv file outputted by CIVET",
type=Path
)
self.parser.add_argument(
"-o", "--output_dir",
default=Path.cwd(),
help="directory where results should be outputted",
type=Path,
metavar=''
)
self._args = None
@property
def args(self):
return self._args
    @args.setter
    def args(self, value: argparse.Namespace):
        if not value.filepath.is_file():
            raise FileNotFoundError(f"File not found: {value.filepath}")
        elif not value.output_dir.is_dir():
            raise NotADirectoryError(f"Directory not found: {value.output_dir}")
        self._args = value  # store the validated namespace so the args property returns it
def parse_args(self, argv: list):
self.args = self.parser.parse_args(argv)
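    # --- Usage sketch (added for illustration; not part of the original module) ---
    # A hedged example of driving the CLI front end from Python; the file and
    # directory paths are placeholders:
    #
    #     app = App()
    #     app.parse_args(["/path/to/civet_output.csv", "-o", "/tmp/results"])
    #     print(app.args.filepath, app.args.output_dir)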
```
#### File: qcivet/civetqc/models.py
```python
import pickle
import warnings
from abc import ABC, abstractmethod
from typing import Type
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import accuracy_score, balanced_accuracy_score, roc_auc_score, f1_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y, NotFittedError
from skopt import BayesSearchCV
from skopt.space import Categorical, Integer, Real
class BaseClassifier(ABC):
available_metrics = {
'accuracy': accuracy_score,
'balanced_accuracy': balanced_accuracy_score,
'roc_auc': roc_auc_score,
'f1': f1_score
}
prob_metrics = ['roc_auc']
fs_param_grid = {}
def __init__(self, score_method: str = 'accuracy') -> None:
self._pipeline = Pipeline([
('scaler', StandardScaler()),
("clf", self.sklearn_estimator())
])
self._score_method = score_method
def __str__(self) -> str:
return str(self.sklearn_estimator.__name__)
@property
@abstractmethod
def clf_param_grid(self) -> dict:
pass
@property
@abstractmethod
def n_iter(self) -> int:
pass
@property
@abstractmethod
def sklearn_estimator(self) -> Type[BaseEstimator]:
pass
@property
def param_grid(self) -> dict:
return self.fs_param_grid | self.clf_param_grid
@property
def pipeline(self) -> Pipeline:
return self._pipeline
@pipeline.setter
def pipeline(self, pipeline: Pipeline) -> None:
if not isinstance(pipeline, Pipeline):
raise TypeError
self._pipeline = pipeline
def check_is_fitted(self) -> None:
if not self.is_fitted():
raise NotFittedError("Object must be fitted before method call")
def is_fitted(self) -> bool:
try:
check_is_fitted(self.grid_)
except (AttributeError, NotFittedError):
return False
return True
def fit(self, X: np.ndarray, y: np.ndarray, surpress_warnings = True, **kwargs) -> None:
if y is None:
check_array(X)
else:
check_X_y(X, y)
self.X_ = X
self.y_ = y
if self._score_method not in self.available_metrics.keys():
raise ValueError(f"Scoring must be one of: {self.available_metrics.keys()}")
self.grid_ = BayesSearchCV(
self.pipeline,
self.param_grid,
n_jobs=-1,
scoring=self._score_method,
n_iter=self.n_iter,
cv=5,
**kwargs)
print("Begin fitting best classifier for model: " + str(self))
if surpress_warnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.grid_.fit(X, y)
else:
self.grid_.fit(X, y)
self.n_targets_ = len(np.unique(y))
self.best_estimator_ = self.grid_.best_estimator_
self.best_params_ = self.grid_.best_params_
self.best_score_ = self.grid_.best_score_
self.classes_ = self.grid_.classes_
self.n_features_in_ = self.grid_.n_features_in_
print("Done!")
print(f"{self._score_method}: {self.best_score_}")
def predict(self, X: np.ndarray) -> None:
self.check_is_fitted()
check_array(X)
return self.grid_.predict(X)
def get_y_score(self, X: np.ndarray):
self.check_is_fitted()
check_array(X)
if len(self.classes_) != 2:
raise ValueError("This method was created for binary classes")
try:
return self.grid_.predict_proba(X)[:, 1]
except AttributeError:
return self.grid_.decision_function(X)
def score(self, X: np.ndarray, y: np.ndarray):
return self.grid_.score(X, y)
def save(self, filepath):
with open(filepath, "wb") as file:
pickle.dump(self, file)
class BestDummyClassifier(BaseClassifier):
sklearn_estimator = DummyClassifier
clf_param_grid = {
'clf__strategy': Categorical(['most_frequent'])
}
n_iter = 1
class BestRandomForestClassifier(BaseClassifier):
sklearn_estimator = RandomForestClassifier
clf_param_grid = {
'clf__n_estimators': Integer(50, 500),
'clf__max_depth': Integer(5, 50),
'clf__max_features': Real(1e-2, 1e+0, 'log-uniform'),
'clf__min_samples_split': Integer(2, 5),
'clf__min_samples_leaf': Integer(1, 5),
'clf__class_weight': Categorical(['balanced'])
}
n_iter = 40
class BestGradientBoostingClassifier(BaseClassifier):
sklearn_estimator = GradientBoostingClassifier
clf_param_grid = {
"clf__loss": Categorical(["log_loss"]),
"clf__learning_rate": Real(1e-3, 5e-1, 'log-uniform'),
"clf__min_samples_split": Real(0.1, 0.9, 'log-uniform'),
"clf__min_samples_leaf": Real(0.1, 0.5, 'log-uniform'),
"clf__max_depth": Integer(2, 10),
"clf__max_features": Categorical(["log2","sqrt"]),
"clf__criterion": Categorical(["friedman_mse", "squared_error"]),
"clf__subsample": Real(0.5, 1, 'log-uniform')
}
n_iter = 50
class BestRidgeClassifier(BaseClassifier):
sklearn_estimator = RidgeClassifier
clf_param_grid = {
'clf__alpha': Real(1e-4, 1e+0, 'log-uniform'),
'clf__class_weight': Categorical(['balanced'])
}
n_iter = 15
class BestKNeighborsClassifier(BaseClassifier):
sklearn_estimator = KNeighborsClassifier
clf_param_grid = {
'clf__n_neighbors': Integer(1, 20),
'clf__weights': Categorical(['uniform', 'distance']),
'clf__metric': Categorical(['euclidean', 'manhattan', 'minkowski']),
}
n_iter = 25
class BestSVC(BaseClassifier):
sklearn_estimator = SVC
clf_param_grid = {
'clf__C': Real(1e-2, 1e+3, 'log-uniform'),
'clf__gamma': Real(1e-4, 1e+1, 'log-uniform'),
'clf__degree': Integer(1, 3),
'clf__kernel': Categorical(['linear', 'poly', 'rbf']),
'clf__probability': Categorical([True]),
'clf__class_weight': Categorical([None, 'balanced'])
}
n_iter = 50
class ClassifierSearch:
def __init__(self, score_method = 'roc_auc'):
self._score_method = score_method
self.classifiers = {
"dummy": BestDummyClassifier,
"forest": BestRandomForestClassifier,
"gb": BestGradientBoostingClassifier,
"ridge": BestRidgeClassifier,
"knn": BestKNeighborsClassifier,
}
for name, clf in self.classifiers.items():
self.classifiers[name] = clf(score_method=score_method)
self.scores = None
def fit(self, X_train, X_test, y_train, y_test):
self.scores = {"Train": {}, "Test": {}}
for name, clf in self.classifiers.items():
clf.fit(X_train, y_train, surpress_warnings=True, verbose=False)
self.scores["Train"][name] = clf.best_score_
self.scores["Test"][name] = clf.score(X_test, y_test)
self.scores = pd.DataFrame(self.scores)
self.best_classifier = self.classifiers[self.scores["Train"].idxmax()]
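# --- Illustrative demo (added; not part of the original module) ---
# A minimal sketch of fitting one wrapped classifier on synthetic data, assuming
# the dependency versions pinned in setup.py; dataset and split sizes are arbitrary.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=200, n_features=10, random_state=0)
    demo_clf = BestRidgeClassifier(score_method="roc_auc")
    # Bayesian search over the ridge parameter grid with 5-fold cross-validation
    demo_clf.fit(X_demo[:150], y_demo[:150])
    print("held-out roc_auc:", demo_clf.score(X_demo[150:], y_demo[150:]))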
```
#### File: civetqc/resources/__init__.py
```python
import os
import json
from pkg_resources import resource_filename, resource_listdir
with open(resource_filename(__name__, "config.json")) as file:
config = json.load(file)
class ResourceFilepaths:
@classmethod
@property
def saved_models(cls):
return cls.load("saved_models", ".pkl")
@classmethod
@property
def simulated_data(cls):
""" method from saved models """
return cls.load("simulated_data", ".csv")
@staticmethod
def load(directory: str, file_extension: str):
filepaths = {}
for filename in resource_listdir(__name__, directory):
if filename.endswith(file_extension):
filepath = resource_filename(__name__, "/".join([directory, filename]))
if not os.path.isfile(filepath):
raise FileNotFoundError(f"File not found: {filepath}")
filepaths[filename[:-len(file_extension)]] = filepath
if filepaths == {}:
raise Exception("Could not find any files!")
return filepaths
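# --- Usage sketch (added for illustration; not part of the original module) ---
# The class-level properties map resource file stems to absolute paths, e.g.
#
#     model_paths = ResourceFilepaths.saved_models     # {"<name>": ".../saved_models/<name>.pkl", ...}
#     data_paths = ResourceFilepaths.simulated_data    # {"<name>": ".../simulated_data/<name>.csv", ...}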
```
#### File: joshunrau/qcivet/setup.py
```python
import os
from pathlib import Path
from pkg_resources import parse_requirements
from setuptools import find_packages, setup
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
PACKAGE_DIR = os.path.join(BASE_DIR, "civetqc")
def get_long_description():
with open(os.path.join(BASE_DIR, "README.md"), "r") as file:
return file.read()
def get_install_requires():
with Path('requirements.txt').open() as requirements_txt:
return [str(r) for r in parse_requirements(requirements_txt)]
setup(
name = "civetqc",
version = "0.0.4",
author="<NAME>",
author_email="<EMAIL>",
description="civetqc is a command-line utility for automated quality control of CIVET outputs",
long_description=get_long_description(),
long_description_content_type="text/markdown",
url="https://github.com/joshunrau/civetqc",
project_urls={
"Bug Tracker": "https://github.com/joshunrau/civetqc/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=find_packages(where="."),
python_requires=">=3.9",
install_requires = [
'matplotlib==3.5.2',
'numpy==1.22.4',
'pandas==1.4.2',
'scikit_learn==1.1.1',
'scikit_optimize==0.9.0',
'seaborn==0.11.2',
'setuptools==62.3.2',
'yellowbrick==1.4'
],
entry_points={
'console_scripts': [
'civetqc=civetqc.main:main'
]
},
include_package_data=True,
package_data = {
"" : [
"resources/*.json",
"resources/saved_models/*.pkl",
"resources/simulated_data/*.csv",
]
}
)
``` |
{
"source": "JoshVarty/AudioTagging",
"score": 2
} |
#### File: AudioTagging/fastai_audio/audio_databunch.py
```python
import torch
import os
import numpy as np
from fastai.core import ifnone
from fastai.vision import channel_view, normalize_funcs, Image
from fastai.basics import DataBunch, ItemList, Optional, Collection, Callable, Tensor, Iterator, PathOrStr, DatasetType
from utils import open_audio
class AudioDataBunch(DataBunch):
# Subclass because of bug to give dl_tfms to underlying dataloader
@classmethod
def create(cls, train_ds, valid_ds,
tfms:Optional[Collection[Callable]]=None, # There is a bug in LabelLists because dl_tfms is not given to dataloader
**kwargs)->'AudioDataBunch':
db = super().create(train_ds=train_ds, valid_ds=valid_ds, dl_tfms=tfms, **kwargs)
return db
def show_batch(self, rows:int=5, ds_type:DatasetType=DatasetType.Train, **kwargs):
dl = self.dl(ds_type)
ds = dl.dl.dataset
idx = np.random.choice(len(ds), size=rows, replace=False)
batch = ds[idx]
max_count = min(rows, len(batch))
xs, ys, xs_processed, ys_processed = [], [], [], []
for i in range(max_count):
x, x_processed, y, y_processed = batch[i][0], batch[i][0].data, batch[i][1], torch.tensor(batch[i][1].data)
xs.append(x)
xs_processed.append(x_processed)
ys.append(y)
ys_processed.append(y_processed)
xs_processed = torch.stack(xs_processed, dim=0)
ys_processed = torch.stack(ys_processed, dim=0)
for tfm in dl.tfms:
xs_processed, ys_processed = tfm((xs_processed, ys_processed))
self.train_ds.show_xys(xs, ys, xs_processed=xs_processed.unbind(dim=0), **kwargs)
del xs, ys, xs_processed, ys_processed
# Inspired by ImageDataBunch
def batch_stats(self, funcs:Collection[Callable]=None, ds_type:DatasetType=DatasetType.Train)->Tensor:
"Grab a batch of data and call reduction function `func` per channel"
funcs = ifnone(funcs, [torch.mean,torch.std])
x = self.one_batch(ds_type=ds_type, denorm=False)[0].cpu()
return [func(channel_view(x), 1) for func in funcs]
# Inspired by ImageDataBunch
def normalize(self, stats:Collection[Tensor]=None, do_x:bool=True, do_y:bool=False)->None:
"Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)"
if getattr(self,'norm',False): raise Exception('Can not call normalize twice')
if stats is None: self.stats = self.batch_stats()
else: self.stats = stats
self.norm,self.denorm = normalize_funcs(*self.stats, do_x=do_x, do_y=do_y)
self.add_tfm(self.norm)
return self
# Inspired by https://docs.fast.ai/tutorial.itemlist.html
class AudioItemList(ItemList):
_bunch = AudioDataBunch # Needed to include normalize
def __init__(self, items:Iterator, using_librosa=False, downsampling=None, **kwargs):
super().__init__(items=items, **kwargs)
self.using_librosa = using_librosa
self.copy_new.append('using_librosa')
self.downsampling = downsampling
self.copy_new.append('downsampling')
def get(self, i):
fn = super().get(i)
return open_audio(fn, using_librosa=self.using_librosa, downsampling=self.downsampling)
@classmethod
def from_df(cls, df, path, using_librosa=False, folder:PathOrStr=None, downsampling=None, **kwargs):
#if folder is not None: path2 = str(path)+folder
res = super().from_df(df, path=path, **kwargs)
pref = f'{res.path}{os.path.sep}'
if folder is not None: pref += f'{folder}{os.path.sep}'
res.items = np.char.add(pref, res.items.astype(str))
res.using_librosa=using_librosa
res.downsampling = downsampling
return res
def reconstruct(self, t:Tensor, x:Tensor = None):
raise Exception("Not implemented yet")
# From torch
#return ImagePoints(FlowField(x.size, t), scale=False)
def show_xys(self, xs, ys, xs_processed=None, figsize=None, **kwargs):
if xs_processed is None:
for x, y in zip(xs, ys):
x.show(title=str(y), figsize=figsize, **kwargs)
else:
for x, y, x_processed in zip(xs, ys, xs_processed):
x.show(title=str(y), figsize=figsize, **kwargs)
for channel in range(x_processed.size(0)):
Image(x_processed[channel, :, :].unsqueeze(0)).show(figsize=figsize)
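# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of the intended fastai v1 data-block flow; the DataFrame,
# column names and batch size are placeholders:
#
#     items = AudioItemList.from_df(df, path='data', folder='clips', cols='fname',
#                                   using_librosa=True)
#     data = (items.split_by_rand_pct(0.2)
#                  .label_from_df(cols='label')
#                  .databunch(bs=32)
#                  .normalize())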
``` |
{
"source": "JoshVarty/GoogleQALabeling",
"score": 2
} |
#### File: GoogleQALabeling/src/basic_model.py
```python
import os
import re
import gc
import time
import spacy
import random
import pickle
import transformers
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import keras.backend as K
import tensorflow_hub as hub
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from scipy.stats import spearmanr
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import train_test_split, cross_val_score, KFold
# Used by build_matrix_adv below: gensim loads the spell-check vectors and the
# NLTK stemmers provide the ps/lc/sb fallbacks it references.
import gensim
from nltk.stem import PorterStemmer, LancasterStemmer, SnowballStemmer
ps = PorterStemmer()
lc = LancasterStemmer()
sb = SnowballStemmer("english")
# added preprocessing from https://www.kaggle.com/wowfattie/3rd-place/data
def sigmoid(x):
return 1 / (1 + np.exp(-x))
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '\n', '\xa0', '\t',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '\u3000', '\u202f',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '«',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]
mispell_dict = {"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"couldnt" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"doesnt" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"havent" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"shouldnt" : "should not",
"that's" : "that is",
"thats" : "that is",
"there's" : "there is",
"theres" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"theyre": "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not",
"tryin'":"trying"}
def clean_text(x):
x = str(x)
for punct in puncts:
x = x.replace(punct, f' {punct} ')
return x
def clean_numbers(x):
x = re.sub('[0-9]{5,}', '#####', x)
x = re.sub('[0-9]{4}', '####', x)
x = re.sub('[0-9]{3}', '###', x)
x = re.sub('[0-9]{2}', '##', x)
return x
def _get_mispell(mispell_dict):
mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
return mispell_dict, mispell_re
def replace_typical_misspell(text):
mispellings, mispellings_re = _get_mispell(mispell_dict)
def replace(match):
return mispellings[match.group(0)]
return mispellings_re.sub(replace, text)
def clean_data(df, columns: list):
for col in columns:
df[col] = df[col].apply(lambda x: clean_numbers(x))
df[col] = df[col].apply(lambda x: clean_text(x.lower()))
df[col] = df[col].apply(lambda x: replace_typical_misspell(x))
return df
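# Illustrative behaviour note (added; not part of the original script): on its
# own, the misspelling map expands common contractions, e.g.
#     replace_typical_misspell("they're here, aren't they?")
#     -> "they are here, are not they?"
# clean_data applies clean_numbers, lower-cased clean_text and this replacement,
# in that order, to each requested column.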
def get_coefs(word,*arr):
return word, np.asarray(arr, dtype='float32')
def load_embeddings(path):
with open(path,'rb') as f:
emb_arr = pickle.load(f)
return emb_arr
def build_matrix_adv(embedding_path: str = '',
embedding_path_spellcheck: str = r'f:\embeddings\wiki-news-300d-1M\wiki-news-300d-1M.vec',
word_dict: dict = None, lemma_dict: dict = None, max_features: int = 100000,
embed_size: int= 300, ):
spell_model = gensim.models.KeyedVectors.load_word2vec_format(embedding_path_spellcheck)
words = spell_model.index2word
w_rank = {}
for i, word in enumerate(words):
w_rank[word] = i
WORDS = w_rank
def P(word):
"Probability of `word`."
# use inverse of rank as proxy
# returns 0 if the word isn't in the dictionary
return - WORDS.get(word, 0)
def correction(word):
"Most probable spelling correction for word."
return max(candidates(word), key=P)
def candidates(word):
"Generate possible spelling corrections for word."
return (known([word]) or known(edits1(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
def singlify(word):
return "".join([letter for i, letter in enumerate(word) if i == 0 or letter != word[i - 1]])
# embedding_index = dict(get_coefs(*o.strip().split(" ")) for o in open(embedding_path, encoding='utf-8'))
# embedding_index = dict(get_coefs(*o.strip().split(" ")) for o in open(embedding_path, encoding='utf-8', errors='ignore'))
embedding_index = load_embeddings(embedding_path)
nb_words = min(max_features, len(word_dict))
embedding_matrix = np.zeros((nb_words + 1, embed_size))
unknown_words = []
for word, i in word_dict.items():
key = word
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
continue
embedding_vector = embedding_index.get(word.lower())
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
continue
embedding_vector = embedding_index.get(word.upper())
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
continue
embedding_vector = embedding_index.get(word.capitalize())
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
continue
word = ps.stem(key)
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
embedding_matrix[word_dict[key]] = embedding_vector
continue
word = lc.stem(key)
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
embedding_matrix[word_dict[key]] = embedding_vector
continue
word = sb.stem(key)
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
embedding_matrix[word_dict[key]] = embedding_vector
continue
word = lemma_dict[key]
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
embedding_matrix[word_dict[key]] = embedding_vector
continue
if len(key) > 1:
word = correction(key)
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
embedding_matrix[word_dict[key]] = embedding_vector
continue
unknown_words.append(key)
print(f'{len(unknown_words) * 100 / len(word_dict):.4f}% words are not in embeddings')
return embedding_matrix, nb_words, unknown_words
def get_word_lemma_dict(full_text: list = None, ):
nlp = spacy.load('en_core_web_lg', disable=['parser','ner','tagger'])
nlp.vocab.add_flag(lambda s: s.lower() in spacy.lang.en.stop_words.STOP_WORDS, spacy.attrs.IS_STOP)
word_dict = {}
word_index = 1
lemma_dict = {}
docs = nlp.pipe(full_text, n_threads = os.cpu_count())
for doc in docs:
for token in doc:
            if (token.text not in word_dict) and (token.pos_ != "PUNCT"):
word_dict[token.text] = word_index
word_index += 1
lemma_dict[token.text] = token.lemma_
return lemma_dict, word_dict
def build_matrix(embedding_path: str = '',
embedding_path_spellcheck: str = r'f:\embeddings\wiki-news-300d-1M\wiki-news-300d-1M.vec',
word_dict: dict = None, max_features: int = 100000,
embed_size: int= 300, ):
# embedding_index = dict(get_coefs(*o.strip().split(" ")) for o in open(embedding_path, encoding='utf-8'))
embedding_index = load_embeddings(embedding_path)
nb_words = min(max_features, len(word_dict))
embedding_matrix = np.zeros((nb_words + 1, embed_size))
unknown_words = []
for word, i in word_dict.items():
key = word
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
continue
embedding_vector = embedding_index.get(word.lower())
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
continue
embedding_vector = embedding_index.get(word.upper())
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
continue
embedding_vector = embedding_index.get(word.capitalize())
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
continue
unknown_words.append(key)
print(f'{len(unknown_words) * 100 / len(word_dict):.4f}% words are not in embeddings')
return embedding_matrix, nb_words, unknown_words
def seed_everything(seed=1234):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def fetch_vectors(string_list, batch_size=64, max_len=512):
# inspired by https://jalammar.github.io/a-visual-guide-to-using-bert-for-the-first-time/
DEVICE = torch.device("cuda")
tokenizer = transformers.DistilBertTokenizer.from_pretrained("../input/distilbertbaseuncased/")
model = transformers.DistilBertModel.from_pretrained("../input/distilbertbaseuncased/")
model.to(DEVICE)
fin_features = []
for data in chunks(string_list, batch_size):
tokenized = []
for x in data:
x = " ".join(x.strip().split()[:500])
tok = tokenizer.encode(x, add_special_tokens=True)
tokenized.append(tok[:max_len])
padded = np.array([i + [0] * (max_len - len(i)) for i in tokenized])
attention_mask = np.where(padded != 0, 1, 0)
input_ids = torch.tensor(padded).to(DEVICE)
attention_mask = torch.tensor(attention_mask).to(DEVICE)
with torch.no_grad():
last_hidden_states = model(input_ids, attention_mask=attention_mask)
features = last_hidden_states[0][:, 0, :].cpu().numpy()
fin_features.append(features)
fin_features = np.vstack(fin_features)
return fin_features
def get_embedding_features(train, test, input_columns, only_test=False, batch_size=4):
"""
https://www.kaggle.com/ragnar123/simple-lgbm-solution-baseline?scriptVersionId=24198335
"""
    # load the universal sentence encoder model to get sentence embeddings
module_url = "../data/universalsentenceencoderlarge4/"
embed = hub.load(module_url)
# create empty dictionaries to store final results
if not only_test:
embedding_train = {}
embedding_test = {}
# iterate over text columns to get senteces embeddings with the previous loaded model
for text in input_columns:
print(text)
if not only_test:
train_text = train[text].str.replace('?', '.').str.replace('!', '.').tolist()
test_text = test[text].str.replace('?', '.').str.replace('!', '.').tolist()
        # create empty lists to save each batch
curr_train_emb = []
curr_test_emb = []
        # process sentences in batches to get their corresponding embeddings (1 x 512 per sentence)
if not only_test:
ind = 0
while ind * batch_size < len(train_text):
curr_train_emb.append(embed(train_text[ind * batch_size: (ind + 1) * batch_size])['outputs'].numpy())
ind += 1
ind = 0
while ind * batch_size < len(test_text):
curr_test_emb.append(embed(test_text[ind * batch_size: (ind + 1) * batch_size])['outputs'].numpy())
ind += 1
        # stack the batch arrays into a 2D array with one row per sentence and 512 embedding columns (sentence encoder output)
if not only_test:
embedding_train[text + '_embedding'] = np.vstack(curr_train_emb)
embedding_test[text + '_embedding'] = np.vstack(curr_test_emb)
del embed
K.clear_session()
gc.collect()
if only_test:
return embedding_test
else:
return embedding_train, embedding_test
def get_dist_features(embedding_train, embedding_test):
    # squared L2 distance per row: (x1 - y1)^2 + (x2 - y2)^2 + ... + (xn - yn)^2,
    # giving one value per example
    l2_dist = lambda x, y: np.power(x - y, 2).sum(axis=1)
    # dot-product similarity per row: x1*y1 + x2*y2 + ... + xn*yn
    cos_dist = lambda x, y: (x * y).sum(axis=1)
    # transpose: we have 6 vectors of length n_samples, but need an (n_samples, 6) array
dist_features_train = np.array([
l2_dist(embedding_train['question_title_embedding'], embedding_train['answer_embedding']),
l2_dist(embedding_train['question_body_embedding'], embedding_train['answer_embedding']),
l2_dist(embedding_train['question_body_embedding'], embedding_train['question_title_embedding']),
cos_dist(embedding_train['question_title_embedding'], embedding_train['answer_embedding']),
cos_dist(embedding_train['question_body_embedding'], embedding_train['answer_embedding']),
cos_dist(embedding_train['question_body_embedding'], embedding_train['question_title_embedding'])]).T
    # transpose: we have 6 vectors of length n_samples, but need an (n_samples, 6) array
dist_features_test = np.array([
l2_dist(embedding_test['question_title_embedding'], embedding_test['answer_embedding']),
l2_dist(embedding_test['question_body_embedding'], embedding_test['answer_embedding']),
l2_dist(embedding_test['question_body_embedding'], embedding_test['question_title_embedding']),
cos_dist(embedding_test['question_title_embedding'], embedding_test['answer_embedding']),
cos_dist(embedding_test['question_body_embedding'], embedding_test['answer_embedding']),
cos_dist(embedding_test['question_body_embedding'], embedding_test['question_title_embedding'])]).T
return dist_features_train, dist_features_test
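# Shape sketch (added for illustration; not part of the original script): with
# stand-in 512-dim embeddings for n examples,
#     emb = {k: np.random.rand(n, 512) for k in
#            ['question_title_embedding', 'question_body_embedding', 'answer_embedding']}
#     tr, te = get_dist_features(emb, emb)
# both outputs have shape (n, 6): three squared-L2 distances followed by three
# dot-product similarities between the title/body/answer embeddings.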
# training the model
def train_model(model, train_loader, valid_loader, n_epochs=3, lr=0.001):
optimizer = torch.optim.Adam(model.parameters(), lr)
patience = 2
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=1, factor=0.1)
loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean').cuda()
    best_score = 0
    p = 0  # consecutive epochs without validation-score improvement
for epoch in range(n_epochs):
start_time = time.time()
model.train()
avg_loss = 0.
for question, answer, title, category, host, use_emb_q, use_emb_a, use_emb_t, dist_feature, y_batch in tqdm(
train_loader, disable=True):
question = question.long().cuda()
answer = answer.long().cuda()
title = title.long().cuda()
category = category.long().cuda()
host = host.long().cuda()
use_emb_q = use_emb_q.cuda()
use_emb_a = use_emb_a.cuda()
use_emb_t = use_emb_t.cuda()
dist_feature = dist_feature.cuda()
y_batch = y_batch.cuda()
y_pred = model(question, answer, title, category, host, use_emb_q, use_emb_a, use_emb_t, dist_feature)
loss = loss_fn(y_pred.double(), y_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item() / len(train_loader)
model.eval()
avg_val_loss = 0.
preds = []
original = []
for i, (
question, answer, title, category, host, use_emb_q, use_emb_a, use_emb_t, dist_feature, y_batch) in enumerate(
valid_loader):
question = question.long().cuda()
answer = answer.long().cuda()
title = title.long().cuda()
category = category.long().cuda()
host = host.long().cuda()
use_emb_q = use_emb_q.cuda()
use_emb_a = use_emb_a.cuda()
use_emb_t = use_emb_t.cuda()
dist_feature = dist_feature.cuda()
y_batch = y_batch.cuda()
y_pred = model(question, answer, title, category, host, use_emb_q, use_emb_a, use_emb_t,
dist_feature).detach()
avg_val_loss += loss_fn(y_pred.double(), y_batch).item() / len(valid_loader)
preds.append(y_pred.cpu().numpy())
original.append(y_batch.cpu().numpy())
score = 0
for i in range(30):
score += np.nan_to_num(
spearmanr(np.concatenate(original)[:, i], np.concatenate(preds)[:, i]).correlation / 30)
elapsed_time = time.time() - start_time
print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t spearman={:.2f} \t time={:.2f}s'.format(
epoch + 1, n_epochs, avg_loss, avg_val_loss, score, elapsed_time))
scheduler.step(avg_val_loss)
valid_score = score
if valid_score > best_score:
best_score = valid_score
p = 0
torch.save(model.state_dict(), 'model.pt')
        else:
            # the validation score did not improve this epoch
            p += 1
            print(f'{p} epoch(s) without score improvement')
if p > patience:
print('Stopping training')
stop = True
break
model.load_state_dict(torch.load('model.pt'))
return model
def make_prediction(test_loader: DataLoader = None, model=None):
prediction = np.zeros((len(test_loader.dataset), 30))
model.eval()
for i, (question, answer, title, category, host, use_emb_q, use_emb_a, use_emb_t, dist_feature, _) in enumerate(
test_loader):
start_index = i * test_loader.batch_size
end_index = min(start_index + test_loader.batch_size, len(test_loader.dataset))
question = question.long().cuda()
answer = answer.long().cuda()
title = title.long().cuda()
category = category.long().cuda()
host = host.long().cuda()
use_emb_q = use_emb_q.cuda()
use_emb_a = use_emb_a.cuda()
use_emb_t = use_emb_t.cuda()
dist_feature = dist_feature.cuda()
y_pred = model(question, answer, title, category, host, use_emb_q, use_emb_a, use_emb_t, dist_feature).detach()
y_pred = torch.sigmoid(y_pred)
prediction[start_index:end_index, :] += y_pred.detach().cpu().numpy()
return prediction
class Attention(nn.Module):
def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
super(Attention, self).__init__(**kwargs)
self.supports_masking = True
self.bias = bias
self.feature_dim = feature_dim
self.step_dim = step_dim
self.features_dim = 0
weight = torch.zeros(feature_dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight)
if bias:
self.b = nn.Parameter(torch.zeros(step_dim))
def forward(self, x, mask=None):
feature_dim = self.feature_dim
step_dim = self.step_dim
eij = torch.mm(
x.contiguous().view(-1, feature_dim),
self.weight
).view(-1, step_dim)
if self.bias:
eij = eij + self.b
eij = torch.tanh(eij)
a = torch.exp(eij)
if mask is not None:
a = a * mask
        # keep the epsilon inside the denominator so an all-zero row cannot divide by zero
        a = a / (torch.sum(a, 1, keepdim=True) + 1e-10)
weighted_input = x * torch.unsqueeze(a, -1)
return torch.sum(weighted_input, 1)
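# Shape note (added for illustration; not part of the original model code):
# Attention expects input of shape (batch, step_dim, feature_dim) and returns a
# (batch, feature_dim) attention-pooled tensor, e.g.
#     attn = Attention(feature_dim=256, step_dim=500)
#     pooled = attn(torch.randn(8, 500, 256))    # -> torch.Size([8, 256])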
class GELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class SpatialDropout(nn.Dropout2d):
def forward(self, x):
x = x.unsqueeze(2) # (N, T, 1, K)
x = x.permute(0, 3, 2, 1) # (N, K, 1, T)
x = super(SpatialDropout, self).forward(x) # (N, K, 1, T), some features are masked
x = x.permute(0, 3, 2, 1) # (N, T, 1, K)
x = x.squeeze(2) # (N, T, K)
return x
class Mish(nn.Module):
"""
Mish - "Mish: A Self Regularized Non-Monotonic Neural Activation Function"
https://arxiv.org/abs/1908.08681v1
implemented for PyTorch / FastAI by lessw2020
github: https://github.com/lessw2020/mish
"""
def __init__(self):
super().__init__()
def forward(self, x):
# inlining this saves 1 second per epoch (V100 GPU) vs having a temp x and then returning x(!)
return x * (torch.tanh(F.softplus(x)))
class NeuralNet5(nn.Module):
def __init__(self,
hidden_size: int = 128,
max_len: int = 500,
max_len_title: int = 30,
n_cat: int = 3,
cat_emb: int = 6,
n_host: int = 55,
host_emb: int = 28,
additional_embedding_shape: int = 512,
embedding_matrix=None):
super(NeuralNet5, self).__init__()
self.embedding = nn.Embedding(*embedding_matrix.shape)
self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
self.embedding.weight.requires_grad = False
self.embedding_dropout = SpatialDropout(0.3)
self.category_embedding = nn.Embedding(n_cat, int(cat_emb))
self.host_embedding = nn.Embedding(n_host, int(host_emb))
self.linear_q_add = nn.Linear(300, 128)
self.linear_q_add1 = nn.Linear(128, 30)
self.bilinear_add = nn.Bilinear(30, 30, 30)
self.lstm_q = nn.LSTM(300, hidden_size, bidirectional=True, batch_first=True)
self.gru_q = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
self.lstm_a = nn.LSTM(300, hidden_size, bidirectional=True, batch_first=True)
self.gru_a = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
self.lstm_t = nn.LSTM(300, hidden_size, bidirectional=True, batch_first=True)
self.gru_t = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
self.lstm_attention_q = Attention(hidden_size * 2, max_len)
self.gru_attention_q = Attention(hidden_size * 2, max_len)
self.lstm_attention_a = Attention(hidden_size * 2, max_len)
self.gru_attention_a = Attention(hidden_size * 2, max_len)
self.lstm_attention_t = Attention(hidden_size * 2, max_len_title)
self.gru_attention_t = Attention(hidden_size * 2, max_len_title)
self.linear_q = nn.Linear(1024, 64)
self.relu_q = Mish()
self.linear_a = nn.Linear(1024, 64)
self.relu_a = Mish()
self.linear_t = nn.Linear(1024, 64)
self.relu_t = Mish()
self.linear_q_emb = nn.Linear(additional_embedding_shape, 64)
self.relu_q_emb = Mish()
self.linear_a_emb = nn.Linear(additional_embedding_shape, 64)
self.relu_a_emb = Mish()
self.linear_t_emb = nn.Linear(additional_embedding_shape, 64)
self.relu_t_emb = Mish()
self.linear1 = nn.Sequential(nn.Linear(256 + int(cat_emb) + int(host_emb) + 6, 64),
nn.BatchNorm1d(64),
nn.ReLU(inplace=True),
nn.Dropout(0.5))
self.linear_q_out = nn.Linear(64, 21)
self.bilinear = nn.Bilinear(64, 64, 64)
self.bilinear_emb = nn.Bilinear(64, 64, 64)
self.linear2 = nn.Sequential(nn.Linear(390, 64),
nn.BatchNorm1d(64),
nn.ReLU(inplace=True),
nn.Dropout(0.5))
self.linear_aq_out = nn.Linear(64, 9)
def forward(self, question, answer, title, category, host, use_emb_q, use_emb_a, use_emb_t, dist_feature):
h_embedding_q = self.embedding(question)
h_embedding_q = self.embedding_dropout(h_embedding_q)
h_lstm_q, _ = self.lstm_q(h_embedding_q)
h_gru_q, _ = self.gru_q(h_lstm_q)
h_lstm_atten_q = self.lstm_attention_q(h_lstm_q)
h_gru_atten_q = self.gru_attention_q(h_gru_q)
avg_pool_q = torch.mean(h_gru_q, 1)
max_pool_q, _ = torch.max(h_gru_q, 1)
h_embedding_a = self.embedding(answer)
h_embedding_a = self.embedding_dropout(h_embedding_a)
h_lstm_a, _ = self.lstm_a(h_embedding_a)
h_gru_a, _ = self.gru_a(h_lstm_a)
h_lstm_atten_a = self.lstm_attention_a(h_lstm_a)
h_gru_atten_a = self.gru_attention_a(h_gru_a)
avg_pool_a = torch.mean(h_gru_a, 1)
max_pool_a, _ = torch.max(h_gru_a, 1)
h_embedding_t = self.embedding(title)
h_embedding_t = self.embedding_dropout(h_embedding_t)
h_lstm_t, _ = self.lstm_t(h_embedding_t)
h_gru_t, _ = self.gru_t(h_lstm_t)
h_lstm_atten_t = self.lstm_attention_t(h_lstm_t)
h_gru_atten_t = self.gru_attention_t(h_gru_t)
avg_pool_t = torch.mean(h_gru_t, 1)
max_pool_t, _ = torch.max(h_gru_t, 1)
category = self.category_embedding(category)
host = self.host_embedding(host)
add = torch.cat((h_embedding_q, h_embedding_a, h_embedding_t), 1)
add = self.linear_q_add(torch.mean(add, 1))
add = self.linear_q_add1(add)
q = torch.cat((h_lstm_atten_q, h_gru_atten_q, avg_pool_q, max_pool_q), 1)
a = torch.cat((h_lstm_atten_a, h_gru_atten_a, avg_pool_a, max_pool_a), 1)
t = torch.cat((h_lstm_atten_t, h_gru_atten_t, avg_pool_t, max_pool_t), 1)
q = self.relu_q(self.linear_q(q))
a = self.relu_a(self.linear_a(a))
t = self.relu_t(self.linear_t(t))
q_emb = self.relu_q_emb(self.linear_q_emb(use_emb_q))
a_emb = self.relu_a_emb(self.linear_a_emb(use_emb_a))
t_emb = self.relu_t_emb(self.linear_t_emb(use_emb_t))
hidden_q = self.linear1(torch.cat((q, t, q_emb, t_emb, category, host, dist_feature), 1))
q_result = self.linear_q_out(hidden_q)
bil_sim = self.bilinear(q, a)
bil_sim_emb = self.bilinear_emb(q_emb, a_emb)
hidden_aq = self.linear2(torch.cat((q, a, q_emb, a_emb, bil_sim, bil_sim_emb, dist_feature), 1))
aq_result = self.linear_aq_out(hidden_aq)
out = torch.cat([q_result, aq_result], 1)
out = self.bilinear_add(out, add)
return out
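# Added summary comment (not in the original repo): each text branch (question, answer,
# title) is reduced to a 64-d feature via LSTM/GRU + attention + pooling, the use_emb_*
# inputs (presumably Universal Sentence Encoder vectors) are projected to matching 64-d
# features, and two heads produce 21 question-only targets and 9 question-answer targets,
# concatenated into the 30 outputs and finally combined with the embedding branch through
# bilinear_add.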
class TextDataset(Dataset):
def __init__(self, question_data, answer_data, title_data, category_data, host_data, use_embeddings, dist_features, idxs, targets=None):
self.question_data = question_data[idxs]
self.answer_data = answer_data[idxs]
self.title_data = title_data[idxs]
self.category_data = category_data[idxs]
self.host_data = host_data[idxs]
self.use_embeddings_q = use_embeddings['question_body_embedding'][idxs]
self.use_embeddings_a = use_embeddings['answer_embedding'][idxs]
self.use_embeddings_t = use_embeddings['question_title_embedding'][idxs]
self.dist_features = dist_features[idxs]
self.targets = targets[idxs] if targets is not None else np.zeros((self.question_data.shape[0], 30))
def __getitem__(self, idx):
question = self.question_data[idx]
answer = self.answer_data[idx]
title = self.title_data[idx]
category = self.category_data[idx]
host = self.host_data[idx]
use_emb_q = self.use_embeddings_q[idx]
use_emb_a = self.use_embeddings_a[idx]
use_emb_t = self.use_embeddings_t[idx]
dist_feature = self.dist_features[idx]
target = self.targets[idx]
return question, answer, title, category, host, use_emb_q, use_emb_a, use_emb_t, dist_feature, target
def __len__(self):
return len(self.question_data)
def main():
path = '../data'
sample_submission = pd.read_csv(f'{path}/sample_submission.csv')
test = pd.read_csv(f'{path}/test.csv').fillna(' ')
train = pd.read_csv(f'{path}/train.csv').fillna(' ')
# TODO, do we really want this?
train = clean_data(train, ['answer', 'question_body', 'question_title'])
test = clean_data(test, ['answer', 'question_body', 'question_title'])
seed_everything()
# %%time
embedding_test = get_embedding_features(train, test, ['answer', 'question_body', 'question_title'], only_test=True)
embedding_train = {}
embedding_train['answer_embedding'] = np.load(
'../data/qa-labeling-files-for-inference/embedding_train_answer_embedding.npy', allow_pickle=True)
embedding_train['question_body_embedding'] = np.load(
'../data/qa-labeling-files-for-inference/embedding_train_question_body_embedding.npy', allow_pickle=True)
embedding_train['question_title_embedding'] = np.load(
'../data/qa-labeling-files-for-inference/embedding_train_question_title_embedding.npy', allow_pickle=True)
dist_features_train, dist_features_test = get_dist_features(embedding_train, embedding_test)
tokenizer = Tokenizer()
full_text = list(train['question_body']) + \
list(train['answer']) + \
list(train['question_title']) + \
list(test['question_body']) + \
list(test['answer']) + \
list(test['question_title'])
tokenizer.fit_on_texts(full_text)
embed_size = 300
embedding_path = "../data/pickled-crawl300d2m-for-kernel-competitions/crawl-300d-2M.pkl"
#lemma_dict, word_dict = get_word_lemma_dict(full_text)
embedding_matrix, nb_words, unknown_words = build_matrix(embedding_path,
'../data/wikinews300d1mvec/wiki-news-300d-1M.vec',
tokenizer.word_index,
100000, embed_size)
unique_hosts = list(set(train['host'].unique().tolist() + test['host'].unique().tolist()))
host_dict = {i + 1: e for i, e in enumerate(unique_hosts)}
host_dict_reverse = {v: k for k, v in host_dict.items()}
unique_categories = list(set(train['category'].unique().tolist() + test['category'].unique().tolist()))
category_dict = {i + 1: e for i, e in enumerate(unique_categories)}
category_dict_reverse = {v: k for k, v in category_dict.items()}
max_len = 500 # TODO: Is this appropriate
max_len_title = 30
train_question_tokenized = pad_sequences(tokenizer.texts_to_sequences(train['question_body']), maxlen=max_len)
train_answer_tokenized = pad_sequences(tokenizer.texts_to_sequences(train['answer']), maxlen=max_len)
train_title_tokenized = pad_sequences(tokenizer.texts_to_sequences(train['question_title']), maxlen=max_len_title)
test_question_tokenized = pad_sequences(tokenizer.texts_to_sequences(test['question_body']), maxlen=max_len)
test_answer_tokenized = pad_sequences(tokenizer.texts_to_sequences(test['answer']), maxlen=max_len)
test_title_tokenized = pad_sequences(tokenizer.texts_to_sequences(test['question_title']), maxlen=max_len_title)
train_host = train['host'].apply(lambda x: host_dict_reverse[x]).values
train_category = train['category'].apply(lambda x: category_dict_reverse[x]).values
test_host = test['host'].apply(lambda x: host_dict_reverse[x]).values
test_category = test['category'].apply(lambda x: category_dict_reverse[x]).values
y = train[sample_submission.columns[1:]].values
num_workers = 0
bs = 8
n_cat = len(category_dict) + 1
cat_emb = min(np.ceil((len(category_dict)) / 2), 50)
n_host = len(host_dict) + 1
host_emb = min(np.ceil((len(host_dict)) / 2), 50)
bs_test = 16
test_loader = DataLoader(TextDataset(test_question_tokenized, test_answer_tokenized, test_title_tokenized,
test_category, test_host, embedding_test, dist_features_test, test.index),
batch_size=bs_test, shuffle=False, num_workers=num_workers)
folds = KFold(n_splits=2, random_state=42)
preds = np.zeros((len(test), 30))
for fold_n, (train_index, valid_index) in enumerate(folds.split(train)):
print(f'Fold {fold_n + 1} started at {time.ctime()}')
train_loader = DataLoader(
TextDataset(train_question_tokenized, train_answer_tokenized, train_title_tokenized, train_category,
train_host, embedding_train,
dist_features_train, train_index, y),
batch_size=bs, shuffle=True, num_workers=num_workers, pin_memory=True)
valid_loader = DataLoader(
TextDataset(train_question_tokenized, train_answer_tokenized, train_title_tokenized, train_category,
train_host, embedding_train,
dist_features_train, valid_index, y),
batch_size=bs, shuffle=False, num_workers=num_workers, pin_memory=True)
model = NeuralNet5(embedding_matrix=embedding_matrix,
n_cat=n_cat,
cat_emb=cat_emb,
n_host=n_host,
host_emb=host_emb)
model.cuda()
model = train_model(model, train_loader, valid_loader, n_epochs=3, lr=0.001)
prediction = make_prediction(test_loader, model)
preds += prediction / folds.n_splits / 2
gc.collect()
torch.cuda.empty_cache()
print()
if __name__ == '__main__':
main()
``` |
{
"source": "JoshVarty/KaggleClouds",
"score": 3
} |
#### File: KaggleClouds/src/utils.py
```python
import cv2
import torch
import torch.nn as nn
import numpy as np
from PIL import Image
from tqdm import tqdm
from fastai.vision import pil2tensor, ImageSegment, SegmentationLabelList
def get_training_image_size(original_size, multiple=32):
"""
Our inputs to the network must be multiples of 32.
We'll find the closest size that is both a multiple of 32 and greater than the image size
"""
new_sizes = []
for dimension_size in original_size:
for j in range(20):
candidate_size = multiple * j
if candidate_size > dimension_size:
new_sizes.append(candidate_size)
break
if len(new_sizes) != len(original_size):
raise Exception("Could not find a valid size for the image")
return tuple(new_sizes)
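# Illustrative note (added; not part of the original repo): for a 350x525 crop the nearest
# larger multiples of 32 are 352 and 544, so get_training_image_size((350, 525)) == (352, 544).
# Because of the hard-coded range(20), candidate sizes stop at 32 * 19 = 608, so any
# dimension of 608 pixels or more raises the "Could not find a valid size" exception.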
def multiclass_dice_probs(probs, targets, iou=False, eps=1e-8):
"""
Dice coefficient metric for multiclass binary target with probs
"""
n = targets.shape[0] # Batch size of 4
# Flatten logits and targets
probs = probs.view(n, -1)
targets = targets.view(n, -1).float()
# Inputs are already probabilities, so no activation is applied here
intersect = (probs * targets).sum(dim=1).float()
union = (probs + targets).sum(dim=1).float()
if not iou:
l = 2. * intersect / union
else:
l = intersect / (union - intersect + eps)
# The Dice coefficient is defined to be 1 when both X and Y are empty.
# That said, we'd get a divide-by-zero-exception if union was 0 anyways...
l[union == 0.] = 1.
return l.mean()
def multiclass_dice(logits, targets, iou=False, eps=1e-8):
"""
Dice coefficient metric for multiclass binary target.
If iou=True, returns iou metric, classic for segmentation problems.
"""
n = targets.shape[0] # Batch size of 4
# Flatten logits and targets
logits = logits.view(n, -1)
targets = targets.view(n, -1).float()
# Convert logits to probabilities
probs = torch.sigmoid(logits)
intersect = (probs * targets).sum(dim=1).float()
union = (probs + targets).sum(dim=1).float()
if not iou:
l = 2. * intersect / union
else:
l = intersect / (union - intersect + eps)
# The Dice coefficient is defined to be 1 when both X and Y are empty.
# That said, we'd get a divide-by-zero-exception if union was 0 anyways...
l[union == 0.] = 1.
return l.mean()
def multiclass_dice_threshold(logits, targets, threshold=0.5, iou=False, eps=1e-8):
"""
Dice coefficient metric for multiclass binary target.
If iou=True, returns iou metric, classic for segmentation problems.
"""
n = targets.shape[0] # Batch size of 4
# Flatten logits and targets
logits = logits.view(n, -1)
targets = targets.view(n, -1).float()
# Convert logits to probabilities
probs = torch.sigmoid(logits)
preds = probs
preds[preds >= threshold] = 1
preds[preds < threshold] = 0
intersect = (preds * targets).sum(dim=1).float()
union = (preds + targets).sum(dim=1).float()
if not iou:
l = 2. * intersect / union
else:
l = intersect / (union - intersect + eps)
# The Dice coefficient is defined to be 1 when both X and Y are empty.
# That said, we'd get a divide-by-zero-exception if union was 0 anyways...
l[union == 0.] = 1.
return l.mean()
def override_open_mask():
# Our masks are overlapping so we've represented the masks as 4-channel images
# This is convenient for us because we can still store them in standard RGBA images
# However we have to make sure we load these images as RGBA in order for them to work
def custom_open_mask(filename, div=False, convert_mode='L', after_open=None):
x = Image.open(filename).convert('RGBA')
if after_open:
x = after_open(x)
x = pil2tensor(x, np.float32)
return ImageSegment(x)
def custom_open(self, fn):
return custom_open_mask(fn)
# Open image with our custom method
SegmentationLabelList.open = custom_open
def rle_decode(mask_rle: str = '', shape: tuple = (1400, 2100)):
"""
Decode rle encoded mask.
:param mask_rle: run-length as string formatted (start length)
:param shape: (height, width) of array to return
Returns numpy array, 1 - mask, 0 - background
"""
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0] * shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape, order='F')
def mask2rle(img):
"""
Convert mask to rle.
img: numpy array, 1 - mask, 0 - background
Returns run length as string formatted
"""
pixels = img.T.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
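# Hedged round-trip example (added for illustration): both helpers use column-major
# (Fortran) order, so a 2x2 mask whose first column is set encodes as '1 2'
# (start at pixel 1, run length 2):
#   >>> m = np.array([[1, 0], [1, 0]], dtype=np.uint8)
#   >>> mask2rle(m)
#   '1 2'
#   >>> (rle_decode('1 2', shape=(2, 2)) == m).all()
#   True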
def post_process(probability, threshold, min_size):
"""
Post-processing of each predicted mask: connected components with no more than `min_size`
pixels are ignored
"""
# don't remember where I saw it
mask = cv2.threshold(probability, threshold, 1, cv2.THRESH_BINARY)[1]
num_component, component = cv2.connectedComponents(mask.astype(np.uint8))
predictions = np.zeros(probability.shape, np.float32)
num = 0
for c in range(1, num_component):
p = (component == c)
if p.sum() > min_size:
predictions[p] = 1
num += 1
return predictions, num
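# Added note: with e.g. threshold=0.5 and min_size=100, a 60-pixel blob is zeroed out
# (the `p.sum() > min_size` test fails) while a 150-pixel blob is kept and increments
# `num`, so the returned mask contains only sufficiently large connected components.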
def convert_mask_to_rle(mask, threshold, min_size):
preds, num_preds = post_process(mask, threshold, min_size)
rle_mask = mask2rle(preds)
return rle_mask
def convert_masks_to_rle(test, test_preds, threshold, min_size):
unique_test_images = test.iloc[::4, :]
print(len(unique_test_images))
for i, row in tqdm(unique_test_images.iterrows()):
saved_pred = test_preds[i // 4]
fish_rle = convert_mask_to_rle(saved_pred[0], threshold, min_size)
flower_rle = convert_mask_to_rle(saved_pred[1], threshold, min_size)
gravel_rle = convert_mask_to_rle(saved_pred[2], threshold, min_size)
sugar_rle = convert_mask_to_rle(saved_pred[3], threshold, min_size)
# Save in dataframe
test.loc[test['Image_Label'] == row['im_id'] + "_Fish", 'EncodedPixels'] = fish_rle
test.loc[test['Image_Label'] == row['im_id'] + "_Flower", 'EncodedPixels'] = flower_rle
test.loc[test['Image_Label'] == row['im_id'] + "_Gravel", 'EncodedPixels'] = gravel_rle
test.loc[test['Image_Label'] == row['im_id'] + "_Sugar", 'EncodedPixels'] = sugar_rle
return test
def create_channel_from_rle(encoded_pixels, shape):
new_channel = np.zeros(shape[0] * shape[1], dtype=np.uint8)
s = encoded_pixels.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
for lo, hi in zip(starts, ends):
new_channel[lo:hi] = 1
new_channel = new_channel.reshape(shape, order='F')
return new_channel
def create_mask_for_image(df, index):
fish = df.iloc[index]
flower = df.iloc[index + 1]
gravel = df.iloc[index + 2]
sugar = df.iloc[index + 3]
full_path = "data/train_images/" + fish['im_id']
im = Image.open(full_path)
shape = im.size
# shape = (1400, 2100)
fish_channel = np.zeros(shape, dtype=np.uint8)
flower_channel = np.zeros(shape, dtype=np.uint8)
gravel_channel = np.zeros(shape, dtype=np.uint8)
sugar_channel = np.zeros(shape, dtype=np.uint8)
if isinstance(fish['EncodedPixels'], str):
fish_channel = create_channel_from_rle(fish['EncodedPixels'], shape)
if isinstance(flower['EncodedPixels'], str):
flower_channel = create_channel_from_rle(flower['EncodedPixels'], shape)
if isinstance(gravel['EncodedPixels'], str):
gravel_channel = create_channel_from_rle(gravel['EncodedPixels'], shape)
if isinstance(sugar['EncodedPixels'], str):
sugar_channel = create_channel_from_rle(sugar['EncodedPixels'], shape)
# Create fake RGBA image
new_image = np.stack([fish_channel, flower_channel, gravel_channel, sugar_channel], axis=-1)
return new_image
def dice_coef(input, target, threshold=None):
smooth = 1.0
input_flatten = input.view(-1)
if threshold is not None:
input_flatten = (input_flatten > threshold).float()
target_flatten = target.view(-1)
intersection = (input_flatten * target_flatten).sum()
return (
(2. * intersection + smooth) /
(input_flatten.sum() + target_flatten.sum() + smooth)
)
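# Added worked example: with threshold=None and identical binary masks containing s
# positive pixels, intersection == s and the score is (2*s + 1)/(2*s + 1) == 1, while
# fully disjoint masks give 1/(s1 + s2 + 1) ~= 0; the smooth=1.0 term only guards
# against empty masks.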
class DiceLoss(nn.Module):
def __init__(self, log=False):
super().__init__()
self.log = log
def forward(self, input, target):
dice_coef_value = dice_coef(torch.sigmoid(input), target)
if self.log:
return -torch.log(dice_coef_value)
else:
return 1 - dice_coef_value
class BCEDiceLoss(nn.Module):
def __init__(self, log_dice=False):
super().__init__()
self.bce_loss = nn.BCEWithLogitsLoss()
self.dice_loss = DiceLoss(log=log_dice)
def forward(self, input, target):
target = target.float()
b_loss = self.bce_loss(input, target)
d_loss = self.dice_loss(input, target)
return b_loss + d_loss
``` |
{
"source": "JoshVarty/Reacher",
"score": 3
} |
#### File: JoshVarty/Reacher/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class FCNetwork(nn.Module):
def __init__(self, state_size, action_size, output_gate=None):
super(FCNetwork, self).__init__()
self.fc1 = nn.Linear(state_size, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, action_size)
self.output_gate = output_gate
def forward(self, input):
x = F.relu(self.fc1(input))
x = F.relu(self.fc2(x))
x = self.fc3(x)
if self.output_gate is not None:
x = self.output_gate(x)
return x
class ActorCriticNetwork(nn.Module):
def __init__(self, state_size, action_size):
super(ActorCriticNetwork, self).__init__()
self.actor = FCNetwork(state_size, action_size, F.tanh)
self.critic = FCNetwork(state_size, 1)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.std = nn.Parameter(torch.ones(1, action_size)).to(self.device)
self.to(self.device)
def forward(self, state, action = None):
#state = torch.Tensor(state).to(self.device)
#Get action
a = self.actor(state)
distribution = torch.distributions.Normal(a, self.std)
if action is None:
action = distribution.sample()
log_prob = distribution.log_prob(action)
log_prob = torch.sum(log_prob, dim=1, keepdim=True)
#Get value from critic
value = self.critic(state)
return action, log_prob, value
``` |
{
"source": "JoshVarty/Rosalind",
"score": 4
} |
#### File: JoshVarty/Rosalind/004_Rabbits_and_Recurrence_Relations.py
```python
lookup = {}
def get_rabbit_pairs(n, k):
if n == 0:
return 0
elif n == 1:
return 1
prev_gen = 0
prev_prev_gen = 0
if (n - 1) in lookup:
prev_gen = lookup[n - 1]
else:
prev_gen = get_rabbit_pairs(n - 1, k)
lookup[n - 1] = prev_gen
if (n - 2) in lookup:
prev_prev_gen = lookup[n - 2]
else:
prev_prev_gen = get_rabbit_pairs(n - 2, k)
lookup[n - 2] = prev_prev_gen
return prev_gen + k * prev_prev_gen
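# Illustrative worked example (added): the recurrence is F(n) = F(n-1) + k*F(n-2), so
#   >>> get_rabbit_pairs(5, 3)
#   19
# since F(1) = F(2) = 1, F(3) = 1 + 3*1 = 4, F(4) = 4 + 3*1 = 7 and F(5) = 7 + 3*4 = 19.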
if __name__ == '__main__':
while True:
raw_input = input("Enter n and k:\n")
n, k = raw_input.split()
n = int(n)
k = int(k)
lookup = {}
number_of_pairs = get_rabbit_pairs(n, k)
print(number_of_pairs)
```
#### File: JoshVarty/Rosalind/010_Consensus_and_Profile.py
```python
import numpy as np
lookup = {
'A': 0,
'C': 1,
'G': 2,
'T': 3,
}
def get_profile_and_consensuse_for_dna_strings(dna_matrix):
consensus = ['A' for _ in range(len(dna_matrix[0]))]
consensus_max_count = np.zeros((len(dna_matrix[0]),))
profile_matrix = np.zeros((4, len(dna_matrix[0])), dtype=np.int)
# Build profile matrix
for row in range(len(dna_matrix)):
for col in range(len(dna_matrix[row])):
base = dna_matrix[row][col]
index = lookup[base]
profile_matrix[index, col] = profile_matrix[index, col] + 1
if profile_matrix[index, col] > consensus_max_count[col]:
consensus_max_count[col] = profile_matrix[index, col]
consensus[col] = base
return profile_matrix, consensus
if __name__ == '__main__':
with open('problem_10_data.txt', 'r') as file:
dna_matrix = []
current_record = ""
for fast_a_record in file:
if fast_a_record[0] == '>':
dna_matrix.append(current_record)
current_record = ""
else:
current_record = current_record + fast_a_record.strip()
dna_matrix.append(current_record)
del dna_matrix[0]
profile, consensus = get_profile_and_consensuse_for_dna_strings(dna_matrix)
print(''.join(consensus))
print("A:", ' '.join(map(str, profile[0])))
print("C:", ' '.join(map(str, profile[1])))
print("G:", ' '.join(map(str, profile[2])))
print("T:", ' '.join(map(str, profile[3])))
```
#### File: JoshVarty/Rosalind/014_Finding_a_Shared_Motif.py
```python
import numpy as np
from utils import load_fasta_file_as_lookup
from utils import SuffixNode, JoshSuffixTree
import sys
sys.setrecursionlimit(10**6)
def find_longest_common_substring(records):
records_list = [v for k,v in records.items()]
suffix_tree = JoshSuffixTree(records_list)
return lcs(suffix_tree.root)
def lcs(node: SuffixNode):
if not np.all(node.visited):
return ""
longest_string = ""
for child in node.children:
child_string = lcs(child)
if len(child_string) > len(longest_string):
longest_string = child_string
return node.value + longest_string
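# Added note (hedged): assuming SuffixNode.visited flags which of the input strings pass
# through a node, lcs() walks the generalized suffix tree and concatenates edge values
# only along paths shared by every record, i.e. the classic
# longest-common-substring-via-suffix-tree approach.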
if __name__ == '__main__':
fasta_record_lookup = load_fasta_file_as_lookup('problem_10_data.txt')
longest_common_substring = find_longest_common_substring(fasta_record_lookup)
print(longest_common_substring)
``` |
{
"source": "JoshVarty/SorghumHeadDetection",
"score": 2
} |
#### File: SorghumHeadDetection/RetinaNet/inference.py
```python
import os
import sys
import PIL
import shutil
import pandas as pd
import numpy as np
from functools import partial
from sklearn.model_selection import KFold
import torch
import fastai
from fastai.core import is_tuple
from fastai.train import ShowGraph
from fastai.vision import Path, open_image, ImageBBox, ObjectItemList, get_transforms, bb_pad_collate, conv_layer
from fastai.vision import Learner, create_body, models, conv2d, ifnone, DatasetType, range_of, progress_bar, cnn_learner, Image
from fastai.torch_core import to_np
from fastai.vision.data import pil2tensor
from RetinaNet.object_detection_helper import process_output, nms, rescale_boxes, GeneralEnsemble
from RetinaNet.object_detection_helper import create_anchors, get_annotations_from_path
from RetinaNet.RetinaNetFocalLoss import FocalLoss
from RetinaNet.RetinaNet import RetinaNet
from RetinaNet.callbacks import BBLossMetrics, BBMetrics, PascalVOCMetric
def tlbr2ltwhcs(boxes, scores):
"""
Top-Left-Bottom-Right to Left-Top-Width-Height-Class-Score
"""
new_boxes = []
for box, score in zip(boxes, scores):
new_boxes.append([box[1], box[0], box[3] - box[1], box[2] - box[0], 0, score])
return new_boxes
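# Added example: a box [top=10, left=20, bottom=50, right=80] with score 0.9 becomes
# [20, 10, 60, 40, 0, 0.9], i.e. [left, top, width, height, class, score] with the
# class hard-coded to 0 (single-class detection).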
#Helper methods for use during inference
def get_crop_coordinates(image_height, image_width, verticalCropIndex, horizontalCropIndex, model_input_size):
maxVerticalCrops = int(np.ceil(image_height / model_input_size))
maxHorizontalCrops = int(np.ceil(image_width / model_input_size))
lastValidVerticalCrop = image_height - model_input_size
lastValidHorizontalCrop = image_width - model_input_size
crop_x = (horizontalCropIndex % maxHorizontalCrops) * model_input_size
crop_x = min(crop_x, lastValidHorizontalCrop)
crop_y = (verticalCropIndex % maxVerticalCrops) * model_input_size
crop_y = min(crop_y, lastValidVerticalCrop)
return crop_y, crop_x
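# Added worked example: for a 600x700 image and model_input_size=256 there are
# ceil(600/256) = 3 vertical tiles; verticalCropIndex=2 would start at 2*256 = 512,
# but the last valid crop origin is 600 - 256 = 344, so crop_y is clamped to 344 and
# the final tile overlaps the previous one instead of running past the image edge.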
#Overrides fastai's default 'open_image' method to crop based on our crop counter
def setupNewCrop(verticalCropIndex, horizontalCropIndex, model_input_size=256):
def open_image_with_specific_crop(fn, convert_mode, after_open):
"""
Opens an image with a specific crop, based on horizontalCropIndex and verticalCropIndex
"""
x = PIL.Image.open(fn)
width, height = x.size
crop_y, crop_x = get_crop_coordinates(height, width, verticalCropIndex, horizontalCropIndex, model_input_size)
cropped_image = x.crop([crop_x, crop_y, crop_x + model_input_size, crop_y + model_input_size])
# standardize
return Image(pil2tensor(cropped_image, np.float32).div_(255))
#Override fastai's open_image() to use our custom open_image_with_specific_crop()
fastai.vision.data.open_image = open_image_with_specific_crop
def getMaxHeightAndWidth(learn, ds_type=DatasetType.Valid):
"""
Returns the maximum height and width for a given image dataset
"""
dl = learn.dl(ds_type)
maxHeight = 0
maxWidth = 0
for i in dl.x:
height = i.shape[1]
width = i.shape[2]
if height > maxHeight:
maxHeight = height
if width > maxWidth:
maxWidth = width
return maxHeight, maxWidth
def get_bounding_box_predictions(learn, dataloader, anchors, original_images, verticalCropIndex, horizontalCropIndex, detect_threshold = 0.5, nms_threshold = 0.1, model_input_size=256):
"""
Generates bounding box predictions for an entire epoch of a provided Dataloader
"""
all_imgs = []
all_bboxes = []
all_scores = []
batch_index = 0
for img_batch, target_batch in dataloader:
with torch.no_grad():
prediction_batch = learn.model(img_batch)
class_pred_batch, bbox_pred_batch = prediction_batch[:2]
for index, (img, clas_pred, bbox_pred) in enumerate(zip(img_batch, class_pred_batch, bbox_pred_batch)):
original_image = original_images[batch_index + index]
all_imgs.append(original_image)
#Filter out predictions below detect_threshold
bbox_pred, scores, preds = process_output(clas_pred, bbox_pred, anchors, detect_threshold)
#If there are no bounding boxes, we're done
if len(bbox_pred) <= 0:
all_bboxes.append([])
all_scores.append([])
continue
#Only keep most likely bounding boxes
to_keep = nms(bbox_pred, scores, nms_threshold)
if len(to_keep) <= 0:
all_bboxes.append([])
all_scores.append([])
continue
bbox_pred, preds, scores = bbox_pred[to_keep].cpu(), preds[to_keep].cpu(), scores[to_keep].cpu()
#Change back to pixel values
height = img.shape[1]
width = img.shape[2]
t_sz = torch.Tensor([height, width])[None].cpu()
bbox_pred = to_np(rescale_boxes(bbox_pred, t_sz))
#Get crop location
crop_y, crop_x = get_crop_coordinates(original_image.shape[1], original_image.shape[2], verticalCropIndex, horizontalCropIndex, model_input_size)
# change from CTWH to TLRB
bbox_pred[:, :2] = bbox_pred[:, :2] - bbox_pred[:, 2:] / 2
#Account for offset due to cropping
bbox_pred[:, 0] = bbox_pred[:, 0] + crop_y
bbox_pred[:, 2] = bbox_pred[:, 2] + bbox_pred[:, 0]
bbox_pred[:, 1] = bbox_pred[:, 1] + crop_x
bbox_pred[:, 3] = bbox_pred[:, 3] + bbox_pred[:, 1]
all_bboxes.append(bbox_pred)
all_scores.append(scores.numpy())
#After completing a batch, we have to keep track of the total number of images we've processed
batch_index = batch_index + index + 1
return all_imgs, all_bboxes, all_scores
def ensembleBoxesFromSlices(all_preds):
boxes_by_image = {}
for preds in all_preds:
all_images, all_boxes, all_scores = preds
for i, (boxes, scores) in enumerate(zip(all_boxes, all_scores)):
if i not in boxes_by_image:
boxes_by_image[i] = []
boxes_by_image[i].append(tlbr2ltwhcs(boxes, scores))
final_preds = []
for image_index, all_boxes in boxes_by_image.items():
ensembled_boxes = GeneralEnsemble(all_boxes)
boxes_tlrb = []
for box in ensembled_boxes:
#[box_x, box_y, box_w, box_h, class, confidence]
# to
# [top, left, bottom, right, confidence]
boxes_tlrb.append([box[1], box[0], box[1] + box[3], box[0] + box[2], box[5]])
final_preds.append(boxes_tlrb)
return final_preds
def get_bounding_box_predictions_for_dataset(learn, anchors, ds_type=DatasetType.Valid, model_input_size=256):
dl = learn.dl(ds_type)
sliced_predictions = []
maxHeight, maxWidth = getMaxHeightAndWidth(learn, ds_type)
#Keep track of previous method for opening images
old_open_image = fastai.vision.data.open_image
try:
maxNumberOfVerticalCrops = ((maxHeight - 1) // model_input_size) + 1
maxNumberOfHorizontalCrops = ((maxWidth - 1) // model_input_size) + 1
original_images = list(dl.x)
for i in range(maxNumberOfVerticalCrops):
for j in range(maxNumberOfHorizontalCrops):
#Override fastai's open_image to crop at a specific location in the image
setupNewCrop(i, j)
#yield get_preds(learn.model, dl, activ=_loss_func2activ(learn.loss_func))[0]
predictions = get_bounding_box_predictions(learn, dl, anchors, original_images, i, j)
sliced_predictions.append(predictions)
predictions = ensembleBoxesFromSlices(sliced_predictions)
return predictions
finally:
#Restore original method for opening images
fastai.vision.data.open_image = old_open_image
``` |
{
"source": "JoshVarty/Tennis",
"score": 3
} |
#### File: JoshVarty/Tennis/main.py
```python
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
import torch
from agent import Agent
env = UnityEnvironment(file_name="Tennis_Linux_NoVis/Tennis.x86_64", worker_id=13)
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
def maddpg(n_episodes=4000, max_t=1000, train_mode=True):
all_scores = []
scores_window = deque(maxlen=100)
for i_episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=train_mode)[brain_name]
states = np.reshape(env_info.vector_observations, (1,48))
agent_0.reset()
agent_1.reset()
scores = np.zeros(num_agents)
while True:
actions = get_actions(states, add_noise=True)
env_info = env.step(actions)[brain_name]
next_states = np.reshape(env_info.vector_observations, (1, 48))
rewards = env_info.rewards
done = env_info.local_done
agent_0.step(states, actions, rewards[0], next_states, done, 0)
agent_1.step(states, actions, rewards[1], next_states, done, 1)
scores += np.max(rewards)
states = next_states
if np.any(done):
# we're done when the ball hits the ground or goes out of bounds
scores_window.append(np.mean(scores))
all_scores.append(np.mean(scores))
break
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
if np.mean(scores_window) >= 0.5:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)), flush=True)
# Save only the actor because that's all we need to run at test (visualization) time
torch.save(agent_0.actor_local.state_dict(), 'checkpoint_actor_0.pth')
torch.save(agent_1.actor_local.state_dict(), 'checkpoint_actor_1.pth')
break
return all_scores
def get_actions(states, add_noise=False):
action_0 = agent_0.act(states, add_noise)
action_1 = agent_1.act(states, add_noise)
return np.stack((action_0, action_1), axis=0).flatten()
agent_0 = Agent(state_size, action_size)
agent_1 = Agent(state_size, action_size)
scores = maddpg()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
``` |
{
"source": "joshvernon/payday-leap-year-calculator",
"score": 3
} |
#### File: joshvernon/payday-leap-year-calculator/api.py
```python
from datetime import date
import responder
import calculator
api = responder.API()
@api.route("/years")
def get_years_collection(req, resp):
try:
validated_params = validate_params(**req.params)
results = calculator.get_payday_leap_years(
validated_params['payday'],
frequency=validated_params['frequency'],
count=validated_params['count'],
starting_year=validated_params['year']
)
resp.media = {'paydayLeapYears': results}
except ValidationException:
resp.media = {'error': 'Invalid parameter value'}
resp.status_code = api.status_codes.HTTP_400
@api.route("/years/{year}")
def get_year_resource(req, resp, *, year):
try:
if req.method != 'get':
resp.status_code = api.status_codes.HTTP_405
return
validated_params = validate_params(startYear=(year,), **req.params)
print(validated_params)
result = calculator.is_payday_leap_year(
validated_params['year'],
validated_params['payday'],
frequency=validated_params['frequency']
)
resp.media = {'isPaydayLeapYear': result}
except ValidationException:
resp.media = {'error': 'Invalid parameter value'}
resp.status_code = api.status_codes.HTTP_400
def validate_params(**kwargs):
try:
validated_params = {}
if 'payday' in kwargs:
validated_params['payday'] = date.fromisoformat(kwargs['payday'][0])
else:
validated_params['payday'] = date.today()
if 'frequency' in kwargs:
validated_params['frequency'] = kwargs['frequency'][0]
else:
validated_params['frequency'] = 'biweekly'
if 'count' in kwargs:
validated_params['count'] = int(kwargs['count'][0])
else:
validated_params['count'] = 5
if 'startYear' in kwargs:
validated_params['year'] = int(kwargs['startYear'][0])
else:
validated_params['year'] = date.today().year
return validated_params
except:
raise ValidationException()
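# Added illustrative call (hypothetical values): responder hands query parameters over as
# sequences, so
#   validate_params(payday=('2018-01-11',), frequency=('weekly',), count=('3',))
# returns {'payday': date(2018, 1, 11), 'frequency': 'weekly', 'count': 3,
#          'year': <current year>}; any value that fails to parse raises ValidationException.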
class ValidationException(Exception):
pass
if __name__ == '__main__':
api.run()
```
#### File: joshvernon/payday-leap-year-calculator/calculator.py
```python
from datetime import date
def is_payday_leap_year(year, payday, frequency='biweekly'):
"""Determine if a given year is a payday leap year.
Determine if a given year is a payday leap year, based on the
given payday on a weekly or biweekly pay calendar (specified by
`frequency`). Assumes paychecks are allowed to be disbursed
on holidays.
Args:
year (int): The year we're testing.
payday (date): A payday from the specified pay calendar.
Does not need to be in the same year as `year`.
frequency (str): Pay frequency. Valid values are 'weekly'
or 'biweekly'. Default is 'biweekly'.
Returns:
True if the year is a payday leap year, False if not.
"""
new_years_day = date(year, 1, 1)
jan_2 = date(year, 1, 2)
freq_in_days = 7 if frequency == 'weekly' else 14
# Determine if new year's day is a payday.
# If new year's day is a payday, then it's always a 27 payday year.
if abs((payday - new_years_day).days) % freq_in_days == 0:
result = True
# Handle leap years - Jan. 2 can also be a payday.
elif (year % 4 == 0) and (abs((payday - jan_2).days) % freq_in_days == 0):
result = True
else:
result = False
return result
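# Added worked example: with payday = date(2018, 1, 11) (a Thursday) and a biweekly
# calendar, date(2037, 1, 1) is also a Thursday and the gap is 6930 days, which is
# divisible by 14, so is_payday_leap_year(2037, payday) returns True (27 paydays fall
# in 2037), while 2018 itself returns False.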
def get_payday_leap_years(
payday,
frequency='biweekly',
count=5,
starting_year=date.today().year
):
"""Get the next n payday leap years.
Return a list of the next n payday leap years, where n is specified
by `count`.
Args:
payday (date): A payday from the specified pay calendar.
frequency (str): Pay frequency. Valid values are 'weekly'
or 'biweekly'. Default is 'biweekly'.
count (int): The number of payday leap years to return. Default is 5.
starting_year (int): The year to start counting from.
Returns:
A list of ints.
"""
results = []
# Start counting from the current year.
year = starting_year
while len(results) < count:
if is_payday_leap_year(year, payday, frequency):
results.append(year)
year += 1
return results
if __name__ == '__main__':
year = 2018
# January 11, 2018
payday = date(2018, 1, 11)
for i in range(51):
print("{0} is a payday leap year: {1}".format(
year, is_payday_leap_year(year, payday)
))
year += 1
```
#### File: payday-leap-year-calculator/tests/test_calculator.py
```python
import unittest
from datetime import date
from calculator import is_payday_leap_year, get_payday_leap_years
class IsPaydayLeapYearTestCase(unittest.TestCase):
def test_thursday_2018_is_false(self):
payday = date(2018, 1, 11)
self.assertFalse(is_payday_leap_year(2018, payday))
def test_thursday_2037_is_true(self):
payday = date(2018, 1, 11)
self.assertTrue(is_payday_leap_year(2037, payday))
def test_thursday_2037_alternate_week_is_false(self):
payday = date(2018, 1, 4)
self.assertFalse(is_payday_leap_year(2037, payday))
def test_friday_2038_is_true(self):
payday = date(2018, 1, 12)
self.assertTrue(is_payday_leap_year(2038, payday))
def test_thursday_2015_weekly_is_true(self):
payday = date(2018, 1, 11)
self.assertTrue(is_payday_leap_year(2015, payday, 'weekly'))
def test_unique_weekly_year(self):
payday = date(2018, 1, 11)
# 2020 is not a payday leap year for the biweekly frequency.
self.assertTrue(is_payday_leap_year(2020, payday, 'weekly'))
def test_invalid_year_raises_TypeError(self):
payday = (2017, 10, 19)
self.assertRaises(TypeError, is_payday_leap_year, 'not_an_int', payday)
def test_invalid_payday_raises_TypeError(self):
self.assertRaises(TypeError, is_payday_leap_year, 2018, 'not_a_date')
def test_invalid_frequency_defaults_to_14(self):
payday = date(2018, 1, 11)
# Should default to biweekly - 2015 will be valid.
self.assertTrue(is_payday_leap_year(2015, payday, 'not_valid'))
# Would be True for weekly - checking this doesn't happen.
self.assertFalse(is_payday_leap_year(2020, payday, 'not_valid'))
class GetPaydayLeapYearsTestCase(unittest.TestCase):
def test_7_26_2018_returns_correct_years(self):
payday = date(2018, 7, 26)
expected = [2026, 2037, 2048, 2060, 2071]
results = get_payday_leap_years(payday)
self.assertEqual(results, expected)
def test_7_26_2018_returns_correct_years_weekly_frequency(self):
payday = date(2018, 7, 26)
expected = [2020, 2026, 2032, 2037, 2043]
results = get_payday_leap_years(payday, frequency='weekly')
self.assertEqual(results, expected)
def test_non_default_count(self):
payday = date(2018, 7, 26)
results = get_payday_leap_years(payday, count=10)
self.assertEqual(len(results), 10)
def test_zero_count_returns_empty_result_set(self):
payday = date(2018, 7, 26)
results = get_payday_leap_years(payday, count=0)
self.assertEqual(len(results), 0)
def test_non_default_starting_year(self):
payday = date(2018, 7, 26)
results = get_payday_leap_years(payday, starting_year=2000)
self.assertIn(2004, results)
if __name__ == '__main__':
unittest.main(verbosity=2)
``` |
{
"source": "joshvillbrandt/django-quick-start-app",
"score": 2
} |
#### File: joshvillbrandt/django-quick-start-app/views.py
```python
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render
from django.core.urlresolvers import reverse
from {{ app_name }}.models import *
# Create your views here.
# For more information on this file, see
# https://docs.djangoproject.com/en/{{ docs_version }}/intro/tutorial03/
def index(request):
latest_poll_list = Poll.objects.all().order_by('-pub_date')[:5]
context = {
'active_nav': '{{ app_name }}',
'latest_poll_list': latest_poll_list
}
return render(request, '{{ app_name }}/index.html', context)
def detail(request, poll_id):
try:
poll = Poll.objects.get(pk=poll_id)
except Poll.DoesNotExist:
raise Http404
context = {
'active_nav': '{{ app_name }}',
'poll': poll
}
return render(request, '{{ app_name }}/detail.html', context)
def results(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
context = {
'active_nav': '{{ app_name }}',
'poll': poll
}
return render(request, '{{ app_name }}/results.html', context)
def vote(request, poll_id):
p = get_object_or_404(Poll, pk=poll_id)
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the poll voting form.
return render(request, '{{ app_name }}/detail.html', {
'active_nav': '{{ app_name }}',
'poll': p,
'error_message': "You didn't select a choice.",
})
return detail(request, poll_id)
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('{{ app_name }}:results', args=(p.id,)))
``` |
{
"source": "joshvillbrandt/PythonTemplate",
"score": 2
} |
#### File: PythonTemplate/tests/TestModel.py
```python
import unittest
from PythonTemplate import Model
class TestModel(unittest.TestCase):
def setUp(self):
# TODO
pass
def test_do_thing(self):
# do something
Model.doThing()
# evaluate something
# TODO
``` |
{
"source": "joshvinson/Bonisagus",
"score": 3
} |
#### File: Bonisagus/Core/virtue.py
```python
SPECIAL = '*'
FREE = 0
MINOR = 1
MAJOR = 3
class Virtue():
def __init__(self, virtue_type, magnitude, name, book="", pages=""):
self.name = name
self.book = book
self.pages = pages
self.magnitude = magnitude
self.virtue_type = virtue_type
def to_json(self):
return self.__dict__
class Flaw():
def __init__(self, flaw_type, magnitude, name, book="", pages=""):
self.name = name
self.book = book
self.pages = pages
self.magnitude = magnitude
self.flaw_type = flaw_type
def to_json(self):
return self.__dict__
``` |
{
"source": "JoshVStaden/codex-africanus",
"score": 2
} |
#### File: calibration/utils/dask.py
```python
from africanus.calibration.utils.correct_vis import CORRECT_VIS_DOCS
from africanus.calibration.utils.corrupt_vis import CORRUPT_VIS_DOCS
from africanus.calibration.utils.residual_vis import RESIDUAL_VIS_DOCS
from africanus.calibration.utils.compute_and_corrupt_vis import (
COMPUTE_AND_CORRUPT_VIS_DOCS)
from africanus.calibration.utils import correct_vis as np_correct_vis
from africanus.calibration.utils import (compute_and_corrupt_vis as
np_compute_and_corrupt_vis)
from africanus.calibration.utils import corrupt_vis as np_corrupt_vis
from africanus.calibration.utils import residual_vis as np_residual_vis
from africanus.calibration.utils import check_type
from africanus.calibration.utils.utils import DIAG_DIAG, DIAG, FULL
from africanus.util.requirements import requires_optional
try:
from dask.array.core import blockwise
except ImportError as e:
dask_import_error = e
else:
dask_import_error = None
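# Added summary comment: the three correlation modes used below (read off the blockwise
# index tuples) are DIAG_DIAG (both jones and visibilities carry a single correlation
# axis), DIAG (diagonal jones applied to full 2x2 visibilities) and FULL (full 2x2 jones
# and visibilities); check_type() picks the mode from the shapes of the input arrays.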
def _corrupt_vis_wrapper(time_bin_indices, time_bin_counts, antenna1,
antenna2, jones, model):
return np_corrupt_vis(time_bin_indices, time_bin_counts, antenna1,
antenna2, jones[0][0], model[0])
@requires_optional('dask.array', dask_import_error)
def corrupt_vis(time_bin_indices, time_bin_counts, antenna1,
antenna2, jones, model):
mode = check_type(jones, model, vis_type='model')
if jones.chunks[1][0] != jones.shape[1]:
raise ValueError("Cannot chunk jones over antenna")
if jones.chunks[3][0] != jones.shape[3]:
raise ValueError("Cannot chunk jones over direction")
if model.chunks[2][0] != model.shape[2]:
raise ValueError("Cannot chunk model over direction")
if mode == DIAG_DIAG:
out_shape = ("row", "chan", "corr1")
model_shape = ("row", "chan", "dir", "corr1")
jones_shape = ("row", "ant", "chan", "dir", "corr1")
elif mode == DIAG:
out_shape = ("row", "chan", "corr1", "corr2")
model_shape = ("row", "chan", "dir", "corr1", "corr2")
jones_shape = ("row", "ant", "chan", "dir", "corr1")
elif mode == FULL:
out_shape = ("row", "chan", "corr1", "corr2")
model_shape = ("row", "chan", "dir", "corr1", "corr2")
jones_shape = ("row", "ant", "chan", "dir", "corr1", "corr2")
else:
raise ValueError("Unknown mode argument of %s" % mode)
# the new_axes={"corr2": 2} is required because of a dask bug
# see https://github.com/dask/dask/issues/5550
return blockwise(_corrupt_vis_wrapper, out_shape,
time_bin_indices, ("row",),
time_bin_counts, ("row",),
antenna1, ("row",),
antenna2, ("row",),
jones, jones_shape,
model, model_shape,
adjust_chunks={"row": antenna1.chunks[0]},
new_axes={"corr2": 2},
dtype=model.dtype,
align_arrays=False)
def _compute_and_corrupt_vis_wrapper(time_bin_indices, time_bin_counts,
antenna1, antenna2, jones, model,
uvw, freq, lm):
return np_compute_and_corrupt_vis(time_bin_indices, time_bin_counts,
antenna1, antenna2, jones[0][0],
model[0], uvw[0], freq, lm[0][0])
@requires_optional('dask.array', dask_import_error)
def compute_and_corrupt_vis(time_bin_indices, time_bin_counts,
antenna1, antenna2, jones, model,
uvw, freq, lm):
if jones.chunks[1][0] != jones.shape[1]:
raise ValueError("Cannot chunk jones over antenna")
if jones.chunks[3][0] != jones.shape[3]:
raise ValueError("Cannot chunk jones over direction")
if model.chunks[2][0] != model.shape[2]:
raise ValueError("Cannot chunk model over direction")
if uvw.chunks[1][0] != uvw.shape[1]:
raise ValueError("Cannot chunk uvw over last axis")
if lm.chunks[1][0] != lm.shape[1]:
raise ValueError("Cannot chunks lm over direction")
if lm.chunks[2][0] != lm.shape[2]:
raise ValueError("Cannot chunks lm over last axis")
mode = check_type(jones, model, vis_type='model')
if mode == DIAG_DIAG:
out_shape = ("row", "chan", "corr1")
model_shape = ("row", "chan", "dir", "corr1")
jones_shape = ("row", "ant", "chan", "dir", "corr1")
elif mode == DIAG:
out_shape = ("row", "chan", "corr1", "corr2")
model_shape = ("row", "chan", "dir", "corr1", "corr2")
jones_shape = ("row", "ant", "chan", "dir", "corr1")
elif mode == FULL:
out_shape = ("row", "chan", "corr1", "corr2")
model_shape = ("row", "chan", "dir", "corr1", "corr2")
jones_shape = ("row", "ant", "chan", "dir", "corr1", "corr2")
else:
raise ValueError("Unknown mode argument of %s" % mode)
# the new_axes={"corr2": 2} is required because of a dask bug
# see https://github.com/dask/dask/issues/5550
return blockwise(_compute_and_corrupt_vis_wrapper, out_shape,
time_bin_indices, ("row",),
time_bin_counts, ("row",),
antenna1, ("row",),
antenna2, ("row",),
jones, jones_shape,
model, model_shape,
uvw, ("row", "three"),
freq, ("chan",),
lm, ("row", "dir", "two"),
adjust_chunks={"row": antenna1.chunks[0]},
new_axes={"corr2": 2},
dtype=model.dtype,
align_arrays=False)
def _correct_vis_wrapper(time_bin_indices, time_bin_counts, antenna1,
antenna2, jones, vis, flag):
return np_correct_vis(time_bin_indices, time_bin_counts, antenna1,
antenna2, jones[0][0], vis, flag)
@requires_optional('dask.array', dask_import_error)
def correct_vis(time_bin_indices, time_bin_counts, antenna1,
antenna2, jones, vis, flag):
if jones.chunks[1][0] != jones.shape[1]:
raise ValueError("Cannot chunk jones over antenna")
if jones.chunks[3][0] != jones.shape[3]:
raise ValueError("Cannot chunk jones over direction")
mode = check_type(jones, vis)
if mode == DIAG_DIAG:
out_shape = ("row", "chan", "corr1")
jones_shape = ("row", "ant", "chan", "dir", "corr1")
elif mode == DIAG:
out_shape = ("row", "chan", "corr1", "corr2")
jones_shape = ("row", "ant", "chan", "dir", "corr1")
elif mode == FULL:
out_shape = ("row", "chan", "corr1", "corr2")
jones_shape = ("row", "ant", "chan", "dir", "corr1", "corr2")
else:
raise ValueError("Unknown mode argument of %s" % mode)
# the new_axes={"corr2": 2} is required because of a dask bug
# see https://github.com/dask/dask/issues/5550
return blockwise(_correct_vis_wrapper, out_shape,
time_bin_indices, ("row",),
time_bin_counts, ("row",),
antenna1, ("row",),
antenna2, ("row",),
jones, jones_shape,
vis, out_shape,
flag, out_shape,
adjust_chunks={"row": antenna1.chunks[0]},
new_axes={"corr2": 2},
dtype=vis.dtype,
align_arrays=False)
def _residual_vis_wrapper(time_bin_indices, time_bin_counts, antenna1,
antenna2, jones, vis, flag, model):
return np_residual_vis(time_bin_indices, time_bin_counts, antenna1,
antenna2, jones[0][0], vis, flag, model[0])
@requires_optional('dask.array', dask_import_error)
def residual_vis(time_bin_indices, time_bin_counts, antenna1,
antenna2, jones, vis, flag, model):
if jones.chunks[1][0] != jones.shape[1]:
raise ValueError("Cannot chunk jones over antenna")
if jones.chunks[3][0] != jones.shape[3]:
raise ValueError("Cannot chunk jones over direction")
if model.chunks[2][0] != model.shape[2]:
raise ValueError("Cannot chunk model over direction")
mode = check_type(jones, vis)
if mode == DIAG_DIAG:
out_shape = ("row", "chan", "corr1")
model_shape = ("row", "chan", "dir", "corr1")
jones_shape = ("row", "ant", "chan", "dir", "corr1")
elif mode == DIAG:
out_shape = ("row", "chan", "corr1", "corr2")
model_shape = ("row", "chan", "dir", "corr1", "corr2")
jones_shape = ("row", "ant", "chan", "dir", "corr1")
elif mode == FULL:
out_shape = ("row", "chan", "corr1", "corr2")
model_shape = ("row", "chan", "dir", "corr1", "corr2")
jones_shape = ("row", "ant", "chan", "dir", "corr1", "corr2")
else:
raise ValueError("Unknown mode argument of %s" % mode)
# the new_axes={"corr2": 2} is required because of a dask bug
# see https://github.com/dask/dask/issues/5550
return blockwise(_residual_vis_wrapper, out_shape,
time_bin_indices, ("row",),
time_bin_counts, ("row",),
antenna1, ("row",),
antenna2, ("row",),
jones, jones_shape,
vis, out_shape,
flag, out_shape,
model, model_shape,
adjust_chunks={"row": antenna1.chunks[0]},
new_axes={"corr2": 2},
dtype=vis.dtype,
align_arrays=False)
compute_and_corrupt_vis.__doc__ = COMPUTE_AND_CORRUPT_VIS_DOCS.substitute(
array_type=":class:`dask.array.Array`")
corrupt_vis.__doc__ = CORRUPT_VIS_DOCS.substitute(
array_type=":class:`dask.array.Array`")
correct_vis.__doc__ = CORRECT_VIS_DOCS.substitute(
array_type=":class:`dask.array.Array`")
residual_vis.__doc__ = RESIDUAL_VIS_DOCS.substitute(
array_type=":class:`dask.array.Array`")
```
#### File: deconv/hogbom/clean.py
```python
import logging
import numba
import numpy as np
try:
import scipy.signal
from scipy import optimize as opt
except ImportError as e:
opt_import_err = e
else:
opt_import_err = None
from africanus.util.requirements import requires_optional
@numba.jit(nopython=True, nogil=True, cache=True)
def twod_gaussian(coords, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
x = coords[0]
y = coords[1]
xo = float(xo)
yo = float(yo)
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = (offset + amplitude *
np.exp(- (a*((x-xo)**2) +
2*b*(x-xo)*(y-yo) +
c*((y-yo)**2))))
return g.flatten()
@requires_optional('scipy', opt_import_err)
def fit_2d_gaussian(psf):
"""
Fit an elliptical Gaussian to the primary lobe of the psf
"""
# Get the full width at half maximum height of the psf
# numba doesn't have argwhere, but it can jit argwhere's
# implementation
# I = np.stack((psf>=0.5*psf.max()).nonzero()).transpose()
loc = np.argwhere(psf >= 0.5*psf.max())
# Create an array with these values at the same indices and zeros otherwise
lk, mk = psf.shape
psf_fit = np.zeros_like(psf)
psf_fit[loc[:, 0], loc[:, 1]] = psf[loc[:, 0], loc[:, 1]]
# Create x and y indices
x = np.linspace(0, psf.shape[0]-1, psf.shape[0])
y = np.linspace(0, psf.shape[1]-1, psf.shape[1])
x, y = np.meshgrid(x, y)
# Set starting point of optimiser
initial_guess = (0.5, lk/2, mk/2, 1.75, 1.4, -4.0, 0)
# Flatten the data
data = psf_fit.ravel()
# Fit the function (Gaussian for now)
popt, pcov = opt.curve_fit(twod_gaussian, (x, y), data, p0=initial_guess)
# Get function with fitted params
data_fitted = twod_gaussian((x, y), *popt)
# Normalise the psf to have a max value of one
data_fitted = data_fitted/data_fitted.max()
return data_fitted.reshape(lk, mk)
@numba.jit(nopython=True, nogil=True, cache=True)
def find_peak(residuals):
abs_residuals = residuals
min_peak = abs_residuals.min()
max_peak = abs_residuals.max()
nx, ny = abs_residuals.shape
minx, miny = -1, -1
maxx, maxy = -1, -1
peak_intensity = -1
for x in range(nx):
for y in range(ny):
intensity = abs_residuals[x, y]
if intensity == min_peak:
minx = x
miny = y
if intensity == max_peak:
maxx = x
maxy = y
peak_intensity = intensity
if minx == -1 or miny == -1:
raise ValueError("Minimum peak not found")
if maxx == -1 or maxy == -1:
raise ValueError("Maximum peak not found")
return maxx, maxy, minx, miny, peak_intensity
@numba.jit(nopython=True, nogil=True, cache=True)
def build_cleanmap(clean, intensity, gamma, p, q):
clean[p, q] += intensity*gamma
@numba.jit(nopython=True, nogil=True, cache=True)
def update_residual(residual, intensity, gamma, p, q, npix, psf):
npix = residual.shape[0] # Assuming square image
residual -= gamma*intensity*psf[npix - 1 - p:2*npix - 1 - p,
npix - 1 - q:2*npix - 1 - q]
def hogbom_clean(dirty, psf,
gamma=0.1,
threshold="default",
niter="default"):
"""
Performs Hogbom Clean on the ``dirty`` image given the ``psf``.
Parameters
----------
dirty : np.ndarray
float64 dirty image of shape (ny, nx)
psf : np.ndarray
float64 Point Spread Function of shape (2*ny, 2*nx)
gamma (optional) : float
the gain factor (must be less than one)
threshold (optional) : float or str
the threshold to clean to
niter (optional) : integer
the maximum number of iterations allowed
Returns
-------
np.ndarray
float64 clean image of shape (ny, nx)
np.ndarray
float64 residual image of shape (ny, nx)
"""
# deep copy dirties to first residuals,
# want to keep the original dirty maps
residuals = dirty.copy()
# Check that psf is twice the size of residuals
if (psf.shape[0] != 2*residuals.shape[0] or
psf.shape[1] != 2*residuals.shape[1]):
raise ValueError("Warning psf not right size")
# Initialise array to store cleaned image
clean = np.zeros_like(residuals)
assert clean.shape[0] == clean.shape[1]
npix = clean.shape[0]
if niter == "default":
niter = 3*npix
p, q, pmin, qmin, intensity = find_peak(residuals)
if threshold == "default":
# Default: clean down to 20% of the initial absolute peak
threshold = 0.2*np.abs(intensity)
logging.info("Threshold set at %s", threshold)
else:
# Interpret the user-supplied threshold as a fraction of the initial absolute peak
threshold = threshold*np.abs(intensity)
logging.info("Assuming user set threshold at %s", threshold)
# CLEAN the image
i = 0
while np.abs(intensity) > threshold and i <= niter:
logging.info("min %f max %f peak %f threshold %f" %
(residuals.min(), residuals.max(), intensity, threshold))
# First we add the scaled peak to the clean component map
build_cleanmap(clean, intensity, gamma, p, q)
# Subtract out pixel
update_residual(residuals, intensity, gamma, p, q, npix, psf)
# Get new indices where residuals is max
p, q, _, _, intensity = find_peak(residuals)
# Increment counter
i += 1
# Warn if niter exceeded
if i > niter:
logging.warn("Number of iterations exceeded")
logging.warn("Minimum residuals = %s", residuals.max())
logging.info("Done cleaning after %d iterations.", i)
return clean, residuals
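# Added usage note: for an (ny, nx) dirty image the psf must be (2*ny, 2*nx) and the image
# is assumed square; with the defaults the loop above runs for roughly niter = 3*npix
# iterations at most and stops once the absolute peak residual falls below 20% of the
# initial peak, returning the clean component map together with the remaining residuals.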
@requires_optional("scipy", opt_import_err)
def restore(clean, psf, residuals):
"""
Parameters
----------
clean : np.ndarray
float64 clean image of shape (ny, nx)
psf : np.ndarray
float64 Point Spread Function of shape (2*ny, 2*nx)
residuals : np.ndarray
float64 residual image of shape (ny, nx)
Returns
-------
np.ndarray
float64 Restored image of shape (ny, nx)
np.ndarray
float64 Convolved model of shape (ny, nx)
"""
logging.info("Fitting 2D Gaussian")
# get the ideal beam (fit 2D Gaussian to HWFH of psf)
clean_beam = fit_2d_gaussian(psf)
logging.info("Convolving")
# cval=0.0) #Fast using fft
iconv_model = scipy.signal.fftconvolve(clean, clean_beam, mode='same')
logging.info("Convolving done")
# Finally we add the residuals back to the image
restored = iconv_model + residuals
return (restored, iconv_model)
if __name__ == "__main__":
pass
```
#### File: dft/examples/predict_from_fits.py
```python
import argparse
import numpy as np
from astropy.io import fits
import dask
import dask.array as da
from dask.diagnostics import ProgressBar
from africanus.dft.dask import im_to_vis
from daskms import xds_from_ms, xds_from_table, xds_to_table
def create_parser():
p = argparse.ArgumentParser()
p.add_argument("ms", help="Name of MS")
p.add_argument("--fitsmodel", help="Fits file to predict from")
p.add_argument("--row_chunks", default=30000, type=int,
help="How to chunks up row dimension.")
p.add_argument("--ncpu", default=0, type=int,
help="Number of threads to use for predict")
p.add_argument("--colname", default="MODEL_DATA",
help="Name of column to write data to.")
p.add_argument('--field', default=0, type=int,
help="Field ID to predict to.")
return p
args = create_parser().parse_args()
if args.ncpu:
ncpu = args.ncpu
from multiprocessing.pool import ThreadPool
dask.config.set(pool=ThreadPool(ncpu))
else:
import multiprocessing
ncpu = multiprocessing.cpu_count()
print("Using %i threads" % ncpu)
# Get MS frequencies
spw_ds = list(xds_from_table("::".join((args.ms, "SPECTRAL_WINDOW")),
group_cols="__row__"))[0]
# Get frequencies in the measurement set
# If these do not match those in the fits
# file we need to interpolate
ms_freqs = spw_ds.CHAN_FREQ.data[0].compute()
nchan = ms_freqs.size
# load in the fits file
model = fits.getdata(args.fitsmodel)
# get header
hdr = fits.getheader(args.fitsmodel)
# TODO - check that PHASE_DIR in MS matches that in fits
# get image coordinates
if hdr['CUNIT1'] != "DEG" and hdr['CUNIT1'] != "deg":
raise ValueError("Image units must be in degrees")
npix_l = hdr['NAXIS1']
refpix_l = hdr['CRPIX1']
delta_l = hdr['CDELT1'] * np.pi/180 # assumes untis are deg
l0 = hdr['CRVAL1'] * np.pi/180
l_coord = np.sort(np.arange(1 - refpix_l, 1 + npix_l - refpix_l)*delta_l)
if hdr['CUNIT2'] != "DEG" and hdr['CUNIT2'] != "deg":
raise ValueError("Image units must be in degrees")
npix_m = hdr['NAXIS2']
refpix_m = hdr['CRPIX2']
delta_m = hdr['CDELT2'] * np.pi/180 # assumes untis are deg
m0 = hdr['CRVAL2'] * np.pi/180
m_coord = np.arange(1 - refpix_m, 1 + npix_m - refpix_m)*delta_m
npix_tot = npix_l * npix_m
# get frequencies
if hdr["CTYPE4"] == 'FREQ':
nband = hdr['NAXIS4']
refpix_nu = hdr['CRPIX4']
delta_nu = hdr['CDELT4'] # assumes units are Hz
ref_freq = hdr['CRVAL4']
ncorr = hdr['NAXIS3']
freq_axis = str(4)
elif hdr["CTYPE3"] == 'FREQ':
nband = hdr['NAXIS3']
refpix_nu = hdr['CRPIX3']
delta_nu = hdr['CDELT3'] # assumes units are Hz
ref_freq = hdr['CRVAL3']
ncorr = hdr['NAXIS4']
freq_axis = str(3)
else:
raise ValueError("Freq axis must be 3rd or 4th")
freqs = ref_freq + np.arange(1 - refpix_nu, 1 + nband - refpix_nu) * delta_nu
print("Reference frequency is ", ref_freq)
# TODO - need to use convert for this
if ncorr > 1:
raise ValueError("Currently only works on a single correlation")
# if frequencies do not match we need to reprojects fits cube
if np.any(ms_freqs != freqs):
print("Warning - reprojecting fits cube to MS freqs. "
"This uses a lot of memory. ")
from scipy.interpolate import RegularGridInterpolator
# interpolate fits cube
fits_interp = RegularGridInterpolator((freqs, l_coord, m_coord),
model.squeeze(),
bounds_error=False,
fill_value=None)
# reevaluate at ms freqs
vv, ll, mm = np.meshgrid(ms_freqs, l_coord, m_coord,
indexing='ij')
vlm = np.vstack((vv.flatten(), ll.flatten(), mm.flatten())).T
model_cube = fits_interp(vlm).reshape(nchan, npix_l, npix_m)
else:
model_cube = model
# set up coords for DFT
ll, mm = np.meshgrid(l_coord, m_coord)
lm = np.vstack((ll.flatten(), mm.flatten())).T
# get non-zero components of model
model_cube = model_cube.reshape(nchan, npix_tot)
model_max = np.amax(np.abs(model_cube), axis=0)
idx_nz = np.argwhere(model_max > 0.0).squeeze()
model_predict = np.transpose(model_cube[:, None, idx_nz],
[2, 0, 1])
ncomps = idx_nz.size
model_predict = da.from_array(model_predict, chunks=(ncomps, nchan, ncorr))
lm = da.from_array(lm[idx_nz, :], chunks=(ncomps, 2))
ms_freqs = spw_ds.CHAN_FREQ.data
xds = xds_from_ms(args.ms, columns=["UVW", args.colname],
chunks={"row": args.row_chunks})[0]
uvw = xds.UVW.data
vis = im_to_vis(model_predict, uvw, lm, ms_freqs)
data = getattr(xds, args.colname)
if data.shape != vis.shape:
print("Assuming only Stokes I passed in")
if vis.shape[-1] == 1 and data.shape[-1] == 4:
tmp_zero = da.zeros(vis.shape, chunks=(args.row_chunks, nchan, 1))
vis = da.concatenate((vis, tmp_zero, tmp_zero, vis), axis=-1)
elif vis.shape[-1] == 1 and data.shape[-1] == 2:
vis = da.concatenate((vis, vis), axis=-1)
else:
raise ValueError("Incompatible corr axes")
vis = vis.rechunk((args.row_chunks, nchan, data.shape[-1]))
# Assign visibilities to MODEL_DATA array on the dataset
xds = xds.assign(**{args.colname: (("row", "chan", "corr"), vis)})
# Create a write to the table
write = xds_to_table(xds, args.ms, [args.colname])
# Submit all graph computations in parallel
with ProgressBar():
dask.compute(write)
```
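The reprojection branch in the script above hinges on `scipy.interpolate.RegularGridInterpolator`. The sketch below is an illustrative, self-contained example (all shapes and values are made up, not taken from the script) of interpolating a small cube from its FITS frequency grid onto a different set of channel frequencies, mirroring that logic.
```python
import numpy as np
from scipy.interpolate import RegularGridInterpolator

nchan_in, npix_l, npix_m = 4, 8, 8
freqs = np.linspace(1.0e9, 1.3e9, nchan_in)       # FITS cube frequencies
l_coord = np.linspace(-0.01, 0.01, npix_l)
m_coord = np.linspace(-0.01, 0.01, npix_m)
model = np.random.rand(nchan_in, npix_l, npix_m)  # stand-in for the FITS data

fits_interp = RegularGridInterpolator((freqs, l_coord, m_coord), model,
                                      bounds_error=False, fill_value=None)

ms_freqs = np.linspace(1.05e9, 1.25e9, 16)        # target MS channel grid
vv, ll, mm = np.meshgrid(ms_freqs, l_coord, m_coord, indexing='ij')
vlm = np.vstack((vv.ravel(), ll.ravel(), mm.ravel())).T
model_cube = fits_interp(vlm).reshape(ms_freqs.size, npix_l, npix_m)
```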
#### File: nifty/tests/test_nifty_gridder.py
```python
import numpy as np
from numpy.testing import assert_array_almost_equal
import pickle
import pytest
from africanus.gridding.nifty.dask import (grid, degrid, dirty, model,
grid_config)
def rf(*a, **kw):
return np.random.random(*a, **kw)
def rc(*a, **kw):
return rf(*a, **kw) + 1j*rf(*a, **kw)
def test_dask_nifty_gridder():
""" Only tests that we can call it and create a dirty image """
dask = pytest.importorskip('dask')
da = pytest.importorskip('dask.array')
_ = pytest.importorskip('nifty_gridder')
row = (16,)*8
chan = (32,)
corr = (4,)
nx = 1026
ny = 1022
nrow = sum(row)
nchan = sum(chan)
ncorr = sum(corr)
# Random UV data
uvw = rf(size=(nrow, 3)).astype(np.float64)*128
vis = rf(size=(nrow, nchan, ncorr)).astype(np.complex128)
freq = np.linspace(.856e9, 2*.856e9, nchan)
flag = np.zeros(vis.shape, dtype=np.uint8)
    flag = np.random.randint(0, 2, vis.shape, dtype=np.uint8).astype(bool)
weight = rf(vis.shape).astype(np.float64)
da_vis = da.from_array(vis, chunks=(row, chan, corr))
da_uvw = da.from_array(uvw, chunks=(row, 3))
da_freq = da.from_array(freq, chunks=chan)
da_flag = da.from_array(flag, chunks=(row, chan, corr))
da_weight = da.from_array(weight, chunks=(row, chan, corr))
gc = grid_config(nx, ny, 2e-13, 2.0, 2.0)
# Standard fan reduction
g = grid(da_vis, da_uvw, da_flag, da_weight, da_freq, gc)
d1 = dirty(g, gc)
grid_shape = (gc.object.Nu(), gc.object.Nv(), ncorr)
dirty_shape = (gc.object.Nxdirty(), gc.object.Nydirty(), ncorr)
assert g.shape == grid_shape
assert d1.shape == dirty_shape == (nx, ny, ncorr)
# Stream reduction (single stream)
g = grid(da_vis, da_uvw, da_flag, da_weight, da_freq, gc, streams=1)
d2 = dirty(g, gc)
assert g.shape == grid_shape
assert d2.shape == dirty_shape == (nx, ny, ncorr)
    # Stream reduction (three streams)
g = grid(da_vis, da_uvw, da_flag, da_weight, da_freq, gc, streams=3)
d3 = dirty(g, gc)
assert g.shape == grid_shape
assert d3.shape == dirty_shape == (nx, ny, ncorr)
# All three techniques produce similar results
d1, d2, d3 = dask.compute(d1, d2, d3)
assert_array_almost_equal(d1, d2)
assert_array_almost_equal(d2, d3)
def test_dask_nifty_degridder():
""" Only tests that we can call it and create some visibilities """
da = pytest.importorskip('dask.array')
_ = pytest.importorskip('nifty_gridder')
row = (16, 16, 16, 16)
chan = (32,)
corr = (4,)
nrow = sum(row)
nchan = sum(chan)
ncorr = sum(corr)
nx = 1026
ny = 1022
gc = grid_config(nx, ny, 2e-13, 2.0, 2.0)
# Random UV data
uvw = rf(size=(nrow, 3)).astype(np.float64)*128
freq = np.linspace(.856e9, 2*.856e9, nchan)
    flag = np.zeros((nrow, nchan, ncorr), dtype=bool)
weight = np.ones((nrow, nchan, ncorr), dtype=np.float64)
image = rc(size=(nx, ny, ncorr)).astype(np.complex128)
da_uvw = da.from_array(uvw, chunks=(row, 3))
da_freq = da.from_array(freq, chunks=chan)
da_flag = da.from_array(flag, chunks=(row, chan, corr))
da_weight = da.from_array(weight, chunks=(row, chan, corr))
da_image = da.from_array(image, chunks=(nx, ny, 1))
da_grid_vis = model(da_image, gc)
da_vis = degrid(da_grid_vis, da_uvw, da_flag, da_weight, da_freq, gc)
vis = da_vis.compute()
assert vis.shape == da_vis.shape
def test_pickle_gridder_config():
gc = grid_config(512, 1024, 5e-13, 1.3, 2.0)
gc2 = pickle.loads(pickle.dumps(gc))
assert gc is not gc2
assert gc.object.Nxdirty() == gc2.object.Nxdirty() == 512
assert gc.object.Nydirty() == gc2.object.Nydirty() == 1024
assert gc.object.Epsilon() == gc2.object.Epsilon() == 5e-13
assert gc.object.Pixsize_x() == gc2.object.Pixsize_x() == 1.3
assert gc.object.Pixsize_y() == gc2.object.Pixsize_y() == 2.0
```
#### File: perleypolyhedron/policies/baseline_transform_policies.py
```python
from africanus.util.numba import jit, overload
from numpy import cos, sin
def uvw_norotate(uvw, ra0, dec0, ra, dec, policy_type):
pass
def uvw_rotate(uvw, ra0, dec0, ra, dec, policy_type):
'''
Compute the following 3x3 coordinate transformation matrix:
Z_rot(facet_new_rotation) * \\
T(new_phase_centre_ra,new_phase_centre_dec) * \\
transpose(T(old_phase_centre_ra,
old_phase_centre_dec)) * \\
transpose(Z_rot(facet_old_rotation))
where:
| cRA -sRA 0 |
T (RA,D) = | -sDsRA -sDcRA cD |
| cDsRA cDcRA sD |
    This is similar to the one in <NAME>.; <NAME>.;
and <NAME>., Jr. Interferometry and Synthesis
in Radio Astronomy, New York: Wiley, ch. 4, but in a
    left-handed system.
We're not transforming between a coordinate system with w pointing
towards the pole and one with w pointing towards the reference
centre here, so the last rotation matrix is ignored!
This transformation will let the image be tangent to the celestial
sphere at the new delay centre
'''
d_ra = ra - ra0
c_d_ra = cos(d_ra)
s_d_ra = sin(d_ra)
c_new_dec = cos(dec)
c_old_dec = cos(dec0)
s_new_dec = sin(dec)
s_old_dec = sin(dec0)
mat_11 = c_d_ra
mat_12 = s_old_dec * s_d_ra
mat_13 = -c_old_dec * s_d_ra
mat_21 = -s_new_dec * s_d_ra
mat_22 = s_new_dec * s_old_dec * c_d_ra + c_new_dec * c_old_dec
mat_23 = -c_old_dec * s_new_dec * c_d_ra + c_new_dec * s_old_dec
mat_31 = c_new_dec * s_d_ra
mat_32 = -c_new_dec * s_old_dec * c_d_ra + s_new_dec * c_old_dec
mat_33 = c_new_dec * c_old_dec * c_d_ra + s_new_dec * s_old_dec
    # apply the rotation to the original (u, v, w) triple
    u, v, w = uvw[0], uvw[1], uvw[2]
    uvw[0] = mat_11 * u + mat_12 * v + mat_13 * w
    uvw[1] = mat_21 * u + mat_22 * v + mat_23 * w
    uvw[2] = mat_31 * u + mat_32 * v + mat_33 * w
@jit(nopython=True, nogil=True, fastmath=True, parallel=False)
def uvw_planarwapprox(uvw, ra0, dec0, ra, dec, policy_type):
'''
Implements the coordinate uv transform associated with taking a planar
approximation to w(n-1) as described in Kogan & Greisen's AIPS Memo 113
This is essentially equivalent to rotating the facet to be tangent to
the celestial sphere as Perley suggested to limit error, but it instead
takes w into account in a linear approximation to the phase error near
the facet centre. This keeps the facets parallel to the original facet
    plane. Of course this first-order 2D Taylor expansion is only
    valid over a small field of view, but that is true of normal tilted
faceting as well. Only a convolution can get rid of the (n-1)
factor in the ME.
'''
d_ra = ra - ra0
n_dec = dec
o_dec = dec0
c_d_ra = cos(d_ra)
s_d_ra = sin(d_ra)
c_new_dec = cos(n_dec)
c_old_dec = cos(o_dec)
s_new_dec = sin(n_dec)
s_old_dec = sin(o_dec)
li0 = c_new_dec * s_d_ra
mi0 = s_new_dec * c_old_dec - c_new_dec * s_old_dec * c_d_ra
ni0 = s_new_dec * s_old_dec + c_new_dec * c_old_dec * c_d_ra
uvw[0] = uvw[0] - uvw[2] * li0 / ni0
uvw[1] = uvw[1] - uvw[2] * mi0 / ni0
def policy(uvw, ra0, dec0, ra, dec, policy_type):
pass
@overload(policy, inline="always")
def policy_impl(uvw, ra0, dec0, ra, dec, policy_type):
if policy_type.literal_value == "None":
return uvw_norotate
elif policy_type.literal_value == "rotate":
return uvw_rotate
elif policy_type.literal_value == "wlinapprox":
return uvw_planarwapprox
else:
raise ValueError("Invalid baseline transform policy type")
```
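As a quick sanity check on the matrix built in `uvw_rotate`, the snippet below (an illustrative sketch, not part of the module) evaluates the nine `mat_*` entries for the degenerate case where the new phase centre equals the old one; the matrix collapses to the identity, so `uvw` would be left unchanged.
```python
import numpy as np

ra0 = ra = 0.3          # radians, arbitrary test values
dec0 = dec = -0.7
d_ra = ra - ra0
c_d_ra, s_d_ra = np.cos(d_ra), np.sin(d_ra)
c_new, s_new = np.cos(dec), np.sin(dec)
c_old, s_old = np.cos(dec0), np.sin(dec0)

mat = np.array([
    [c_d_ra, s_old * s_d_ra, -c_old * s_d_ra],
    [-s_new * s_d_ra,
     s_new * s_old * c_d_ra + c_new * c_old,
     -c_old * s_new * c_d_ra + c_new * s_old],
    [c_new * s_d_ra,
     -c_new * s_old * c_d_ra + s_new * c_old,
     c_new * c_old * c_d_ra + s_new * s_old],
])

assert np.allclose(mat, np.eye(3))   # no rotation when the centres coincide
```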
#### File: perleypolyhedron/policies/phase_transform_policies.py
```python
from africanus.util.numba import overload
from numpy import pi, cos, sin, sqrt
def phase_norotate(vis,
uvw,
lambdas,
ra0,
dec0,
ra,
dec,
policy_type,
phasesign=1.0):
pass
def phase_rotate(vis,
uvw,
lambdas,
ra0,
dec0,
ra,
dec,
policy_type,
phasesign=1.0):
'''
Convert ra,dec to l,m,n based on Synthesis Imaging II, Pg. 388
The phase term (as documented in Perley & Cornwell (1992))
calculation requires the delta l,m,n coordinates.
Through simplification l0,m0,n0 = (0,0,1) (assume dec == dec0 and
ra == ra0, and the simplification follows)
l,m,n is then calculated using the new and original phase centres
as per the relation on Pg. 388
    lambdas has the same length as the channel axis of vis
'''
d_ra = ra - ra0
d_dec = dec
d_decp = dec0
c_d_dec = cos(d_dec)
s_d_dec = sin(d_dec)
s_d_ra = sin(d_ra)
c_d_ra = cos(d_ra)
c_d_decp = cos(d_decp)
s_d_decp = sin(d_decp)
ll = c_d_dec * s_d_ra
mm = (s_d_dec * c_d_decp - c_d_dec * s_d_decp * c_d_ra)
nn = -(1 - sqrt(1 - ll * ll - mm * mm))
for c in range(lambdas.size):
x = phasesign * 2 * pi * (uvw[0] * ll + uvw[1] * mm +
uvw[2] * nn) / lambdas[c]
vis[c, :] *= cos(x) + 1.0j * sin(x)
def policy(vis, uvw, lambdas, ra0, dec0, ra, dec, policy_type, phasesign=1.0):
pass
@overload(policy, inline="always")
def policy_impl(vis,
uvw,
lambdas,
ra0,
dec0,
ra,
dec,
policy_type,
phasesign=1.0):
if policy_type.literal_value == "None" or \
policy_type.literal_value is None:
return phase_norotate
elif policy_type.literal_value == "phase_rotate":
return phase_rotate
else:
        raise ValueError("Invalid phase transform policy type")
```
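The snippet below is a plain-numpy sketch (an assumed illustration, not library code) of the `phase_rotate` computation for a single row: when `ra, dec` equal `ra0, dec0` the direction cosines `(l, m, n-1)` are all zero and the visibilities are returned unchanged.
```python
import numpy as np

ra0, dec0 = 0.1, -0.5                 # radians
ra, dec = ra0, dec0                   # same phase centre
uvw = np.array([100.0, -50.0, 5.0])   # metres
lambdas = np.array([0.21, 0.18])      # one wavelength per channel
vis = np.ones((2, 4), dtype=np.complex128)

d_ra = ra - ra0
ll = np.cos(dec) * np.sin(d_ra)
mm = np.sin(dec) * np.cos(dec0) - np.cos(dec) * np.sin(dec0) * np.cos(d_ra)
nn = -(1 - np.sqrt(1 - ll * ll - mm * mm))

for c in range(lambdas.size):
    x = 2 * np.pi * (uvw[0] * ll + uvw[1] * mm + uvw[2] * nn) / lambdas[c]
    vis[c, :] *= np.cos(x) + 1.0j * np.sin(x)

assert np.allclose(vis, 1.0 + 0j)     # zero phase, visibilities untouched
```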
#### File: perleypolyhedron/policies/stokes_conversion_policies.py
```python
from africanus.util.numba import overload
def stokes2corr(vis_in, vis_out, policy_type):
pass
@overload(stokes2corr, inline="always")
def stokes2corrimpl(vis_in, vis_out, policy_type):
if policy_type.literal_value == "XXYY_FROM_I":
def XXYY_FROM_I(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += vis_in
return XXYY_FROM_I
elif policy_type.literal_value == "XXXYYXYY_FROM_I":
def XXXYYXYY_FROM_I(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += 0
vis_out[2] += 0
vis_out[3] += vis_in
return XXXYYXYY_FROM_I
elif policy_type.literal_value == "RRLL_FROM_I":
def RRLL_FROM_I(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += vis_in
return RRLL_FROM_I
elif policy_type.literal_value == "RRRLLRLL_FROM_I":
def RRRLLRLL_FROM_I(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += 0
vis_out[2] += 0
vis_out[3] += vis_in
return RRRLLRLL_FROM_I
elif policy_type.literal_value == "XXYY_FROM_Q":
def XXYY_FROM_Q(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += -vis_in
return XXYY_FROM_Q
elif policy_type.literal_value == "XXXYYXYY_FROM_Q":
def XXXYYXYY_FROM_Q(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += 0
vis_out[2] += 0
vis_out[3] += -vis_in
return XXXYYXYY_FROM_Q
elif policy_type.literal_value == "RLLR_FROM_Q":
def RLLR_FROM_Q(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += vis_in
return RLLR_FROM_Q
elif policy_type.literal_value == "RRRLLRLL_FROM_Q":
def RRRLLRLL_FROM_Q(vis_in, vis_out, policy_type):
vis_out[0] += 0
vis_out[1] += vis_in
vis_out[2] += vis_in
vis_out[3] += 0
return RRRLLRLL_FROM_Q
elif policy_type.literal_value == "XYYX_FROM_U":
def XYYX_FROM_U(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += vis_in
return XYYX_FROM_U
elif policy_type.literal_value == "XXXYYXYY_FROM_U":
def XXXYYXYY_FROM_U(vis_in, vis_out, policy_type):
vis_out[0] += 0
vis_out[1] += vis_in
vis_out[2] += vis_in
vis_out[3] += 0
return XXXYYXYY_FROM_U
elif policy_type.literal_value == "RLLR_FROM_U":
def RLLR_FROM_U(vis_in, vis_out, policy_type):
vis_out[0] += 1.0j * vis_in
vis_out[1] += -1.0j * vis_in
return RLLR_FROM_U
elif policy_type.literal_value == "RRRLLRLL_FROM_U":
def RRRLLRLL_FROM_U(vis_in, vis_out, policy_type):
vis_out[0] += 0.0
vis_out[1] += 1.0j * vis_in
vis_out[2] += -1.0j * vis_in
vis_out[3] += 0.0
return RRRLLRLL_FROM_U
elif policy_type.literal_value == "XYYX_FROM_V":
def XYYX_FROM_V(vis_in, vis_out, policy_type):
vis_out[0] += 1.0j * vis_in
vis_out[1] += -1.0j * vis_in
return XYYX_FROM_V
elif policy_type.literal_value == "XXXYYXYY_FROM_V":
def XXXYYXYY_FROM_V(vis_in, vis_out, policy_type):
vis_out[0] += 0.0
vis_out[1] += 1.0j * vis_in
vis_out[2] += -1.0j * vis_in
vis_out[3] += 0.0
return XXXYYXYY_FROM_V
elif policy_type.literal_value == "RRLL_FROM_V":
def RRLL_FROM_V(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += -vis_in
return RRLL_FROM_V
elif policy_type.literal_value == "RRRLLRLL_FROM_V":
def RRRLLRLL_FROM_V(vis_in, vis_out, policy_type):
vis_out[0] += vis_in
vis_out[1] += 0
vis_out[2] += 0
vis_out[3] += -vis_in
return RRRLLRLL_FROM_V
else:
raise ValueError("Invalid stokes conversion")
def corr2stokes(vis_in, policy_type):
pass
@overload(corr2stokes, inline="always")
def corr2stokesimpl(vis_in, policy_type):
if policy_type.literal_value == "I_FROM_XXYY":
return lambda vis_in, policy_type: (vis_in[0] + vis_in[1]) * 0.5
elif policy_type.literal_value == "I_FROM_XXXYYXYY":
return lambda vis_in, policy_type: (vis_in[0] + vis_in[3]) * 0.5
elif policy_type.literal_value == "I_FROM_RRLL":
return lambda vis_in, policy_type: (vis_in[0] + vis_in[1]) * 0.5
elif policy_type.literal_value == "I_FROM_RRRLLRLL":
return lambda vis_in, policy_type: (vis_in[0] + vis_in[3]) * 0.5
elif policy_type.literal_value == "Q_FROM_XXYY":
return lambda vis_in, policy_type: (vis_in[0] - vis_in[1]) * 0.5
elif policy_type.literal_value == "Q_FROM_XXXYYXYY":
return lambda vis_in, policy_type: (vis_in[0] - vis_in[3]) * 0.5
elif policy_type.literal_value == "Q_FROM_RRRLLRLL":
return lambda vis_in, policy_type: (vis_in[1] + vis_in[2]) * 0.5
elif policy_type.literal_value == "U_FROM_XYYX":
return lambda vis_in, policy_type: (vis_in[0] + vis_in[1]) * 0.5
elif policy_type.literal_value == "U_FROM_XXXYYXYY":
return lambda vis_in, policy_type: (vis_in[1] + vis_in[2]) * 0.5
elif policy_type.literal_value == "U_FROM_RLLR":
return lambda vis_in, policy_type: -1.0j * (vis_in[0] - vis_in[1]
) * 0.5
elif policy_type.literal_value == "U_FROM_RRRLLRLL":
return lambda vis_in, policy_type: -1.0j * (vis_in[1] - vis_in[2]
) * 0.5
elif policy_type.literal_value == "V_FROM_RRLL":
return lambda vis_in, policy_type: (vis_in[0] - vis_in[1]) * 0.5
elif policy_type.literal_value == "V_FROM_RRRLLRLL":
return lambda vis_in, policy_type: (vis_in[0] - vis_in[3]) * 0.5
elif policy_type.literal_value == "V_FROM_XYYX":
return lambda vis_in, policy_type: -1.0j * (vis_in[0] - vis_in[1]
) * 0.5
elif policy_type.literal_value == "V_FROM_XXXYYXYY":
return lambda vis_in, policy_type: -1.0j * (vis_in[1] - vis_in[2]
) * 0.5
else:
raise ValueError("Invalid stokes conversion")
def ncorr_out(policy_type):
pass
@overload(ncorr_out, inline="always")
def ncorr_outimpl(policy_type):
if policy_type.literal_value == "XXYY_FROM_I":
return lambda policy_type: 2
elif policy_type.literal_value == "XXXYYXYY_FROM_I":
return lambda policy_type: 4
elif policy_type.literal_value == "RRLL_FROM_I":
return lambda policy_type: 2
elif policy_type.literal_value == "RRRLLRLL_FROM_I":
return lambda policy_type: 4
elif policy_type.literal_value == "XXYY_FROM_Q":
return lambda policy_type: 2
elif policy_type.literal_value == "XXXYYXYY_FROM_Q":
return lambda policy_type: 4
elif policy_type.literal_value == "RLLR_FROM_Q":
return lambda policy_type: 2
elif policy_type.literal_value == "RRRLLRLL_FROM_Q":
return lambda policy_type: 4
elif policy_type.literal_value == "XYYX_FROM_U":
return lambda policy_type: 2
elif policy_type.literal_value == "XXXYYXYY_FROM_U":
return lambda policy_type: 4
elif policy_type.literal_value == "RLLR_FROM_U":
return lambda policy_type: 2
elif policy_type.literal_value == "RRRLLRLL_FROM_U":
return lambda policy_type: 4
elif policy_type.literal_value == "XYYX_FROM_V":
return lambda policy_type: 2
elif policy_type.literal_value == "XXXYYXYY_FROM_V":
return lambda policy_type: 4
elif policy_type.literal_value == "RRLL_FROM_V":
return lambda policy_type: 2
elif policy_type.literal_value == "RRRLLRLL_FROM_V":
return lambda policy_type: 4
else:
raise ValueError("Invalid stokes conversion")
def ncorr_outpy(policy_type):
if policy_type == "XXYY_FROM_I":
return lambda: 2
elif policy_type == "XXXYYXYY_FROM_I":
return lambda: 4
elif policy_type == "RRLL_FROM_I":
return lambda: 2
elif policy_type == "RRRLLRLL_FROM_I":
return lambda: 4
elif policy_type == "XXYY_FROM_Q":
return lambda: 2
elif policy_type == "XXXYYXYY_FROM_Q":
return lambda: 4
elif policy_type == "RLLR_FROM_Q":
return lambda: 2
elif policy_type == "RRRLLRLL_FROM_Q":
return lambda: 4
elif policy_type == "XYYX_FROM_U":
return lambda: 2
elif policy_type == "XXXYYXYY_FROM_U":
return lambda: 4
elif policy_type == "RLLR_FROM_U":
return lambda: 2
elif policy_type == "RRRLLRLL_FROM_U":
return lambda: 4
elif policy_type == "XYYX_FROM_V":
return lambda: 2
elif policy_type == "XXXYYXYY_FROM_V":
return lambda: 4
elif policy_type == "RRLL_FROM_V":
return lambda: 2
elif policy_type == "RRRLLRLL_FROM_V":
return lambda: 4
else:
raise ValueError("Invalid stokes conversion")
```
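Outside of numba the overloads above cannot be called directly, but the round trip they encode is easy to spell out by hand. The sketch below (illustrative only) spreads Stokes I onto the XX/YY correlations and recovers it again, matching the `XXYY_FROM_I` and `I_FROM_XXYY` policies.
```python
import numpy as np

vis_i = 2.5 + 0j
corr = np.zeros(2, dtype=np.complex128)

# XXYY_FROM_I: both linear parallel hands carry Stokes I
corr[0] += vis_i
corr[1] += vis_i

# I_FROM_XXYY: average of the parallel hands
recovered = (corr[0] + corr[1]) * 0.5
assert recovered == vis_i
```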
#### File: perleypolyhedron/tests/test_daskintrf.py
```python
import time
import numpy as np
import pytest
from africanus.gridding.perleypolyhedron import (kernels,
gridder,
degridder)
from africanus.gridding.perleypolyhedron import dask as dwrap
from africanus.dft.kernels import im_to_vis
from africanus.constants import c as lightspeed
class clock:
def __init__(self, identifier="untitled"):
self._id = identifier
self._elapsed = 0.0
self._onenter = 0.0
self._onexit = 0.0
def __enter__(self):
self._onenter = time.time()
return self
def __exit__(self, extype, exval, tb):
self._onexit = time.time()
self._elapsed = self._onexit - self._onenter
@property
def elapsed(self):
return self._elapsed
def __str__(self):
res = "{0:s}: Walltime {1:.0f}m{2:.2f}s elapsed".format(
self._id, self.elapsed // 60,
self.elapsed - (self.elapsed // 60) * 60)
return res
__repr__ = __str__
def test_gridder_dask():
da = pytest.importorskip("dask.array")
with clock("DASK gridding") as tictoc:
# construct kernel
W = 5
OS = 9
kern = kernels.pack_kernel(kernels.kbsinc(W, oversample=OS), W, OS)
nrow = int(1e6)
np.random.seed(0)
        # simulate some fictitious baselines rotated by an hour angle
row_chunks = nrow // 10
uvw = np.zeros((nrow, 3), dtype=np.float64)
blpos = np.random.uniform(26, 10000, size=(25, 3))
ntime = int(nrow / 25.0)
d0 = np.pi / 4.0
for n in range(25):
for ih0, h0 in enumerate(
np.linspace(np.deg2rad(-20), np.deg2rad(20), ntime)):
s = np.sin
c = np.cos
R = np.array([[s(h0), c(h0), 0],
[-s(d0) * c(h0),
s(d0) * s(h0),
c(d0)],
[c(d0) * c(h0), -c(d0) * s(h0),
s(d0)]])
uvw[n * ntime + ih0, :] = np.dot(R, blpos[n, :].T)
uvw = da.from_array(uvw, chunks=(row_chunks, 3))
pxacrossbeam = 5
nchan = 128
frequency = da.from_array(np.linspace(1.0e9, 1.4e9, nchan),
chunks=(nchan, ))
wavelength = lightspeed / frequency
cell = da.rad2deg(
wavelength[0] /
(max(da.max(da.absolute(uvw[:, 0])),
da.max(da.absolute(uvw[:, 1]))) * pxacrossbeam))
npixfacet = 100
fftpad = 1.1
image_centres = da.from_array(np.array([[0, d0]]), chunks=(1, 2))
chanmap = da.from_array(np.zeros(nchan, dtype=np.int64),
chunks=(nchan, ))
detaper_facet = kernels.compute_detaper_dft_seperable(
int(npixfacet * fftpad), kernels.unpack_kernel(kern, W, OS), W,
OS)
vis_dft = da.ones(shape=(nrow, nchan, 2),
chunks=(row_chunks, nchan, 2),
dtype=np.complex64)
vis_grid_facet = dwrap.gridder(
uvw,
vis_dft,
wavelength,
chanmap,
int(npixfacet * fftpad),
cell * 3600.0,
image_centres, (0, d0),
kern,
W,
OS,
"None",
"None",
"I_FROM_XXYY",
"conv_1d_axisymmetric_packed_scatter",
do_normalize=True)
vis_grid_facet = vis_grid_facet.compute()
ftvisfacet = (np.fft.fftshift(
np.fft.ifft2(np.fft.ifftshift(
vis_grid_facet[0, :, :]))).reshape(
(1, int(npixfacet * fftpad), int(
npixfacet * fftpad)))).real / detaper_facet * int(
npixfacet * fftpad)**2
ftvisfacet = ftvisfacet[:,
int(npixfacet * fftpad) // 2 - npixfacet //
2:int(npixfacet * fftpad) // 2 -
npixfacet // 2 + npixfacet,
int(npixfacet * fftpad) // 2 - npixfacet //
2:int(npixfacet * fftpad) // 2 -
npixfacet // 2 + npixfacet]
print(tictoc)
assert (np.abs(np.max(ftvisfacet[0, :, :]) - 1.0) < 1.0e-6)
def test_gridder_nondask():
with clock("Non-DASK gridding") as tictoc:
# construct kernel
W = 5
OS = 9
kern = kernels.pack_kernel(kernels.kbsinc(W, oversample=OS), W, OS)
nrow = int(1e6)
np.random.seed(0)
        # simulate some fictitious baselines rotated by an hour angle
uvw = np.zeros((nrow, 3), dtype=np.float64)
blpos = np.random.uniform(26, 10000, size=(25, 3))
ntime = int(nrow / 25.0)
d0 = np.pi / 4.0
for n in range(25):
for ih0, h0 in enumerate(
np.linspace(np.deg2rad(-20), np.deg2rad(20), ntime)):
s = np.sin
c = np.cos
R = np.array([[s(h0), c(h0), 0],
[-s(d0) * c(h0),
s(d0) * s(h0),
c(d0)],
[c(d0) * c(h0), -c(d0) * s(h0),
s(d0)]])
uvw[n * ntime + ih0, :] = np.dot(R, blpos[n, :].T)
pxacrossbeam = 5
nchan = 128
frequency = np.linspace(1.0e9, 1.4e9, nchan)
wavelength = lightspeed / frequency
cell = np.rad2deg(
wavelength[0] /
(max(np.max(np.absolute(uvw[:, 0])),
np.max(np.absolute(uvw[:, 1]))) * pxacrossbeam))
npixfacet = 100
fftpad = 1.1
image_centres = np.array([[0, d0]])
chanmap = np.zeros(nchan, dtype=np.int64)
detaper_facet = kernels.compute_detaper_dft_seperable(
int(npixfacet * fftpad), kernels.unpack_kernel(kern, W, OS), W,
OS)
vis_dft = np.ones((nrow, nchan, 2), dtype=np.complex64)
vis_grid_facet = gridder.gridder(
uvw,
vis_dft,
wavelength,
chanmap,
int(npixfacet * fftpad),
cell * 3600.0,
image_centres[0, :], (0, d0),
kern,
W,
OS,
"None",
"None",
"I_FROM_XXYY",
"conv_1d_axisymmetric_packed_scatter",
do_normalize=True)
ftvisfacet = (np.fft.fftshift(
np.fft.ifft2(np.fft.ifftshift(
vis_grid_facet[0, :, :]))).reshape(
(1, int(npixfacet * fftpad), int(
npixfacet * fftpad)))).real / detaper_facet * int(
npixfacet * fftpad)**2
ftvisfacet = ftvisfacet[:,
int(npixfacet * fftpad) // 2 - npixfacet //
2:int(npixfacet * fftpad) // 2 -
npixfacet // 2 + npixfacet,
int(npixfacet * fftpad) // 2 - npixfacet //
2:int(npixfacet * fftpad) // 2 -
npixfacet // 2 + npixfacet]
print(tictoc)
assert (np.abs(np.max(ftvisfacet[0, :, :]) - 1.0) < 1.0e-6)
def test_degrid_dft_packed_nondask():
# construct kernel
W = 5
OS = 3
kern = kernels.pack_kernel(kernels.kbsinc(W, oversample=OS),
W,
oversample=OS)
nrow = int(5e4)
uvw = np.column_stack(
(5000.0 * np.cos(np.linspace(0, 2 * np.pi, nrow)),
5000.0 * np.sin(np.linspace(0, 2 * np.pi, nrow)), np.zeros(nrow)))
pxacrossbeam = 10
nchan = 1024
frequency = np.linspace(1.0e9, 1.4e9, nchan)
wavelength = lightspeed / frequency
cell = np.rad2deg(
wavelength[0] /
(2 * max(np.max(np.abs(uvw[:, 0])), np.max(np.abs(uvw[:, 1]))) *
pxacrossbeam))
npix = 512
mod = np.zeros((1, npix, npix), dtype=np.complex64)
mod[0, npix // 2 - 5, npix // 2 - 5] = 1.0
ftmod = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(
mod[0, :, :]))).reshape((1, npix, npix))
chanmap = np.zeros(nchan, dtype=np.int64)
with clock("Non-DASK degridding") as tictoc:
degridder.degridder(
uvw,
ftmod,
wavelength,
chanmap,
cell * 3600.0,
(0, np.pi / 4.0),
(0, np.pi / 4.0),
kern,
W,
OS,
"None", # no faceting
"None", # no faceting
"XXYY_FROM_I",
"conv_1d_axisymmetric_packed_gather")
print(tictoc)
def test_degrid_dft_packed_dask():
da = pytest.importorskip("dask.array")
# construct kernel
W = 5
OS = 3
kern = kernels.pack_kernel(kernels.kbsinc(W, oversample=OS),
W,
oversample=OS)
nrow = int(5e4)
nrow_chunk = nrow // 32
uvw = np.column_stack(
(5000.0 * np.cos(np.linspace(0, 2 * np.pi, nrow)),
5000.0 * np.sin(np.linspace(0, 2 * np.pi, nrow)), np.zeros(nrow)))
pxacrossbeam = 10
nchan = 1024
frequency = np.linspace(1.0e9, 1.4e9, nchan)
wavelength = lightspeed / frequency
cell = np.rad2deg(
wavelength[0] /
(2 * max(np.max(np.abs(uvw[:, 0])), np.max(np.abs(uvw[:, 1]))) *
pxacrossbeam))
npix = 512
mod = np.zeros((1, npix, npix), dtype=np.complex64)
mod[0, npix // 2 - 5, npix // 2 - 5] = 1.0
ftmod = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(
mod[0, :, :]))).reshape((1, 1, npix, npix))
chanmap = np.zeros(nchan, dtype=np.int64)
with clock("DASK degridding") as tictoc:
vis_degrid = dwrap.degridder(
da.from_array(uvw, chunks=(nrow_chunk, 3)),
da.from_array(ftmod, chunks=(1, 1, npix, npix)),
da.from_array(wavelength, chunks=(nchan, )),
da.from_array(chanmap, chunks=(nchan, )),
cell * 3600.0,
da.from_array(np.array([[0, np.pi / 4.0]]), chunks=(1, 2)),
(0, np.pi / 4.0),
kern,
W,
OS,
"None", # no faceting
"None", # no faceting
"XXYY_FROM_I",
"conv_1d_axisymmetric_packed_gather")
vis_degrid = vis_degrid.compute()
print(tictoc)
def test_degrid_dft_packed_dask_dft_check():
da = pytest.importorskip("dask.array")
# construct kernel
W = 5
OS = 3
kern = kernels.pack_kernel(kernels.kbsinc(W, oversample=OS),
W,
oversample=OS)
nrow = 100
nrow_chunk = nrow // 8
uvw = np.column_stack(
(5000.0 * np.cos(np.linspace(0, 2 * np.pi, nrow)),
5000.0 * np.sin(np.linspace(0, 2 * np.pi, nrow)), np.zeros(nrow)))
pxacrossbeam = 10
nchan = 16
frequency = np.linspace(1.0e9, 1.4e9, nchan)
wavelength = lightspeed / frequency
cell = np.rad2deg(
wavelength[0] /
(2 * max(np.max(np.abs(uvw[:, 0])), np.max(np.abs(uvw[:, 1]))) *
pxacrossbeam))
npix = 512
mod = np.zeros((1, npix, npix), dtype=np.complex64)
mod[0, npix // 2 - 5, npix // 2 - 5] = 1.0
ftmod = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(
mod[0, :, :]))).reshape((1, 1, npix, npix))
chanmap = np.zeros(nchan, dtype=np.int64)
dec, ra = np.meshgrid(
np.arange(-npix // 2, npix // 2) * np.deg2rad(cell),
np.arange(-npix // 2, npix // 2) * np.deg2rad(cell))
radec = np.column_stack((ra.flatten(), dec.flatten()))
vis_dft = im_to_vis(mod[0, :, :].reshape(1, 1, npix * npix).T.copy(),
uvw, radec, frequency)
vis_degrid = dwrap.degridder(
da.from_array(uvw, chunks=(nrow_chunk, 3)),
da.from_array(ftmod, chunks=(1, 1, npix, npix)),
da.from_array(wavelength, chunks=(nchan, )),
da.from_array(chanmap, chunks=(nchan, )),
cell * 3600.0,
da.from_array(np.array([[0, np.pi / 4.0]]), chunks=(1, 2)),
(0, np.pi / 4.0),
kern,
W,
OS,
"None", # no faceting
"None", # no faceting
"XXYY_FROM_I",
"conv_1d_axisymmetric_packed_gather")
vis_degrid = vis_degrid.compute()
assert np.percentile(
np.abs(vis_dft[:, 0, 0].real - vis_degrid[:, 0, 0].real),
99.0) < 0.05
assert np.percentile(
np.abs(vis_dft[:, 0, 0].imag - vis_degrid[:, 0, 0].imag),
99.0) < 0.05
```
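The `clock` context manager defined at the top of the test module is handy on its own. A minimal usage sketch, assuming the class above is in scope (the sleep duration here is arbitrary):
```python
import time

with clock("example section") as tictoc:
    time.sleep(0.25)            # stand-in for real work

print(tictoc)   # e.g. "example section: Walltime 0m0.25s elapsed"
```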
#### File: gridding/wgridder/im2residim.py
```python
try:
from ducc0.wgridder import dirty2ms, ms2dirty
except ImportError as e:
ducc_import_error = e
else:
ducc_import_error = None
import numpy as np
from africanus.util.docs import DocstringTemplate
from africanus.util.requirements import requires_optional
@requires_optional('ducc0.wgridder', ducc_import_error)
def _residual_internal(uvw, freq, image, vis, freq_bin_idx, freq_bin_counts,
cell, weights, flag, celly, epsilon, nthreads,
do_wstacking, double_accum):
# adjust for chunking
# need a copy here if using multiple row chunks
freq_bin_idx2 = freq_bin_idx - freq_bin_idx.min()
nband = freq_bin_idx.size
_, nx, ny = image.shape
# the extra dimension is required to allow for chunking over row
residim = np.zeros((1, nband, nx, ny), dtype=image.dtype)
for i in range(nband):
ind = slice(freq_bin_idx2[i], freq_bin_idx2[i] + freq_bin_counts[i])
if weights is not None:
wgt = weights[:, ind]
else:
wgt = None
if flag is not None:
mask = flag[:, ind]
else:
mask = None
tvis = vis[:, ind]
residvis = tvis - dirty2ms(
uvw=uvw, freq=freq[ind],
dirty=image[i], wgt=None,
pixsize_x=cell, pixsize_y=celly,
nu=0, nv=0, epsilon=epsilon,
nthreads=nthreads, mask=mask,
do_wstacking=do_wstacking)
residim[0, i] = ms2dirty(uvw=uvw, freq=freq[ind], ms=residvis,
wgt=wgt, npix_x=nx, npix_y=ny,
pixsize_x=cell, pixsize_y=celly,
nu=0, nv=0, epsilon=epsilon,
nthreads=nthreads, mask=mask,
do_wstacking=do_wstacking,
double_precision_accumulation=double_accum)
return residim
# This additional wrapper is required to allow the dask wrappers
# to chunk over row
@requires_optional('ducc0.wgridder', ducc_import_error)
def residual(uvw, freq, image, vis, freq_bin_idx, freq_bin_counts, cell,
weights=None, flag=None, celly=None, epsilon=1e-5, nthreads=1,
do_wstacking=True, double_accum=False):
if celly is None:
celly = cell
if not nthreads:
import multiprocessing
nthreads = multiprocessing.cpu_count()
residim = _residual_internal(uvw, freq, image, vis, freq_bin_idx,
freq_bin_counts, cell, weights, flag,
celly, epsilon, nthreads, do_wstacking,
double_accum)
return residim[0]
RESIDUAL_DOCS = DocstringTemplate(
r"""
Compute residual image given a model and visibilities using ducc
degridder i.e.
.. math::
I^R = R^\dagger \Sigma^{-1}(V - Rx)
where :math:`R` is an implicit degridding operator, :math:`V`
denotes visibilities of shape :code:`(row, chan)` and
:math:`x` is the image of shape :code:`(band, nx, ny)`.
The number of imaging bands :code:`(band)` must
be less than or equal to the number of channels
:code:`(chan)` at which the data were obtained.
The mapping from :code:`(chan)` to :code:`(band)` is described
by :code:`freq_bin_idx` and :code:`freq_bin_counts` as
described below.
Note that, if the gridding and degridding operators both apply
the square root of the imaging weights then the visibilities
that are passed in should be pre-whitened. In this case the
function computes
.. math::
I^R = R^\dagger \Sigma^{-\frac{1}{2}}(\tilde{V}
- \Sigma^{-\frac{1}{2}}Rx)
which is identical to the above expression if
:math:`\tilde{V} = \Sigma^{-\frac{1}{2}}V`.
Parameters
----------
uvw : $(array_type)
uvw coordinates at which visibilities were
obtained with shape :code:`(row, 3)`.
freq : $(array_type)
Observational frequencies of shape :code:`(chan,)`.
    image : $(array_type)
Model image to degrid of shape :code:`(band, nx, ny)`.
vis : $(array_type)
Visibilities of shape :code:`(row,chan)`.
weights : $(array_type)
Imaging weights of shape :code:`(row, chan)`.
freq_bin_idx : $(array_type)
Starting indices of frequency bins for each imaging
band of shape :code:`(band,)`.
freq_bin_counts : $(array_type)
The number of channels in each imaging band of shape :code:`(band,)`.
cell : float
The cell size of a pixel along the :math:`x` direction in radians.
flag: $(array_type), optional
Flags of shape :code:`(row,chan)`. Will only process visibilities
for which flag!=0
celly : float, optional
The cell size of a pixel along the :math:`y` direction in radians.
By default same as cell size along :math:`x` direction.
nu : int, optional
The number of pixels in the padded grid along the :math:`x` direction.
Chosen automatically by default.
nv : int, optional
The number of pixels in the padded grid along the :math:`y` direction.
Chosen automatically by default.
epsilon : float, optional
The precision of the gridder with respect to the direct Fourier
        transform. By default, this is set to :code:`1e-5` for single
precision and :code:`1e-7` for double precision.
nthreads : int, optional
The number of threads to use. Defaults to one.
do_wstacking : bool, optional
Whether to correct for the w-term or not. Defaults to True
double_accum : bool, optional
If true ducc will accumulate in double precision regardless of
the input type.
Returns
-------
residual : $(array_type)
Residual image corresponding to :code:`model` of shape
:code:`(band, nx, ny)`.
""")
try:
residual.__doc__ = RESIDUAL_DOCS.substitute(
array_type=":class:`numpy.ndarray`")
except AttributeError:
pass
```
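To make the documented expression I^R = R^† Σ^{-1}(V - Rx) concrete, here is a toy dense-matrix version (an assumed illustration; `R` below is a random matrix standing in for the degridding operator, not the ducc gridder):
```python
import numpy as np

np.random.seed(42)
nvis, npix = 20, 9
R = np.random.randn(nvis, npix) + 1j * np.random.randn(nvis, npix)
x = np.random.randn(npix)                   # flattened model image
V = R @ x + 0.1 * np.random.randn(nvis)     # noisy "visibilities"
w = np.random.rand(nvis)                    # Sigma^{-1} as per-visibility weights

residual_image = (R.conj().T @ (w * (V - R @ x))).real
print(residual_image.shape)                 # (9,)
```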
#### File: wgridder/tests/test_wgridder.py
```python
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
import pytest
from africanus.constants import c as lightspeed
pmp = pytest.mark.parametrize
def _l2error(a, b):
return np.sqrt(np.sum(np.abs(a-b)**2)/np.maximum(np.sum(np.abs(a)**2),
np.sum(np.abs(b)**2)))
def explicit_gridder(uvw, freq, ms, wgt, nxdirty, nydirty, xpixsize, ypixsize,
apply_w):
x, y = np.meshgrid(*[-ss/2 + np.arange(ss) for ss in [nxdirty, nydirty]],
indexing='ij')
x *= xpixsize
y *= ypixsize
res = np.zeros((nxdirty, nydirty))
eps = x**2+y**2
if apply_w:
nm1 = -eps/(np.sqrt(1.-eps)+1.)
n = nm1+1
else:
nm1 = 0.
n = 1.
for row in range(ms.shape[0]):
for chan in range(ms.shape[1]):
phase = (freq[chan]/lightspeed *
(x*uvw[row, 0] + y*uvw[row, 1] - uvw[row, 2]*nm1))
if wgt is None:
res += (ms[row, chan]*np.exp(2j*np.pi*phase)).real
else:
res += (ms[row, chan]*wgt[row, chan]
* np.exp(2j*np.pi*phase)).real
return res/n
@pmp("nx", (16,))
@pmp("ny", (18, 64))
@pmp("fov", (5.0,))
@pmp("nrow", (1000,))
@pmp("nchan", (1, 7))
@pmp("nband", (1, 3))
@pmp("precision", ('single', 'double'))
@pmp("epsilon", (1e-3, 1e-4))
@pmp("nthreads", (1, 6))
def test_gridder(nx, ny, fov, nrow, nchan, nband,
precision, epsilon, nthreads):
# run comparison against dft with a frequency mapping imposed
if nband > nchan:
return
from africanus.gridding.wgridder import dirty
if precision == 'single':
real_type = "f4"
complex_type = "c8"
else:
real_type = "f8"
complex_type = "c16"
np.random.seed(420)
cell = fov*np.pi/180/nx
f0 = 1e9
freq = (f0 + np.arange(nchan)*(f0/nchan))
uvw = ((np.random.rand(nrow, 3)-0.5) /
(cell*freq[-1]/lightspeed))
vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
(np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
wgt = np.random.rand(nrow, nchan).astype(real_type)
step = nchan//nband
if step:
freq_bin_idx = np.arange(0, nchan, step)
freq_mapping = np.append(freq_bin_idx, nchan)
freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
else:
freq_bin_idx = np.array([0], dtype=np.int16)
freq_bin_counts = np.array([1], dtype=np.int16)
image = dirty(uvw, freq, vis, freq_bin_idx, freq_bin_counts, nx, ny, cell,
weights=wgt, nthreads=nthreads)
nband = freq_bin_idx.size
ref = np.zeros((nband, nx, ny), dtype=np.float64)
for i in range(nband):
ind = slice(freq_bin_idx[i], freq_bin_idx[i] + freq_bin_counts[i])
ref[i] = explicit_gridder(uvw, freq[ind], vis[:, ind], wgt[:, ind],
nx, ny, cell, cell, True)
# l2 error should be within epsilon of zero
assert_allclose(_l2error(image, ref), 0, atol=epsilon)
@pmp("nx", (30,))
@pmp("ny", (50, 128))
@pmp("fov", (0.5, 2.5))
@pmp("nrow", (333, 5000,))
@pmp("nchan", (1, 4))
@pmp("nband", (1, 2))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (6,))
def test_adjointness(nx, ny, fov, nrow, nchan, nband,
precision, nthreads):
# instead of explicitly testing the degridder we can just check that
# it is consistent with the gridder i.e.
#
# <R.H y, x> = <y.H, Rx>
#
# where R.H is the gridder, R is the degridder and x and y are randomly
# drawn image and visibilities respectively
if nband > nchan:
return
from africanus.gridding.wgridder import dirty, model
if precision == 'single':
real_type = np.float32
complex_type = np.complex64
tol = 1e-4
else:
real_type = np.float64
complex_type = np.complex128
tol = 1e-12
np.random.seed(420)
cell = fov*np.pi/180/nx
f0 = 1e9
freq = (f0 + np.arange(nchan)*(f0/nchan))
uvw = ((np.random.rand(nrow, 3)-0.5) /
(cell*freq[-1]/lightspeed))
vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
(np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
wgt = np.random.rand(nrow, nchan).astype(real_type)
step = nchan//nband
if step:
freq_bin_idx = np.arange(0, nchan, step)
freq_mapping = np.append(freq_bin_idx, nchan)
freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
else:
freq_bin_idx = np.array([0], dtype=np.int8)
freq_bin_counts = np.array([1], dtype=np.int8)
nband = freq_bin_idx.size
image = dirty(uvw, freq, vis, freq_bin_idx, freq_bin_counts, nx, ny, cell,
weights=wgt, nthreads=nthreads)
model_im = np.random.randn(nband, nx, ny).astype(real_type)
modelvis = model(uvw, freq, model_im, freq_bin_idx, freq_bin_counts,
cell, weights=wgt, nthreads=nthreads)
# should have relative tolerance close to machine precision
assert_allclose(np.vdot(vis, modelvis).real, np.vdot(image, model_im),
rtol=tol)
@pmp("nx", (20, ))
@pmp("ny", (32, 70))
@pmp("fov", (1.5, 3.5))
@pmp("nrow", (222, 777,))
@pmp("nchan", (1, 5))
@pmp("nband", (1, 3))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (3,))
def test_residual(nx, ny, fov, nrow, nchan, nband,
precision, nthreads):
# Compare the result of im2residim to
# VR = V - Rx - computed with im2vis
# IR = R.H VR - computed with vis2im
from africanus.gridding.wgridder import dirty, model, residual
np.random.seed(420)
if precision == 'single':
real_type = np.float32
complex_type = np.complex64
decimal = 4
else:
real_type = np.float64
complex_type = np.complex128
decimal = 12
cell = fov*np.pi/180/nx
f0 = 1e9
freq = (f0 + np.arange(nchan)*(f0/nchan))
uvw = ((np.random.rand(nrow, 3)-0.5) /
(cell*freq[-1]/lightspeed))
vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
(np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
wgt = np.random.rand(nrow, nchan).astype(real_type)
step = nchan//nband
if step:
freq_bin_idx = np.arange(0, nchan, step)
freq_mapping = np.append(freq_bin_idx, nchan)
freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
else:
freq_bin_idx = np.array([0], dtype=np.int8)
freq_bin_counts = np.array([1], dtype=np.int8)
nband = freq_bin_idx.size
model_im = np.random.randn(nband, nx, ny).astype(real_type)
modelvis = model(uvw, freq, model_im, freq_bin_idx, freq_bin_counts, cell,
nthreads=nthreads)
residualvis = vis - modelvis
residim1 = dirty(uvw, freq, residualvis, freq_bin_idx, freq_bin_counts,
nx, ny, cell, weights=wgt, nthreads=nthreads)
residim2 = residual(uvw, freq, model_im, vis, freq_bin_idx,
freq_bin_counts, cell, weights=wgt,
nthreads=nthreads)
# These are essentially computing the same thing just in a different
# order so should be close to machine precision
rmax = np.maximum(np.abs(residim1).max(), np.abs(residim2).max())
assert_array_almost_equal(
residim1/rmax, residim2/rmax, decimal=decimal)
@pmp("nx", (128, ))
@pmp("ny", (256,))
@pmp("fov", (0.5,))
@pmp("nrow", (10000000,))
@pmp("nchan", (2,))
@pmp("nband", (2,))
@pmp("precision", ('single',))
@pmp("nthreads", (4,))
def test_hessian(nx, ny, fov, nrow, nchan, nband,
precision, nthreads):
# Compare the result of dirty computed with Hessian
# ID = hessian(x)
# to that computed using dirty.
from africanus.gridding.wgridder import dirty, hessian
np.random.seed(420)
if precision == 'single':
real_type = np.float32
complex_type = np.complex64
atol = 1e-5
else:
real_type = np.float64
complex_type = np.complex128
atol = 1e-5
uvw = 1000*np.random.randn(nrow, 3)
uvw[:, 2] = 0
u_max = np.abs(uvw[:, 0]).max()
v_max = np.abs(uvw[:, 1]).max()
uv_max = np.maximum(u_max, v_max)
f0 = 1e9
freq = (f0 + np.arange(nchan)*(f0/nchan))
cell_N = 0.1/(2*uv_max*freq.max()/lightspeed)
cell = cell_N/2.0 # super_resolution_factor of 2
vis = np.ones((nrow, nchan), dtype=complex_type)
step = nchan//nband
if step:
freq_bin_idx = np.arange(0, nchan, step)
freq_mapping = np.append(freq_bin_idx, nchan)
freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
else:
freq_bin_idx = np.array([0], dtype=np.int8)
freq_bin_counts = np.array([1], dtype=np.int8)
nband = freq_bin_idx.size
model_im = np.zeros((nband, nx, ny), dtype=real_type)
model_im[:, nx//2, ny//2] = 1.0
dirty_im1 = dirty(uvw, freq, vis, freq_bin_idx, freq_bin_counts,
nx, ny, cell, nthreads=nthreads, do_wstacking=False,
double_accum=True)
# test accumulation
assert_allclose(dirty_im1.max()/nrow, 1.0, rtol=atol)
dirty_im2 = hessian(uvw, freq, model_im, freq_bin_idx,
freq_bin_counts, cell, nthreads=nthreads,
do_wstacking=False, double_accum=True)
# rtol not reliable since there will be values close to zero in the
# dirty images
assert_allclose(dirty_im1/nrow, dirty_im2/nrow, atol=atol, rtol=1e-2)
@pmp("nx", (30, 250))
@pmp("ny", (128,))
@pmp("fov", (5.0,))
@pmp("nrow", (3333, 10000))
@pmp("nchan", (1, 8))
@pmp("nband", (1, 2))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (1, 4))
@pmp("nchunks", (1, 3))
def test_dask_dirty(nx, ny, fov, nrow, nchan, nband,
precision, nthreads, nchunks):
da = pytest.importorskip("dask.array")
from africanus.gridding.wgridder import dirty as dirty_np
from africanus.gridding.wgridder.dask import dirty
np.random.seed(420)
if precision == 'single':
real_type = np.float32
complex_type = np.complex64
decimal = 4 # sometimes fails at 5
else:
real_type = np.float64
complex_type = np.complex128
decimal = 5
cell = fov*np.pi/180/nx
f0 = 1e9
freq = (f0 + np.arange(nchan)*(f0/nchan))
uvw = ((np.random.rand(nrow, 3)-0.5) /
(cell*freq[-1]/lightspeed))
vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
(np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
wgt = np.random.rand(nrow, nchan).astype(real_type)
step = np.maximum(1, nchan//nband)
if step:
freq_bin_idx = np.arange(0, nchan, step)
freq_mapping = np.append(freq_bin_idx, nchan)
freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
else:
freq_bin_idx = np.array([0], dtype=np.int8)
freq_bin_counts = np.array([1], dtype=np.int8)
nband = freq_bin_idx.size
image = dirty_np(uvw, freq, vis, freq_bin_idx, freq_bin_counts, nx, ny,
cell, weights=wgt, nthreads=nthreads)
# now get result using dask
rows_per_task = int(np.ceil(nrow/nchunks))
row_chunks = (nchunks-1) * (rows_per_task,)
row_chunks += (nrow - np.sum(row_chunks),)
freq_da = da.from_array(freq, chunks=step)
uvw_da = da.from_array(uvw, chunks=(row_chunks, -1))
vis_da = da.from_array(vis, chunks=(row_chunks, step))
wgt_da = da.from_array(wgt, chunks=(row_chunks, step))
freq_bin_idx_da = da.from_array(freq_bin_idx, chunks=1)
freq_bin_counts_da = da.from_array(freq_bin_counts, chunks=1)
image_da = dirty(uvw_da, freq_da, vis_da, freq_bin_idx_da,
freq_bin_counts_da, nx, ny, cell, weights=wgt_da,
nthreads=nthreads).compute()
# relative error should agree to within epsilon
dmax = np.maximum(np.abs(image).max(), np.abs(image_da).max())
assert_array_almost_equal(image/dmax, image_da/dmax,
decimal=decimal)
@pmp("nx", (30, 250))
@pmp("ny", (128,))
@pmp("fov", (5.0,))
@pmp("nrow", (3333, 10000))
@pmp("nchan", (1, 8))
@pmp("nband", (1, 2))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (1, 4))
@pmp("nchunks", (1, 3))
def test_dask_model(nx, ny, fov, nrow, nchan, nband,
precision, nthreads, nchunks):
da = pytest.importorskip("dask.array")
from africanus.gridding.wgridder import model as model_np
from africanus.gridding.wgridder.dask import model
np.random.seed(420)
if precision == 'single':
real_type = np.float32
complex_type = np.complex64
decimal = 4 # sometimes fails at 5
else:
real_type = np.float64
complex_type = np.complex128
decimal = 5
cell = fov*np.pi/180/nx
f0 = 1e9
freq = (f0 + np.arange(nchan)*(f0/nchan))
uvw = ((np.random.rand(nrow, 3)-0.5) /
(cell*freq[-1]/lightspeed))
vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
(np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
wgt = np.random.rand(nrow, nchan).astype(real_type)
step = np.maximum(1, nchan//nband)
if step:
freq_bin_idx = np.arange(0, nchan, step)
freq_mapping = np.append(freq_bin_idx, nchan)
freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
else:
freq_bin_idx = np.array([0], dtype=np.int16)
freq_bin_counts = np.array([1], dtype=np.int16)
nband = freq_bin_idx.size
image = np.random.randn(nband, nx, ny).astype(real_type)
vis = model_np(uvw, freq, image, freq_bin_idx, freq_bin_counts, cell,
weights=wgt, nthreads=nthreads)
# now get result using dask
rows_per_task = int(np.ceil(nrow/nchunks))
row_chunks = (nchunks-1) * (rows_per_task,)
row_chunks += (nrow - np.sum(row_chunks),)
freq_da = da.from_array(freq, chunks=step)
uvw_da = da.from_array(uvw, chunks=(row_chunks, -1))
image_da = da.from_array(image, chunks=(1, nx, ny))
wgt_da = da.from_array(wgt, chunks=(row_chunks, step))
freq_bin_idx_da = da.from_array(freq_bin_idx, chunks=1)
freq_bin_counts_da = da.from_array(freq_bin_counts, chunks=1)
vis_da = model(uvw_da, freq_da, image_da, freq_bin_idx_da,
freq_bin_counts_da, cell, weights=wgt_da,
nthreads=nthreads).compute()
# relative error should agree to within epsilon
vmax = np.maximum(np.abs(vis).max(), np.abs(vis_da).max())
assert_array_almost_equal(vis/vmax, vis_da/vmax,
decimal=decimal)
@pmp("nx", (30, 250))
@pmp("ny", (128,))
@pmp("fov", (5.0,))
@pmp("nrow", (3333, 10000))
@pmp("nchan", (1, 8))
@pmp("nband", (1, 2))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (1, 4))
@pmp("nchunks", (1, 3))
def test_dask_residual(nx, ny, fov, nrow, nchan, nband,
precision, nthreads, nchunks):
da = pytest.importorskip("dask.array")
from africanus.gridding.wgridder import residual as residual_np
from africanus.gridding.wgridder.dask import residual
np.random.seed(420)
if precision == 'single':
real_type = np.float32
complex_type = np.complex64
decimal = 4 # sometimes fails at 5
else:
real_type = np.float64
complex_type = np.complex128
decimal = 5
cell = fov*np.pi/180/nx
f0 = 1e9
freq = (f0 + np.arange(nchan)*(f0/nchan))
uvw = ((np.random.rand(nrow, 3)-0.5) /
(cell*freq[-1]/lightspeed))
vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
(np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
wgt = np.random.rand(nrow, nchan).astype(real_type)
step = np.maximum(1, nchan//nband)
if step:
freq_bin_idx = np.arange(0, nchan, step)
freq_mapping = np.append(freq_bin_idx, nchan)
freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
else:
freq_bin_idx = np.array([0], dtype=np.int8)
freq_bin_counts = np.array([1], dtype=np.int8)
nband = freq_bin_idx.size
image = np.random.randn(nband, nx, ny).astype(real_type)
residim_np = residual_np(uvw, freq, image, vis, freq_bin_idx,
freq_bin_counts, cell, weights=wgt,
nthreads=nthreads)
rows_per_task = int(np.ceil(nrow/nchunks))
row_chunks = (nchunks-1) * (rows_per_task,)
row_chunks += (nrow - np.sum(row_chunks),)
freq_da = da.from_array(freq, chunks=step)
uvw_da = da.from_array(uvw, chunks=(row_chunks, -1))
image_da = da.from_array(image, chunks=(1, nx, ny))
vis_da = da.from_array(vis, chunks=(row_chunks, step))
wgt_da = da.from_array(wgt, chunks=(row_chunks, step))
freq_bin_idx_da = da.from_array(freq_bin_idx, chunks=1)
freq_bin_counts_da = da.from_array(freq_bin_counts, chunks=1)
residim_da = residual(uvw_da, freq_da, image_da, vis_da,
freq_bin_idx_da, freq_bin_counts_da,
cell, weights=wgt_da, nthreads=nthreads).compute()
# should agree to within epsilon
rmax = np.maximum(np.abs(residim_np).max(), np.abs(residim_da).max())
assert_array_almost_equal(
residim_np/rmax, residim_da/rmax, decimal=decimal)
@pmp("nx", (64,))
@pmp("ny", (128,))
@pmp("fov", (5.0,))
@pmp("nrow", (3333, 10000))
@pmp("nchan", (4,))
@pmp("nband", (2,))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (1,))
@pmp("nchunks", (1, 3))
def test_dask_hessian(nx, ny, fov, nrow, nchan, nband,
precision, nthreads, nchunks):
da = pytest.importorskip("dask.array")
from africanus.gridding.wgridder import hessian as hessian_np
from africanus.gridding.wgridder.dask import hessian
np.random.seed(420)
if precision == 'single':
real_type = np.float32
decimal = 4 # sometimes fails at 5
else:
real_type = np.float64
decimal = 5
cell = fov*np.pi/180/nx
f0 = 1e9
freq = (f0 + np.arange(nchan)*(f0/nchan))
uvw = ((np.random.rand(nrow, 3)-0.5) /
(cell*freq[-1]/lightspeed))
wgt = np.random.rand(nrow, nchan).astype(real_type)
step = np.maximum(1, nchan//nband)
if step:
freq_bin_idx = np.arange(0, nchan, step)
freq_mapping = np.append(freq_bin_idx, nchan)
freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
else:
freq_bin_idx = np.array([0], dtype=np.int8)
freq_bin_counts = np.array([1], dtype=np.int8)
nband = freq_bin_idx.size
image = np.random.randn(nband, nx, ny).astype(real_type)
convim_np = hessian_np(uvw, freq, image, freq_bin_idx,
freq_bin_counts, cell, weights=wgt,
nthreads=nthreads)
rows_per_task = int(np.ceil(nrow/nchunks))
row_chunks = (nchunks-1) * (rows_per_task,)
row_chunks += (nrow - np.sum(row_chunks),)
freq_da = da.from_array(freq, chunks=step)
uvw_da = da.from_array(uvw, chunks=(row_chunks, -1))
image_da = da.from_array(image, chunks=(1, nx, ny))
wgt_da = da.from_array(wgt, chunks=(row_chunks, step))
freq_bin_idx_da = da.from_array(freq_bin_idx, chunks=1)
freq_bin_counts_da = da.from_array(freq_bin_counts, chunks=1)
convim_da = hessian(uvw_da, freq_da, image_da,
freq_bin_idx_da, freq_bin_counts_da,
cell, weights=wgt_da, nthreads=nthreads).compute()
# should agree to within epsilon
rmax = np.maximum(np.abs(convim_np).max(), np.abs(convim_da).max())
assert_array_almost_equal(
convim_np/rmax, convim_da/rmax, decimal=decimal)
```
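Every test above builds the same channel-to-band mapping. The snippet below (illustrative, not from the test module) shows the construction in isolation; note that the effective number of bands is `freq_bin_idx.size`, which is why the tests reassign `nband` afterwards.
```python
import numpy as np

nchan, nband = 8, 3
step = np.maximum(1, nchan // nband)            # 2 channels per nominal band
freq_bin_idx = np.arange(0, nchan, step)        # [0, 2, 4, 6] -> 4 effective bands
freq_mapping = np.append(freq_bin_idx, nchan)   # [0, 2, 4, 6, 8]
freq_bin_counts = freq_mapping[1:] - freq_mapping[:-1]   # [2, 2, 2, 2]

bands = [slice(freq_bin_idx[i], freq_bin_idx[i] + freq_bin_counts[i])
         for i in range(freq_bin_idx.size)]
# channels freq[bands[i]] contribute to imaging band i
print(bands)
```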
#### File: africanus/linalg/kronecker_tools.py
```python
import numpy as np
def kron_N(x):
"""
Computes N = N_1 x N_2 x ... x N_D i.e.
the total number of rows in a kronecker matrix
Parameters
----------
x : :class:`numpy.ndarray`
An array of arrays holding matrices/vectors [x1, x2, ..., xD]
Returns
-------
N : int
The total number of rows in a kronecker matrix or vector
"""
D = x.shape[0]
dims = np.zeros(D)
for i in range(D):
dims[i] = x[i].shape[0]
return int(np.prod(dims))
def kron_matvec(A, b):
"""
Computes the matrix vector product of
a kronecker matrix in linear time.
Assumes A consists of kronecker product
of square matrices.
Parameters
----------
A : :class:`numpy.ndarray`
An array of arrays holding
matrices [K0, K1, ...] where
:math:`A = K_0 \\otimes K_1 \\otimes \\cdots`
b : :class:`numpy.ndarray`
The right hand side vector
Returns
-------
x : :class:`numpy.ndarray`
The result of :code:`A.dot(b)`
"""
D = A.shape[0]
N = b.size
x = b
for d in range(D):
Gd = A[d].shape[0]
X = np.reshape(x, (Gd, N//Gd))
Z = np.einsum("ab,bc->ac", A[d], X)
Z = np.einsum("ab -> ba", Z)
x = Z.flatten()
return x
def kron_tensorvec(A, b):
"""
Matrix vector product of kronecker matrix A with
vector b. A can be made up of an arbitrary kronecker
product.
Parameters
----------
A : :class:`numpy.ndarray`
An array of arrays holding
matrices [K0, K1, ...] where
:math:`A = K_0 \\otimes K_1 \\otimes \\cdots`
b : :class:`numpy.ndarray`
The right hand side vector
Returns
-------
x : :class:`numpy.ndarray`
The result of :code:`A.dot(b)`
"""
D = A.shape[0]
# get shape of sub-matrices
    G = np.zeros(D, dtype=np.int64)  # wide int dtype avoids overflow for large sub-matrices
    M = np.zeros(D, dtype=np.int64)
for d in range(D):
M[d], G[d] = A[d].shape
x = b
for d in range(D):
Gd = G[d]
rem = np.prod(np.delete(G, d))
X = np.reshape(x, (Gd, rem))
Z = np.einsum("ab,bc->ac", A[d], X)
Z = np.einsum("ab -> ba", Z)
x = Z.flatten()
# replace with new dimension
G[d] = M[d]
return x
def kron_matmat(A, B):
"""
Computes the product between a kronecker matrix A
and some RHS matrix B
Parameters
----------
A : :class:`numpy.ndarray`
An array of arrays holding
matrices [K0, K1, ...] where
:math:`A = K_0 \\otimes K_1 \\otimes \\cdots`
B : :class:`numpy.ndarray`
The RHS matrix
Returns
-------
x : :class:`numpy.ndarray`
The result of :code:`A.dot(B)`
"""
    M = B.shape[1]  # the product of Np_1 x Np_2 x ... x Np_D
N = kron_N(A)
C = np.zeros([N, M])
for i in range(M):
C[:, i] = kron_matvec(A, B[:, i])
return C
def kron_tensormat(A, B):
"""
    Computes the matrix product between a kronecker matrix A
and some RHS matrix B. Does not assume A to consist of a
kronecker product of square matrices.
Parameters
----------
A : :class:`numpy.ndarray`
An array of arrays holding
matrices [K0, K1, ...] where
:math:`A = K_0 \\otimes K_1 \\otimes \\cdots`
B : :class:`numpy.ndarray`
The RHS matrix
Returns
-------
x : :class:`numpy.ndarray`
The result of :code:`A.dot(B)`
"""
    M = B.shape[1]  # the product of Np_1 x Np_2 x ... x Np_D
N = kron_N(A)
C = np.zeros([N, M])
for i in range(M):
C[:, i] = kron_tensorvec(A, B[:, i])
return C
def kron_cholesky(A):
"""
Computes the Cholesky decomposition
of a kronecker matrix as a kronecker
matrix of Cholesky factors.
Parameters
----------
A : :class:`numpy.ndarray`
An array of arrays holding
matrices [K0, K1, ...] where
:math:`A = K_0 \\otimes K_1 \\otimes \\cdots`
Returns
-------
L : :class:`numpy.ndarray`
An array of arrays holding
matrices [L0, L1, ...] where
:math:`L = L_0 \\otimes L_1 \\otimes \\cdots`
and each :code:`Li = cholesky(Ki)`
"""
D = A.shape[0]
L = np.zeros_like(A)
for i in range(D):
try:
L[i] = np.linalg.cholesky(A[i])
except Exception: # add jitter
L[i] = np.linalg.cholesky(A[i] + 1e-13*np.eye(A[i].shape[0]))
return L
```
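The reshape/transpose trick in `kron_matvec` is easy to validate against an explicit Kronecker product on a small problem. A self-contained check (illustrative sketch that mirrors the loop body above rather than importing the module):
```python
import numpy as np

np.random.seed(0)
K0 = np.random.rand(3, 3)
K1 = np.random.rand(4, 4)
b = np.random.rand(K0.shape[0] * K1.shape[0])

x = b
for K in (K0, K1):                  # same order as A = [K0, K1] above
    G = K.shape[0]
    X = x.reshape(G, x.size // G)
    x = (K @ X).T.ravel()           # multiply, transpose, flatten

assert np.allclose(x, np.kron(K0, K1) @ b)
```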
#### File: linalg/test/test_geometry.py
```python
import numpy as np
import pytest
from africanus.linalg.geometry import (
BoundingConvexHull,
BoundingBox,
BoundingBoxFactory,
)
@pytest.mark.parametrize("debug", [False])
def test_hull_construction(debug):
# test case 1
vals = np.array([[50, 60], [20, 40], [-74, 50], [-95, +10], [20, 60]])
bh = BoundingConvexHull(vals)
mask = bh.mask
assert mask.shape == (
np.max(vals[:, 1]) - np.min(vals[:, 1]) + 1,
np.max(vals[:, 0]) - np.min(vals[:, 0]) + 1,
)
# integral mask area needs to be close to true area
assert np.abs(mask.sum() - bh.area) / bh.area < 0.05
normalized_normals = (
bh.rnormals / np.linalg.norm(bh.rnormals, axis=1)[:, None]
)
# test case 2
for e, n in zip(bh.edges, normalized_normals):
edge_vec = e[1] - e[0]
assert np.all(np.abs(np.dot(edge_vec, n)) < 1.0e-8)
# test case 3
valsextract = np.array([[-10, 120], [90, 268], [293, 110], [40, -30]])
bh_extract = BoundingConvexHull(valsextract)
sinc_npx = 255
sinc = np.sinc(np.linspace(-7, 7, sinc_npx))
sinc2d = np.outer(sinc, sinc).reshape((1, 1, sinc_npx, sinc_npx))
(extracted_data,
extracted_window_extents) = BoundingConvexHull.regional_data(
bh_extract, sinc2d, oob_value=np.nan
)
assert extracted_window_extents == [-10, 293, -30, 268]
sparse_mask = np.array(bh_extract.sparse_mask)
sel = np.logical_and(
np.logical_and(sparse_mask[:, 1] >= 0, sparse_mask[:, 1] < 255),
np.logical_and(sparse_mask[:, 0] >= 0, sparse_mask[:, 0] < 255),
)
flat_index = (sparse_mask[sel][:, 0]) * sinc_npx + (sparse_mask[sel][:, 1])
sinc_integral = np.sum(sinc2d.ravel()[flat_index])
assert np.abs(sinc_integral - np.nansum(extracted_data.ravel())) < 1.0e-8
v = np.nanargmax(extracted_data)
vx = v % extracted_data.shape[3]
vy = v // extracted_data.shape[3]
cextracted = (
extracted_window_extents[0] + vx,
extracted_window_extents[2] + vy,
)
v = np.nanargmax(sinc2d)
sincvx = v % sinc_npx
sincvy = v // sinc_npx
csinc = tuple([sincvx, sincvy])
assert csinc == cextracted
# test case 4
vals2 = np.array([[-20, -120], [0, 60], [40, -60]])
vals3 = np.array([[-20, 58], [-40, 80], [20, 100]])
bh2 = BoundingConvexHull(vals2)
bh3 = BoundingConvexHull(vals3)
assert bh.overlaps_with(bh2)
assert not bh.overlaps_with(bh3)
assert not bh2.overlaps_with(bh3)
# test case 5
assert (-1000, -1000) not in bh
assert (30, 0) not in bh
assert (0, 0) not in bh
assert (-40, 30) in bh
# test case 6
bb = BoundingBox(-14, 20, 30, 49)
assert bb.centre == [3, 39]
assert bb.box_npx == (35, 20)
assert bb.mask.shape == bb.box_npx[::-1]
assert bb.area == 35 * 20
assert np.sum(bb.mask) == bb.area
assert (-15, 35) not in bb
assert (0, 35) in bb
bb2 = BoundingBoxFactory.AxisAlignedBoundingBox(bb) # enforce odd
assert bb2.box_npx == (35, 21)
assert bb2.area == 35 * 21
assert (bb.sparse_mask == bb2.sparse_mask).all()
assert (-15, 35) not in bb2
assert (0, 35) in bb2
bb3 = BoundingBoxFactory.AxisAlignedBoundingBox(
bb, square=True
) # enforce odd
assert bb3.box_npx[0] == bb3.box_npx[1]
assert bb3.box_npx[0] % 2 == 1 # enforce odd
assert bb3.area == bb3.box_npx[0] ** 2
assert (bb.sparse_mask == bb3.sparse_mask).all()
assert (-15, 35) not in bb2
assert (0, 35) in bb2
# test case 7
bb4s = BoundingBoxFactory.SplitBox(bb, nsubboxes=3)
assert len(bb4s) == 9
xlims = [(np.min(c.corners[:, 0]), np.max(c.corners[:, 0])) for c in bb4s][
0:3
]
ylims = [(np.min(c.corners[:, 1]), np.max(c.corners[:, 1])) for c in bb4s][
0::3
]
assert np.all(xlims == np.array([(-14, -3), (-2, 9), (10, 20)]))
assert np.all(ylims == np.array([(30, 36), (37, 43), (44, 49)]))
assert np.sum([b.area for b in bb4s]) == bb.area
for bb4 in bb4s:
assert bb4.area == np.sum(bb4.mask)
# test case 8
bb5 = BoundingBox(-14, 20, 30, 50)
assert bb5.box_npx == (35, 21)
bb6 = BoundingBoxFactory.PadBox(bb5, 41, 27)
assert bb6.box_npx == (41, 27)
assert bb5.centre == bb6.centre
assert np.sum(bb5.mask) == np.sum(bb6.mask)
bb7s = list(map(lambda x: BoundingBoxFactory.PadBox(x, 17, 11), bb4s))
assert all([b.box_npx == (17, 11) for b in bb7s])
assert np.sum([np.sum(b.mask) for b in bb7s]) == np.sum(
[np.sum(b.mask) for b in bb4s]
)
# test case 9
facet_regions = list(
map(
lambda f: BoundingBoxFactory.PadBox(f, 63, 63),
BoundingBoxFactory.SplitBox(
BoundingBoxFactory.AxisAlignedBoundingBox(bh_extract),
nsubboxes=5,
),
)
)
facets = list(
map(
lambda pf: BoundingConvexHull.regional_data(
pf, sinc2d, oob_value=np.nan
),
facet_regions,
)
)
stitched_image, stitched_region = BoundingBox.project_regions(
[f[0] for f in facets], facet_regions
)
assert (
np.abs(sinc_integral - np.nansum([np.nansum(f[0]) for f in facets]))
< 1.0e-8
)
assert np.abs(sinc_integral - np.sum(stitched_image)) < 1.0e-8
v = np.argmax(stitched_image)
vx = v % stitched_image.shape[3]
vy = v // stitched_image.shape[3]
cstitched = (
np.min(stitched_region.corners[:, 0]) + vx,
np.min(stitched_region.corners[:, 1]) + vy,
)
assert cstitched == csinc
# test case 10
olap_box1 = BoundingBox(110, 138, 110, 135)
olap_box2 = BoundingBox(115, 150, 109, 150)
olap_box3 = BoundingBox(125, 130, 125, 130)
BoundingConvexHull.normalize_masks([olap_box1, olap_box2, olap_box3])
ext1 = BoundingConvexHull.regional_data(olap_box1, sinc2d)[0]
ext2 = BoundingConvexHull.regional_data(olap_box2, sinc2d)[0]
ext3 = BoundingConvexHull.regional_data(olap_box3, sinc2d)[0]
olaps_stitched_image, olaps_stitched_region = BoundingBox.project_regions(
[ext1, ext2, ext3], [olap_box1, olap_box2, olap_box3]
)
v = np.nanargmax(olaps_stitched_image)
vx = v % olaps_stitched_image.shape[3]
vy = v // olaps_stitched_image.shape[3]
cstitched_olap = (
np.min(olaps_stitched_region.corners[:, 0]) + vx,
np.min(olaps_stitched_region.corners[:, 1]) + vy,
)
assert cstitched_olap == csinc
assert np.abs(1.0 - np.nanmax(olaps_stitched_image)) < 1.0e-8
# visual inspection
if debug:
from matplotlib import pyplot as plt
plt.figure(figsize=(7, 2.5))
plt.title("Winding, normals and masking check")
for h in [bh, bh2, bh3]:
for ei, e in enumerate(h.edges):
plt.plot(e[:, 0], e[:, 1], "r--")
plt.text(e[0, 0], e[0, 1], str(ei))
plt.plot(bh.edge_midpoints[:, 0], bh.edge_midpoints[:, 1], "ko")
for e, n in zip(bh.edge_midpoints, normalized_normals):
p0 = e
p = e + n * 6
plt.plot([p0[0], p[0]], [p0[1], p[1]], "b--", lw=2)
plt.scatter(vals[:, 0], vals[:, 1])
plt.imshow(
mask,
extent=[
np.min(vals[:, 0]),
np.max(vals[:, 0]),
np.max(vals[:, 1]),
np.min(vals[:, 1]),
],
)
plt.grid(True)
plt.savefig("/tmp/winding.png")
plt.figure(figsize=(7, 2.5))
plt.title("Data extraction check (global)")
for h in [bh_extract]:
for ei, e in enumerate(h.edges):
plt.plot(e[:, 0], e[:, 1], "r--")
plt.imshow(sinc2d[0, 0, :, :], extent=[0, sinc_npx, sinc_npx, 0])
plt.grid(True)
plt.savefig("/tmp/extract_global.png")
plt.figure(figsize=(7, 2.5))
plt.title("Data extraction check (local)")
for h in [bh_extract]:
for ei, e in enumerate(h.edges):
plt.plot(e[:, 0], e[:, 1], "r--")
plt.imshow(
extracted_data[0, 0, :, :],
extent=[
extracted_window_extents[0],
extracted_window_extents[1],
extracted_window_extents[3],
extracted_window_extents[2],
],
)
plt.savefig("/tmp/extract_local.png")
plt.figure(figsize=(7, 2.5))
plt.title("Faceting check")
for h in [bh_extract]:
for ei, e in enumerate(h.edges):
plt.plot(e[:, 0], e[:, 1], "r--")
for f in facet_regions:
for ei, e in enumerate(f.edges):
plt.plot(e[:, 0], e[:, 1], "co--")
plt.imshow(
stitched_image[0, 0, :, :],
extent=[
np.min(stitched_region.corners[:, 0]),
np.max(stitched_region.corners[:, 0]),
np.max(stitched_region.corners[:, 1]),
np.min(stitched_region.corners[:, 1]),
],
)
plt.savefig("/tmp/facet.png")
plt.figure(figsize=(7, 2.5))
plt.title("Overlapping faceting check")
for f in [olap_box1, olap_box2, olap_box3]:
for ei, e in enumerate(f.edges):
plt.plot(e[:, 0], e[:, 1], "co--")
plt.imshow(
olaps_stitched_image[0, 0, :, :],
extent=[
np.min(olaps_stitched_region.corners[:, 0]),
np.max(olaps_stitched_region.corners[:, 0]),
np.max(olaps_stitched_region.corners[:, 1]),
np.min(olaps_stitched_region.corners[:, 1]),
],
)
plt.xlim(
(
np.min(olaps_stitched_region.corners[:, 0]) - 15,
np.max(olaps_stitched_region.corners[:, 0]) + 15,
)
)
plt.ylim(
(
np.min(olaps_stitched_region.corners[:, 1]) - 15,
np.max(olaps_stitched_region.corners[:, 1]) + 15,
)
)
plt.savefig("/tmp/overlap_facet.png")
if __name__ == "__main__":
test_hull_construction()
```
#### File: model/coherency/conversion.py
```python
from collections import OrderedDict, deque
from pprint import pformat
from textwrap import fill
import numpy as np
from africanus.util.casa_types import (STOKES_TYPES,
STOKES_ID_MAP)
from africanus.util.docs import DocstringTemplate
stokes_conv = {
'RR': {('I', 'V'): lambda i, v: i + v + 0j},
'RL': {('Q', 'U'): lambda q, u: q + u*1j},
'LR': {('Q', 'U'): lambda q, u: q - u*1j},
'LL': {('I', 'V'): lambda i, v: i - v + 0j},
'XX': {('I', 'Q'): lambda i, q: i + q + 0j},
'XY': {('U', 'V'): lambda u, v: u + v*1j},
'YX': {('U', 'V'): lambda u, v: u - v*1j},
'YY': {('I', 'Q'): lambda i, q: i - q + 0j},
'I': {('XX', 'YY'): lambda xx, yy: (xx + yy).real / 2,
('RR', 'LL'): lambda rr, ll: (rr + ll).real / 2},
'Q': {('XX', 'YY'): lambda xx, yy: (xx - yy).real / 2,
('RL', 'LR'): lambda rl, lr: (rl + lr).real / 2},
'U': {('XY', 'YX'): lambda xy, yx: (xy + yx).real / 2,
('RL', 'LR'): lambda rl, lr: (rl - lr).imag / 2},
'V': {('XY', 'YX'): lambda xy, yx: (xy - yx).imag / 2,
('RR', 'LL'): lambda rr, ll: (rr - ll).real / 2},
}
class DimensionMismatch(Exception):
pass
class MissingConversionInputs(Exception):
pass
def _element_indices_and_shape(data):
if not isinstance(data, (tuple, list)):
data = [data]
# Shape of the data
shape = []
# Each stack element is (list, index, depth)
queue = deque([(data, (), 0)])
result = OrderedDict()
while len(queue) > 0:
current, current_idx, depth = queue.popleft()
# First do shape inference
if len(shape) <= depth:
shape.append(len(current))
elif shape[depth] != len(current):
raise DimensionMismatch("Dimension mismatch %d != %d at depth %d"
% (shape[depth], len(current), depth))
# Handle each sequence element
for i, e in enumerate(current):
# Found a list, recurse
if isinstance(e, (tuple, list)):
queue.append((e, current_idx + (i, ), depth + 1))
# String
elif isinstance(e, str):
if e in result:
raise ValueError("'%s' defined multiple times" % e)
result[e] = current_idx + (i, )
# We have a CASA integer Stokes ID, convert to string
elif np.issubdtype(type(e), np.integer):
try:
e = STOKES_ID_MAP[e]
except KeyError:
raise ValueError("Invalid id '%d'. "
"Valid id's '%s'"
% (e, pformat(STOKES_ID_MAP)))
if e in result:
raise ValueError("'%s' defined multiple times" % e)
result[e] = current_idx + (i, )
else:
raise TypeError("Invalid type '%s' for element '%s'"
% (type(e), e))
return result, tuple(shape)
def convert_setup(input, input_schema, output_schema):
input_indices, input_shape = _element_indices_and_shape(input_schema)
output_indices, output_shape = _element_indices_and_shape(output_schema)
if input.shape[-len(input_shape):] != input_shape:
raise ValueError("Last dimension of input doesn't match input schema")
mapping = []
dummy = input.dtype.type(0)
# Figure out how to produce an output from available inputs
for okey, out_idx in output_indices.items():
try:
deps = stokes_conv[okey]
except KeyError:
raise ValueError("Unknown output '%s'. Known types '%s'"
% (deps, STOKES_TYPES))
found_conv = False
# Find a mapping for which we have inputs
for (c1, c2), fn in deps.items():
# Get indices for both correlations
try:
c1_idx = (Ellipsis,) + input_indices[c1]
except KeyError:
continue
try:
c2_idx = (Ellipsis,) + input_indices[c2]
except KeyError:
continue
found_conv = True
out_idx = (Ellipsis,) + out_idx
# Figure out the data type for this output
dtype = fn(dummy, dummy).dtype
mapping.append((c1_idx, c2_idx, out_idx, fn, dtype))
break
# We must find a conversion
if not found_conv:
raise MissingConversionInputs("None of the supplied inputs '%s' "
"can produce output '%s'. It can be "
"produced by the following "
"combinations '%s'." % (
input_schema,
okey, deps.keys()))
out_dtype = np.result_type(*[dt for _, _, _, _, dt in mapping])
return mapping, input_shape, output_shape, out_dtype
def convert_impl(input, mapping, in_shape, out_shape, dtype):
# Make the output array
out_shape = input.shape[:-len(in_shape)] + out_shape
output = np.empty(out_shape, dtype=dtype)
for c1_idx, c2_idx, out_idx, fn, _ in mapping:
output[out_idx] = fn(input[c1_idx], input[c2_idx])
return output
def convert(input, input_schema, output_schema):
""" See STOKES_DOCS below """
# Do the conversion
mapping, in_shape, out_shape, dtype = convert_setup(input,
input_schema,
output_schema)
return convert_impl(input, mapping, in_shape, out_shape, dtype)
CONVERT_DOCS = """
This function converts forward and backward
from stokes ``I,Q,U,V`` to both linear ``XX,XY,YX,YY``
and circular ``RR, RL, LR, LL`` correlations.
For example, we can convert from stokes parameters
to linear correlations:
.. code-block:: python
stokes.shape == (10, 4, 4)
corrs = convert(stokes, ["I", "Q", "U", "V"],
                    [['XX', 'XY'], ['YX', 'YY']])
assert corrs.shape == (10, 4, 2, 2)
Or circular correlations to stokes:
.. code-block:: python
vis.shape == (10, 4, 2, 2)
stokes = convert(vis, [['RR', 'RL'], ['LR', 'LL']],
['I', 'Q', 'U', 'V'])
assert stokes.shape == (10, 4, 4)
``input`` and ``output`` can be arbitrarily nested or ordered lists,
but the appropriate inputs must be present to produce the requested
outputs.
The elements of ``input`` and ``output`` may be strings or integers
representing stokes parameters or correlations. See the Notes
for a full list.
Notes
-----
Only stokes parameters, linear and circular correlations are
currently handled, but the full list of id's and strings as defined
in the `CASA documentation
<https://casacore.github.io/casacore/classcasacore_1_1Stokes.html>`_
is:
.. code-block:: python
{stokes_type_map}
Parameters
----------
input : $(array_type)
Complex or floating point input data of shape
:code:`(dim_1, ..., dim_n, icorr_1, ..., icorr_m)`
input_schema : list of str or int
A schema describing the :code:`icorr_1, ..., icorr_m`
dimension of ``input``. Must have the same shape as
the last dimensions of ``input``.
output_schema : list of str or int
A schema describing the :code:`ocorr_1, ..., ocorr_n`
dimension of the return value.
Returns
-------
result : $(array_type)
Result of shape :code:`(dim_1, ..., dim_n, ocorr_1, ..., ocorr_m)`
The type may be floating point or promoted to complex
depending on the combinations in ``output``.
"""
# Fill in the STOKES TYPES
_map_str = ", ".join(["%s: %d" % (t, i) for i, t in enumerate(STOKES_TYPES)])
_map_str = "{{ " + _map_str + " }}"
# Indent must match docstrings
_map_str = fill(_map_str, initial_indent='', subsequent_indent=' '*8)
CONVERT_DOCS = DocstringTemplate(CONVERT_DOCS.format(stokes_type_map=_map_str))
del _map_str
try:
convert.__doc__ = CONVERT_DOCS.substitute(
array_type=":class:`numpy.ndarray`")
except AttributeError:
pass
```
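A minimal usage sketch of the `convert` function defined above, assuming the module is importable from the path in the file header; the array values are arbitrary and only the schemas and shapes follow the implementation shown.
```python
import numpy as np
from africanus.model.coherency.conversion import convert

# (row, chan, [I, Q, U, V]) stokes parameters with made-up values
stokes = np.random.random((10, 4, 4))
# Forward conversion to a (2, 2) linear correlation schema
corrs = convert(stokes, ["I", "Q", "U", "V"],
                [['XX', 'XY'], ['YX', 'YY']])
assert corrs.shape == (10, 4, 2, 2)
# Converting back recovers the original stokes parameters
recovered = convert(corrs, [['XX', 'XY'], ['YX', 'YY']],
                    ["I", "Q", "U", "V"])
assert np.allclose(recovered, stokes)
```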
#### File: model/shape/gaussian_shape.py
```python
import numpy as np
from africanus.util.docs import DocstringTemplate
from africanus.util.numba import generated_jit
from africanus.constants import c as lightspeed
@generated_jit(nopython=True, nogil=True, cache=True)
def gaussian(uvw, frequency, shape_params):
# https://en.wikipedia.org/wiki/Full_width_at_half_maximum
fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))
fwhminv = 1.0 / fwhm
gauss_scale = fwhminv * np.sqrt(2.0) * np.pi / lightspeed
dtype = np.result_type(*(np.dtype(a.dtype.name) for
a in (uvw, frequency, shape_params)))
def impl(uvw, frequency, shape_params):
nsrc = shape_params.shape[0]
nrow = uvw.shape[0]
nchan = frequency.shape[0]
shape = np.empty((nsrc, nrow, nchan), dtype=dtype)
scaled_freq = np.empty_like(frequency)
# Scale each frequency
for f in range(frequency.shape[0]):
scaled_freq[f] = frequency[f] * gauss_scale
for s in range(shape_params.shape[0]):
emaj, emin, angle = shape_params[s]
# Convert to l-projection, m-projection, ratio
el = emaj * np.sin(angle)
em = emaj * np.cos(angle)
er = emin / (1.0 if emaj == 0.0 else emaj)
for r in range(uvw.shape[0]):
u, v, w = uvw[r]
u1 = (u*em - v*el)*er
v1 = u*el + v*em
for f in range(scaled_freq.shape[0]):
fu1 = u1*scaled_freq[f]
fv1 = v1*scaled_freq[f]
shape[s, r, f] = np.exp(-(fu1*fu1 + fv1*fv1))
return shape
return impl
GAUSSIAN_DOCS = DocstringTemplate(r"""
Computes the Gaussian Shape Function.
.. math::
& \lambda^\prime = 2 \lambda \pi \\
& r = \frac{e_{min}}{e_{maj}} \\
& u_{1} = (u \, e_{maj} \, cos(\alpha) - v \, e_{maj} \, sin(\alpha))
r \lambda^\prime \\
    & v_{1} = (u \, e_{maj} \, sin(\alpha) + v \, e_{maj} \, cos(\alpha))
\lambda^\prime \\
& \textrm{shape} = e^{(-u_{1}^2 - v_{1}^2)}
where:
- :math:`u` and :math:`v` are the UV coordinates and
:math:`\lambda` the frequency.
- :math:`e_{maj}` and :math:`e_{min}` are the major and minor axes
and :math:`\alpha` the position angle.
Parameters
----------
uvw : $(array_type)
UVW coordinates of shape :code:`(row, 3)`
frequency : $(array_type)
frequencies of shape :code:`(chan,)`
shape_param : $(array_type)
Gaussian Shape Parameters of shape :code:`(source, 3)`
where the second dimension contains the
`(emajor, eminor, angle)` parameters describing
the shape of the Gaussian
Returns
-------
gauss_shape : $(array_type)
Shape parameters of shape :code:`(source, row, chan)`
""")
try:
gaussian.__doc__ = GAUSSIAN_DOCS.substitute(
array_type=":class:`numpy.ndarray`")
except KeyError:
pass
```
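A minimal sketch of calling the `gaussian` shape function above, assuming the module path matches the file header; the UVW coordinates, frequencies and shape parameters are arbitrary values chosen only to illustrate the expected shapes.
```python
import numpy as np
from africanus.model.shape.gaussian_shape import gaussian

uvw = np.random.random((100, 3)) * 1000.0        # (row, 3) baseline coordinates
frequency = np.linspace(0.856e9, 1.712e9, 16)    # (chan,) in Hz
# (source, 3) of (emajor, eminor, position angle), all in radians
shape_params = np.array([[1.0e-4, 5.0e-5, 0.3]])
gauss = gaussian(uvw, frequency, shape_params)
assert gauss.shape == (1, 100, 16)               # (source, row, chan)
```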
#### File: model/shape/shapelets.py
```python
import numba
import numpy as np
from africanus.constants import c as lightspeed
from africanus.constants import minus_two_pi_over_c
square_root_of_pi = 1.77245385091
@numba.jit(nogil=True, nopython=True, cache=True)
def hermite(n, x):
if n == 0:
return 1
elif n == 1:
return 2 * x
else:
return 2 * x * hermite(n - 1, x) - 2 * (n - 1) * hermite(n - 2, x)
@numba.jit(numba.uint64(numba.int32), nogil=True, nopython=True, cache=True)
def factorial(n):
if n <= 1:
return 1
ans = 1
for i in range(1, n):
ans = ans * i
return ans * n
@numba.jit(nogil=True, nopython=True, cache=True)
def basis_function(n, xx, beta, fourier=False, delta_x=-1):
if fourier:
x = 2 * np.pi * xx
scale = 1.0 / beta
else:
x = xx
scale = beta
basis_component = 1.0 / np.sqrt(
2.0 ** n * np.sqrt(np.pi) * factorial(n) * scale
)
exponential_component = hermite(n, x / scale) * np.exp(
-(x ** 2) / (2.0 * scale ** 2)
)
if fourier:
return (
1.0j ** n
* basis_component
* exponential_component
* np.sqrt(2 * np.pi)
/ delta_x
)
else:
return basis_component * exponential_component
@numba.jit(nogil=True, nopython=True, cache=True)
def phase_steer_and_w_correct(uvw, lm_source_center, frequency):
l0, m0 = lm_source_center
n0 = np.sqrt(1.0 - l0 ** 2 - m0 ** 2)
u, v, w = uvw
real_phase = (
minus_two_pi_over_c * frequency * (u * l0 + v * m0 + w * (n0 - 1))
)
return np.exp(1.0j * real_phase)
@numba.jit(nogil=True, nopython=True, cache=True)
def shapelet(coords, frequency, coeffs, beta, delta_lm, dtype=np.complex128):
"""
shapelet: outputs visibilities corresponding to that of a shapelet
Inputs:
coords: coordinates in (u,v) space with shape (nrow, 3)
frequency: frequency values with shape (nchan,)
        coeffs: shapelet coefficients of shape (nsrc, nmax1, nmax2),
            where coeffs[3, 4] = coeffs_l[3] * coeffs_m[4]
beta: characteristic shapelet size with shape (nsrc, 2)
        delta_lm: pixel sizes (delta_l, delta_m) in the l and m dimensions
Returns:
out_shapelets: Shapelet with shape (nrow, nchan, nsrc)
"""
nrow = coords.shape[0]
nsrc = coeffs.shape[0]
nchan = frequency.shape[0]
out_shapelets = np.empty((nrow, nchan, nsrc), dtype=np.complex128)
delta_l, delta_m = delta_lm
for row in range(nrow):
u, v, _ = coords[row, :]
for chan in range(nchan):
fu = u * 2 * np.pi * frequency[chan] / lightspeed
fv = v * 2 * np.pi * frequency[chan] / lightspeed
for src in range(nsrc):
nmax1, nmax2 = coeffs[src, :, :].shape
beta_u, beta_v = beta[src, :]
if beta_u == 0 or beta_v == 0:
out_shapelets[row, chan, src] = 1
continue
tmp_shapelet = 0 + 0j
for n1 in range(nmax1):
for n2 in range(nmax2):
tmp_shapelet += (
0
if coeffs[src][n1, n2] == 0
else coeffs[src][n1, n2]
* basis_function(
n1, fu, beta_u, True, delta_x=delta_l
)
* basis_function(
n2, fv, beta_v, True, delta_x=delta_m
)
)
out_shapelets[row, chan, src] = tmp_shapelet
return out_shapelets
@numba.jit(nogil=True, nopython=True, cache=True)
def shapelet_with_w_term(
coords, frequency, coeffs, beta, delta_lm, lm, dtype=np.complex128
):
"""
shapelet: outputs visibilities corresponding to that of a shapelet
Inputs:
coords: coordinates in (u,v) space with shape (nrow, 3)
frequency: frequency values with shape (nchan,)
        coeffs: shapelet coefficients of shape (nsrc, nmax1, nmax2),
            where coeffs[3, 4] = coeffs_l[3] * coeffs_m[4]
beta: characteristic shapelet size with shape (nsrc, 2)
        delta_lm: pixel sizes (delta_l, delta_m) in the l and m dimensions
lm: source center coordinates of shape (nsource, 2)
Returns:
out_shapelets: Shapelet with shape (nrow, nchan, nsrc)
"""
nrow = coords.shape[0]
nsrc = coeffs.shape[0]
nchan = frequency.shape[0]
out_shapelets = np.empty((nrow, nchan, nsrc), dtype=np.complex128)
delta_l, delta_m = delta_lm
for row in range(nrow):
u, v, w = coords[row, :]
for chan in range(nchan):
fu = u * 2 * np.pi * frequency[chan] / lightspeed
fv = v * 2 * np.pi * frequency[chan] / lightspeed
for src in range(nsrc):
nmax1, nmax2 = coeffs[src, :, :].shape
beta_u, beta_v = beta[src, :]
l, m = lm[src, :]
if beta_u == 0 or beta_v == 0:
out_shapelets[row, chan, src] = 1
continue
tmp_shapelet = 0 + 0j
for n1 in range(nmax1):
for n2 in range(nmax2):
tmp_shapelet += (
0
if coeffs[src][n1, n2] == 0
else coeffs[src][n1, n2]
* basis_function(
n1, fu, beta_u, True, delta_x=delta_l
)
* basis_function(
n2, fv, beta_v, True, delta_x=delta_m
)
)
w_term = phase_steer_and_w_correct(
(u, v, w), (l, m), frequency[chan]
)
out_shapelets[row, chan, src] = tmp_shapelet * w_term
return out_shapelets
# @numba.jit(nogil=True, nopython=True, cache=True)
def shapelet_1d(u, coeffs, fourier, delta_x=1, beta=1.0):
"""
The one dimensional shapelet. Default is to return the
dimensionless version.
Parameters
----------
u : :class:`numpy.ndarray`
Array of coordinates at which to evaluate the shapelet
of shape (nrow)
coeffs : :class:`numpy.ndarray`
Array of shapelet coefficients of shape (ncoeff)
fourier : bool
Whether to evaluate the shapelet in Fourier space
or in signal space
beta : float, optional
The scale parameter for the shapelet. If fourier is
true the scale is 1/beta
Returns
-------
out : :class:`numpy.ndarray`
The shapelet evaluated at u of shape (nrow)
"""
nrow = u.size
if fourier:
if delta_x is None:
raise ValueError(
"You have to pass in a value for delta_x in Fourier mode"
)
out = np.zeros(nrow, dtype=np.complex128)
else:
out = np.zeros(nrow, dtype=np.float64)
for row, ui in enumerate(u):
for n, c in enumerate(coeffs):
out[row] += c * basis_function(
n, ui, beta, fourier=fourier, delta_x=delta_x
)
return out
# @numba.jit(nogil=True, nopython=True, cache=True)
def shapelet_2d(u, v, coeffs_l, fourier, delta_x=None, delta_y=None, beta=1.0):
nrow_u = u.size
nrow_v = v.size
if fourier:
if delta_x is None or delta_y is None:
raise ValueError(
"You have to pass in a value for delta_x and delta_y\
in Fourier mode"
)
out = np.zeros((nrow_u, nrow_v), dtype=np.complex128)
else:
out = np.zeros((nrow_u, nrow_v), dtype=np.float64)
for i, ui in enumerate(u):
for j, vj in enumerate(v):
for n1 in range(coeffs_l.shape[0]):
for n2 in range(coeffs_l.shape[1]):
c = coeffs_l[n1, n2]
out[i, j] += (
c
* basis_function(
n1, ui, beta, fourier=fourier, delta_x=delta_x
)
* basis_function(
n2, vj, beta, fourier=fourier, delta_x=delta_y
)
)
return out
```
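A small sketch of the pure-Python `shapelet_1d` helper above, evaluated in signal space and assuming the import path matches the file header; the coordinate grid and coefficients are invented for illustration.
```python
import numpy as np
from africanus.model.shape.shapelets import shapelet_1d

u = np.linspace(-5.0, 5.0, 101)        # evaluation coordinates
coeffs = np.array([1.0, 0.0, 0.5])     # one coefficient per shapelet order
profile = shapelet_1d(u, coeffs, fourier=False, beta=1.0)
assert profile.shape == u.shape and profile.dtype == np.float64
```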
#### File: spi/examples/simple_spi_fitter.py
```python
import argparse
import dask
import dask.array as da
import numpy as np
from astropy.io import fits
import warnings
from africanus.model.spi.dask import fit_spi_components
iFs = np.fft.ifftshift
Fs = np.fft.fftshift
# we want to fall back to numpy if pypocketfft is not installed
# so set up functions to have the same call signatures
try:
from pypocketfft import r2c, c2r
def fft(x, ax, ncpu):
return r2c(x, axes=ax, forward=True,
nthreads=ncpu, inorm=0)
def ifft(y, ax, ncpu, lastsize):
return c2r(y, axes=ax, forward=False, lastsize=lastsize,
                   nthreads=ncpu, inorm=2)
except BaseException:
warnings.warn("No pypocketfft installation found. "
"FFT's will be performed in serial. "
"Install pypocketfft from "
"https://gitlab.mpcdf.mpg.de/mtr/pypocketfft "
"for optimal performance.",
ImportWarning)
from numpy.fft import rfftn, irfftn
# additional arguments will have no effect
def fft(x, ax, ncpu):
return rfftn(x, axes=ax)
def ifft(y, ax, ncpu, lastsize):
return irfftn(y, axes=ax)
def Gaussian2D(xin, yin, GaussPar=(1., 1., 0.)):
S0, S1, PA = GaussPar
PA = 90 + PA
SMaj = np.max([S0, S1])
SMin = np.min([S0, S1])
A = np.array([[1. / SMaj ** 2, 0],
[0, 1. / SMin ** 2]])
c, s, t = np.cos, np.sin, PA
R = np.array([[c(t), -s(t)],
[s(t), c(t)]])
A = np.dot(np.dot(R.T, A), R)
sOut = xin.shape
# only compute the result where necessary
extent = (5 * SMaj)**2
xflat = xin.squeeze()
yflat = yin.squeeze()
ind = np.argwhere(xflat**2 + yflat**2 <= extent).squeeze()
idx = ind[:, 0]
idy = ind[:, 1]
x = np.array([xflat[idx, idy].ravel(), yflat[idx, idy].ravel()])
R = np.einsum('nb,bc,cn->n', x.T, A, x)
# need to adjust for the fact that GaussPar corresponds to FWHM
fwhm_conv = 2*np.sqrt(2*np.log(2))
tmp = np.exp(-fwhm_conv*R)
gausskern = np.zeros_like(xflat, dtype=np.float64)
gausskern[idx, idy] = tmp
return np.ascontiguousarray(gausskern.reshape(sOut),
dtype=np.float64)
def convolve_model(model, gausskern, args):
print("Doing FFT's")
# get padding
_, npix_l, npix_m = model.shape
pfrac = args.padding_frac/2.0
npad_l = int(pfrac*npix_l)
npad_m = int(pfrac*npix_m)
# get fast FFT sizes
try:
from scipy.fftpack import next_fast_len
nfft = next_fast_len(npix_l + 2*npad_l)
npad_ll = (nfft - npix_l)//2
npad_lr = nfft - npix_l - npad_ll
nfft = next_fast_len(npix_m + 2*npad_m)
npad_ml = (nfft - npix_m)//2
npad_mr = nfft - npix_m - npad_ml
padding = ((0, 0), (npad_ll, npad_lr), (npad_ml, npad_mr))
unpad_l = slice(npad_ll, -npad_lr)
unpad_m = slice(npad_ml, -npad_mr)
except BaseException:
warnings.warn("Could not determine fast fft size. "
"Install scipy for optimal performance.",
ImportWarning)
padding = ((0, 0), (npad_l, npad_l), (npad_m, npad_m))
unpad_l = slice(npad_l, -npad_l)
unpad_m = slice(npad_m, -npad_m)
ax = (1, 2) # axes over which to perform fft
lastsize = npix_m + np.sum(padding[-1])
# get FT of convolution kernel
gausskernhat = fft(iFs(np.pad(gausskern[None], padding, mode='constant'),
axes=ax), ax, args.ncpu)
# Convolve model with Gaussian kernel
convmodel = fft(iFs(np.pad(model, padding, mode='constant'), axes=ax),
ax, args.ncpu)
convmodel *= gausskernhat
return Fs(ifft(convmodel, ax, args.ncpu, lastsize),
axes=ax)[:, unpad_l, unpad_m]
def interpolate_beam(xx, yy, maskindices, freqs, args):
print("Interpolating beam")
l_source = xx[maskindices[:, 0], maskindices[:, 1]]
m_source = yy[maskindices[:, 0], maskindices[:, 1]]
lm_source = np.vstack((l_source.ravel(), m_source.ravel())).T
ntime = 1
nant = 1
nband = freqs.size
parangles = np.zeros((ntime, nant,), dtype=np.float64)
ant_scale = np.ones((nant, nband, 2), dtype=np.float64)
point_errs = np.zeros((ntime, nant, nband, 2), dtype=np.float64)
if args.beammodel == "eidos":
raise NotImplementedError("eidos is coming!!!")
else:
print("Loading fits beam patterns from %s" % args.beammodel)
from glob import glob
paths = glob(args.beammodel + '_**_**.fits')
beam_hdr = None
for path in paths:
if 'xx' in path or 'XX' in path or 'rr' in path or 'RR' in path:
if 're' in path:
corr1_re = fits.getdata(path)
if beam_hdr is None:
beam_hdr = fits.getheader(path)
elif 'im' in path:
corr1_im = fits.getdata(path)
else:
raise NotImplementedError("Only re/im patterns supported")
elif 'yy' in path or 'YY' in path or 'll' in path or 'LL' in path:
if 're' in path:
corr2_re = fits.getdata(path)
elif 'im' in path:
corr2_im = fits.getdata(path)
else:
raise NotImplementedError("Only re/im patterns supported")
# get Stokes I amplitude
beam_amp = (corr1_re**2 + corr1_im**2 + corr2_re**2 + corr2_im**2)/2.0
# get cube in correct shape for interpolation code
beam_amp = np.ascontiguousarray(np.transpose(beam_amp, (1, 2, 0))
[:, :, :, None, None])
# get cube info
if beam_hdr['CUNIT1'] != "DEG" and beam_hdr['CUNIT1'] != "deg":
raise ValueError("Beam image units must be in degrees")
npix_l = beam_hdr['NAXIS1']
refpix_l = beam_hdr['CRPIX1']
delta_l = beam_hdr['CDELT1']
l_min = (1 - refpix_l)*delta_l
l_max = (1 + npix_l - refpix_l)*delta_l
if beam_hdr['CUNIT2'] != "DEG" and beam_hdr['CUNIT2'] != "deg":
raise ValueError("Beam image units must be in degrees")
npix_m = beam_hdr['NAXIS2']
refpix_m = beam_hdr['CRPIX2']
delta_m = beam_hdr['CDELT2']
m_min = (1 - refpix_m)*delta_m
m_max = (1 + npix_m - refpix_m)*delta_m
if (l_min > l_source.min() or m_min > m_source.min() or
l_max < l_source.max() or m_max < m_source.max()):
raise ValueError("The supplied beam is not large enough")
beam_extents = np.array([[l_min, l_max], [m_min, m_max]])
# get frequencies
if beam_hdr["CTYPE3"] != 'FREQ':
raise ValueError(
"Cubes are assumed to be in format [nchan, nx, ny]")
nchan = beam_hdr['NAXIS3']
refpix = beam_hdr['CRPIX3']
delta = beam_hdr['CDELT3'] # assumes units are Hz
freq0 = beam_hdr['CRVAL3']
bfreqs = freq0 + np.arange(1 - refpix, 1 + nchan - refpix) * delta
if bfreqs[0] > freqs[0] or bfreqs[-1] < freqs[-1]:
warnings.warn("The supplied beam does not have sufficient "
"bandwidth. Beam frequencies:")
with np.printoptions(precision=2):
print(bfreqs)
# LB - dask probably not necessary for small problem
from africanus.rime.fast_beam_cubes import beam_cube_dde
beam_source = beam_cube_dde(beam_amp, beam_extents, bfreqs,
lm_source, parangles, point_errs,
ant_scale, freqs).squeeze()
return beam_source
def create_parser():
    p = argparse.ArgumentParser(description='Simple spectral index fitting '
'tool.',
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument("--fitsmodel", type=str, required=True)
p.add_argument("--fitsresidual", type=str)
p.add_argument('--outfile', type=str,
help="Path to output directory. \n"
"Placed next to input model if outfile not provided.")
p.add_argument('--beampars', default=None, nargs='+', type=float,
help="Beam parameters matching FWHM of restoring beam "
"specified as emaj emin pa. \n"
"By default these are taken from the fits header "
"of the residual image.")
p.add_argument('--threshold', default=5, type=float,
help="Multiple of the rms in the residual to threshold "
"on. \n"
"Only components above threshold*rms will be fit.")
p.add_argument('--maxDR', default=100, type=float,
help="Maximum dynamic range used to determine the "
"threshold above which components need to be fit. \n"
"Only used if residual is not passed in.")
p.add_argument('--ncpu', default=0, type=int,
help="Number of threads to use. \n"
"Default of zero means use all threads")
p.add_argument('--beammodel', default=None, type=str,
help="Fits beam model to use. \n"
"It is assumed that the pattern is path_to_beam/"
"name_corr_re/im.fits. \n"
"Provide only the path up to name "
"e.g. /home/user/beams/meerkat_lband. \n"
"Patterns mathing corr are determined "
"automatically. \n"
"Only real and imaginary beam models currently "
"supported.")
p.add_argument('--output', default='aiIbc', type=str,
help="Outputs to write. Letter correspond to: \n"
"a - alpha map \n"
"i - I0 map \n"
"I - reconstructed cube form alpha and I0 \n"
"b - interpolated beam \n"
"c - restoring beam used for convolution \n"
"Default is to write all of them")
p.add_argument("--padding_frac", default=0.2, type=float,
help="Padding factor for FFT's.")
return p
def main(args):
ref_hdr = fits.getheader(args.fitsresidual)
if args.beampars is None:
print("Attempting to take beampars from residual fits header")
emaj = ref_hdr['BMAJ1']
emin = ref_hdr['BMIN1']
pa = ref_hdr['BPA1']
beampars = (emaj, emin, pa)
else:
beampars = tuple(args.beampars)
# emaj, emin, pa = args.beampars
print("Using emaj = %3.2e, emin = %3.2e, PA = %3.2e" % beampars)
# load images
model = np.ascontiguousarray(fits.getdata(args.fitsmodel).squeeze(),
dtype=np.float64)
mhdr = fits.getheader(args.fitsmodel)
if mhdr['CUNIT1'] != "DEG" and mhdr['CUNIT1'] != "deg":
raise ValueError("Image units must be in degrees")
npix_l = mhdr['NAXIS1']
refpix_l = mhdr['CRPIX1']
delta_l = mhdr['CDELT1']
l_coord = np.arange(1 - refpix_l, 1 + npix_l - refpix_l)*delta_l
if mhdr['CUNIT2'] != "DEG" and mhdr['CUNIT2'] != "deg":
raise ValueError("Image units must be in degrees")
npix_m = mhdr['NAXIS2']
refpix_m = mhdr['CRPIX2']
delta_m = mhdr['CDELT2']
m_coord = np.arange(1 - refpix_m, 1 + npix_m - refpix_m)*delta_m
print("Image shape = ", (npix_l, npix_m))
# get frequencies
if mhdr["CTYPE4"] == 'FREQ':
freq_axis = 4
nband = mhdr['NAXIS4']
refpix_nu = mhdr['CRPIX4']
delta_nu = mhdr['CDELT4'] # assumes units are Hz
ref_freq = mhdr['CRVAL4']
ncorr = mhdr['NAXIS3']
elif mhdr["CTYPE3"] == 'FREQ':
freq_axis = 3
nband = mhdr['NAXIS3']
refpix_nu = mhdr['CRPIX3']
delta_nu = mhdr['CDELT3'] # assumes units are Hz
ref_freq = mhdr['CRVAL3']
ncorr = mhdr['NAXIS4']
else:
raise ValueError("Freq axis must be 3rd or 4th")
if ncorr > 1:
raise ValueError("Only Stokes I cubes supported")
freqs = ref_freq + np.arange(1 - refpix_nu,
1 + nband - refpix_nu) * delta_nu
print("Cube frequencies:")
with np.printoptions(precision=2):
print(freqs)
print("Reference frequency is %3.2e Hz " % ref_freq)
# get the Gaussian convolution kernel
xx, yy = np.meshgrid(l_coord, m_coord)
gausskern = Gaussian2D(xx, yy, beampars)
    # Convolve model with Gaussian restoring beam at lowest frequency
model = convolve_model(model, gausskern, args)
# set threshold
if args.fitsresidual is not None:
resid = fits.getdata(args.fitsresidual).squeeze().astype(np.float64)
rms = np.std(resid)
rms_cube = np.std(resid.reshape(nband, npix_l*npix_m), axis=1).ravel()
threshold = args.threshold * rms
print("Setting cutoff threshold as %i times the rms "
"of the residual" % args.threshold)
del resid
else:
print("No residual provided. Setting threshold i.t.o dynamic range. "
"Max dynamic range is %i" % args.maxDR)
threshold = model.max()/args.maxDR
        rms_cube = None
print("Threshold set to %f Jy." % threshold)
# get pixels above threshold
minimage = np.amin(model, axis=0)
maskindices = np.argwhere(minimage > threshold)
if not maskindices.size:
raise ValueError("No components found above threshold. "
"Try lowering your threshold."
"Max of convolved model is %3.2e" % model.max())
fitcube = model[:, maskindices[:, 0], maskindices[:, 1]].T
print(xx.shape, yy.shape, maskindices.shape)
# get primary beam at source locations
if args.beammodel is not None:
beam_source = interpolate_beam(xx, yy, maskindices, freqs, args)
# correct cube
fitcube /= beam_source
# set weights for fit
if rms_cube is not None:
print("Using RMS in each imaging band to determine weights.")
weights = np.where(rms_cube > 0, 1.0/rms_cube**2, 0.0)
# normalise
weights /= weights.max()
else:
print("No residual provided. Using equal weights.")
weights = np.ones(nband, dtype=np.float64)
ncomps, _ = fitcube.shape
fitcube = da.from_array(fitcube.astype(np.float64),
chunks=(ncomps//args.ncpu, nband))
weights = da.from_array(weights.astype(np.float64), chunks=(nband))
freqsdask = da.from_array(freqs.astype(np.float64), chunks=(nband))
print("Fitting %i components" % ncomps)
alpha, _, Iref, _ = fit_spi_components(fitcube, weights, freqsdask,
np.float64(ref_freq)).compute()
print("Done. Writing output.")
alphamap = np.zeros([npix_l, npix_m])
i0map = np.zeros([npix_l, npix_m])
alphamap[maskindices[:, 0], maskindices[:, 1]] = alpha
i0map[maskindices[:, 0], maskindices[:, 1]] = Iref
# save next to model if no outfile is provided
if args.outfile is None:
# find last /
tmp = args.fitsmodel[::-1]
idx = tmp.find('/')
if idx != -1:
outfile = args.fitsmodel[0:-idx]
else:
outfile = 'image-'
else:
outfile = args.outfile
hdu = fits.PrimaryHDU(header=mhdr)
if 'I' in args.output:
# get the reconstructed cube
Irec_cube = i0map[None, :, :] * \
(freqs[:, None, None]/ref_freq)**alphamap[None, :, :]
# save it
if freq_axis == 3:
hdu.data = Irec_cube[None, :, :, :]
elif freq_axis == 4:
hdu.data = Irec_cube[:, None, :, :]
name = outfile + 'Irec_cube.fits'
hdu.writeto(name, overwrite=True)
print("Wrote reconstructed cube to %s" % name)
if args.beammodel is not None and 'b' in args.output:
beam_map = np.zeros((nband, npix_l, npix_m))
beam_map[:, maskindices[:, 0], maskindices[:, 1]] = beam_source.T
if freq_axis == 3:
hdu.data = beam_map[None, :, :, :]
elif freq_axis == 4:
hdu.data = beam_map[:, None, :, :]
name = outfile + 'interpolated_beam_cube.fits'
hdu.writeto(name, overwrite=True)
print("Wrote interpolated beam cube to %s" % name)
hdr_keys = ['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'NAXIS3',
'NAXIS4', 'BUNIT', 'BMAJ', 'BMIN', 'BPA', 'EQUINOX', 'BTYPE',
'TELESCOP', 'OBSERVER', 'OBJECT', 'ORIGIN', 'CTYPE1', 'CTYPE2',
'CTYPE3', 'CTYPE4', 'CRPIX1', 'CRPIX2', 'CRPIX3', 'CRPIX4',
'CRVAL1', 'CRVAL2', 'CRVAL3', 'CRVAL4', 'CDELT1', 'CDELT2',
'CDELT3', 'CDELT4', 'CUNIT1', 'CUNIT2', 'CUNIT3', 'CUNIT4',
'SPECSYS', 'DATE-OBS']
new_hdr = {}
for key in hdr_keys:
new_hdr[key] = ref_hdr[key]
if freq_axis == 3:
new_hdr["NAXIS3"] = 1
new_hdr["CRVAL3"] = ref_freq
elif freq_axis == 4:
new_hdr["NAXIS4"] = 1
new_hdr["CRVAL4"] = ref_freq
new_hdr = fits.Header(new_hdr)
# save alpha map
if 'a' in args.output:
hdu = fits.PrimaryHDU(header=new_hdr)
hdu.data = alphamap
name = outfile + 'alpha.fits'
hdu.writeto(name, overwrite=True)
print("Wrote alpha map to %s" % name)
# save I0 map
if 'i' in args.output:
hdu = fits.PrimaryHDU(header=new_hdr)
hdu.data = i0map
name = outfile + 'I0.fits'
hdu.writeto(name, overwrite=True)
print("Wrote I0 map to %s" % name)
# save clean beam for consistency check
if 'c' in args.output:
hdu = fits.PrimaryHDU(header=new_hdr)
hdu.data = gausskern
name = outfile + 'clean-beam.fits'
hdu.writeto(name, overwrite=True)
print("Wrote clean beam to %s" % name)
print("All done here")
if __name__ == "__main__":
args = create_parser().parse_args()
if args.ncpu:
from multiprocessing.pool import ThreadPool
dask.config.set(pool=ThreadPool(args.ncpu))
else:
import multiprocessing
args.ncpu = multiprocessing.cpu_count()
print("Using %i threads" % args.ncpu)
main(args)
```
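A stripped-down sketch of the dask spectral-index fit that the script above drives, using synthetic power-law components instead of FITS cubes; the call signature and chunking follow the `fit_spi_components` call in `main`, while the data values are made up.
```python
import dask.array as da
import numpy as np
from africanus.model.spi.dask import fit_spi_components

ncomps, nband = 100, 8
freqs = np.linspace(1.0e9, 2.0e9, nband)
ref_freq = 1.5e9
# synthetic components following a power law with spectral index -0.7
i0 = np.random.uniform(0.1, 1.0, ncomps)
fitcube = i0[:, None] * (freqs[None, :] / ref_freq) ** -0.7
weights = np.ones(nband, dtype=np.float64)
alpha, _, iref, _ = fit_spi_components(
    da.from_array(fitcube, chunks=(25, nband)),
    da.from_array(weights, chunks=nband),
    da.from_array(freqs, chunks=nband),
    np.float64(ref_freq)).compute()
assert alpha.shape == (ncomps,) and iref.shape == (ncomps,)
```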
#### File: model/wsclean/spec_model.py
```python
from numba import types
import numpy as np
from africanus.util.numba import generated_jit
from africanus.util.docs import DocstringTemplate
def ordinary_spectral_model(I, coeffs, log_poly, freq, ref_freq): # noqa: E741
""" Numpy ordinary polynomial implementation """
coeffs_idx = np.arange(1, coeffs.shape[1] + 1)
# (source, chan, coeffs-comp)
term = (freq[None, :, None] / ref_freq[:, None, None]) - 1.0
term = term**coeffs_idx[None, None, :]
term = coeffs[:, None, :]*term
return I[:, None] + term.sum(axis=2)
def log_spectral_model(I, coeffs, log_poly, freq, ref_freq): # noqa: E741
""" Numpy logarithmic polynomial implementation """
# No negative flux
I = np.where(log_poly == False, 1.0, I) # noqa: E741, E712
coeffs_idx = np.arange(1, coeffs.shape[1] + 1)
# (source, chan, coeffs-comp)
term = np.log(freq[None, :, None] / ref_freq[:, None, None])
term = term**coeffs_idx[None, None, :]
term = coeffs[:, None, :]*term
return np.exp(np.log(I)[:, None] + term.sum(axis=2))
@generated_jit(nopython=True, nogil=True, cache=True)
def _check_log_poly_shape(coeffs, log_poly):
if isinstance(log_poly, types.npytypes.Array):
def impl(coeffs, log_poly):
if coeffs.shape[0] != log_poly.shape[0]:
raise ValueError("coeffs.shape[0] != log_poly.shape[0]")
elif isinstance(log_poly, types.scalars.Boolean):
def impl(coeffs, log_poly):
pass
else:
raise ValueError("log_poly must be ndarray or bool")
return impl
@generated_jit(nopython=True, nogil=True, cache=True)
def _log_polynomial(log_poly, s):
if isinstance(log_poly, types.npytypes.Array):
def impl(log_poly, s):
return log_poly[s]
elif isinstance(log_poly, types.scalars.Boolean):
def impl(log_poly, s):
return log_poly
else:
raise ValueError("log_poly must be ndarray or bool")
return impl
@generated_jit(nopython=True, nogil=True, cache=True)
def spectra(I, coeffs, log_poly, ref_freq, frequency): # noqa: E741
arg_dtypes = tuple(np.dtype(a.dtype.name) for a
in (I, coeffs, ref_freq, frequency))
dtype = np.result_type(*arg_dtypes)
def impl(I, coeffs, log_poly, ref_freq, frequency): # noqa: E741
if not (I.shape[0] == coeffs.shape[0] == ref_freq.shape[0]):
raise ValueError("first dimensions of I, coeffs "
"and ref_freq don't match.")
_check_log_poly_shape(coeffs, log_poly)
nsrc = I.shape[0]
nchan = frequency.shape[0]
ncoeffs = coeffs.shape[1]
spectral_model = np.empty((nsrc, nchan), dtype=dtype)
for s in range(nsrc):
rf = ref_freq[s]
if _log_polynomial(log_poly, s):
for f in range(frequency.shape[0]):
nu = frequency[f]
flux = I[s]
if flux <= 0.0:
raise ValueError("Log polynomial flux must be > 0")
# Initialise with base polynomial value
spectral_model[s, f] = np.log(flux)
for c in range(ncoeffs):
term = coeffs[s, c]
if term <= 0.0:
raise ValueError("log polynomial coefficient "
"must be > 0")
term *= np.log(nu/rf)**(c + 1)
spectral_model[s, f] += term
spectral_model[s, f] = np.exp(spectral_model[s, f])
else:
for f in range(frequency.shape[0]):
nu = frequency[f]
# Initialise with base polynomial value
spectral_model[s, f] = I[s]
for c in range(ncoeffs):
term = coeffs[s, c]
term *= ((nu/rf) - 1.0)**(c + 1)
spectral_model[s, f] += term
return spectral_model
return impl
SPECTRA_DOCS = DocstringTemplate(r"""
Produces a spectral model from a polynomial expansion of
a wsclean file model. Depending on how `log_poly` is set
ordinary or logarithmic polynomials are used to produce
the expansion:
.. math::
& flux(\lambda) =
I_{0} + \sum\limits_{c=0} \textrm{coeffs}(c)
({\lambda/\lambda_{ref}} - 1)^{c+1}
\\
& flux(\lambda) =
\exp \left( \log I_{0} +
\sum\limits_{c=0} \textrm{coeffs}(c)
\log({\lambda/\lambda_{ref}})^{c+1}
\right) \\
See the `WSClean Component List
<https://sourceforge.net/p/wsclean/wiki/ComponentList/>`_
for further details.
Parameters
----------
I : $(array_type)
flux density in Janskys at the reference frequency
of shape :code:`(source,)`
coeffs : $(array_type)
Polynomial coefficients for each source of
shape :code:`(source, comp)`
log_poly : $(array_type) or bool
boolean array of shape :code:`(source, )`
indicating whether logarithmic (True) or ordinary (False)
polynomials should be used.
ref_freq : $(array_type)
Source reference frequencies of shape :code:`(source,)`
frequency : $(array_type)
frequencies of shape :code:`(chan,)`
See Also
--------
africanus.model.wsclean.load
Returns
-------
spectral_model : $(array_type)
Spectral Model of shape :code:`(source, chan)`
""")
try:
spectra.__doc__ = SPECTRA_DOCS.substitute(
array_type=":class:`numpy.ndarray`")
except AttributeError:
pass
```
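A minimal sketch of the `spectra` function above with two made-up sources, one using a logarithmic and one an ordinary polynomial; the import path mirrors the file header.
```python
import numpy as np
from africanus.model.wsclean.spec_model import spectra

stokes_i = np.array([1.0, 2.0])               # flux at each reference frequency
coeffs = np.array([[0.7, 0.2], [0.3, 0.1]])   # (source, comp)
log_poly = np.array([True, False])            # per-source polynomial type
ref_freq = np.array([1.4e9, 1.4e9])
frequency = np.linspace(1.2e9, 1.6e9, 8)
model = spectra(stokes_i, coeffs, log_poly, ref_freq, frequency)
assert model.shape == (2, 8)                  # (source, chan)
```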
#### File: rime/cuda/beam.py
```python
from functools import reduce
import logging
from operator import mul
from pathlib import Path
import numpy as np
from africanus.rime.fast_beam_cubes import BEAM_CUBE_DOCS
from africanus.util.code import format_code, memoize_on_key
from africanus.util.cuda import cuda_function, grids
from africanus.util.jinja2 import jinja_env
from africanus.util.requirements import requires_optional
try:
import cupy as cp
from cupy.core._scalar import get_typename as _get_typename
from cupy.cuda.compiler import CompileException
except ImportError as e:
opt_import_error = e
else:
opt_import_error = None
log = logging.getLogger(__name__)
_MAIN_TEMPLATE_PATH = Path("rime", "cuda", "beam.cu.j2")
_INTERP_TEMPLATE_PATH = Path("rime", "cuda", "beam_freq_interp.cu.j2")
BEAM_NUD_LIMIT = 128
def _freq_interp_key(beam_freq_map, frequencies):
return (beam_freq_map.dtype, frequencies.dtype)
@memoize_on_key(_freq_interp_key)
def _generate_interp_kernel(beam_freq_map, frequencies):
render = jinja_env.get_template(str(_INTERP_TEMPLATE_PATH)).render
name = "beam_cube_freq_interp"
block = (1024, 1, 1)
code = render(kernel_name=name,
beam_nud_limit=BEAM_NUD_LIMIT,
blockdimx=block[0],
beam_freq_type=_get_typename(beam_freq_map.dtype),
freq_type=_get_typename(frequencies.dtype))
code = code.encode('utf-8')
dtype = np.result_type(beam_freq_map, frequencies)
return cp.RawKernel(code, name), block, dtype
def _main_key_fn(beam, beam_lm_ext, beam_freq_map,
lm, parangles, pointing_errors,
antenna_scaling, frequencies,
dde_dims, ncorr):
return (beam.dtype, beam.ndim, beam_lm_ext.dtype, beam_freq_map.dtype,
lm.dtype, parangles.dtype, pointing_errors.dtype,
antenna_scaling.dtype, frequencies.dtype, dde_dims, ncorr)
# Value to use in a bit shift to recover channel from flattened
# channel/correlation index
_corr_shifter = {4: 2, 2: 1, 1: 0}
@memoize_on_key(_main_key_fn)
def _generate_main_kernel(beam, beam_lm_ext, beam_freq_map,
lm, parangles, pointing_errors,
antenna_scaling, frequencies,
dde_dims, ncorr):
beam_lw, beam_mh, beam_nud = beam.shape[:3]
if beam_lw < 2 or beam_mh < 2 or beam_nud < 2:
raise ValueError("(beam_lw, beam_mh, beam_nud) < 2 "
"to linearly interpolate")
# Create template
render = jinja_env.get_template(str(_MAIN_TEMPLATE_PATH)).render
name = "beam_cube_dde"
dtype = beam.dtype
if dtype == np.complex64:
block = (32, 32, 1)
elif dtype == np.complex128:
block = (32, 16, 1)
else:
raise TypeError("Need complex beam cube '%s'" % beam.dtype)
try:
corr_shift = _corr_shifter[ncorr]
except KeyError:
raise ValueError("Number of Correlations not in %s"
% list(_corr_shifter.keys()))
coord_type = np.result_type(beam_lm_ext, lm, parangles,
pointing_errors, antenna_scaling,
np.float32)
assert coord_type in (np.float32, np.float64)
code = render(kernel_name=name,
blockdimx=block[0],
blockdimy=block[1],
blockdimz=block[2],
corr_shift=corr_shift,
ncorr=ncorr,
beam_nud_limit=BEAM_NUD_LIMIT,
# Beam type and manipulation functions
beam_type=_get_typename(beam.real.dtype),
beam_dims=beam.ndim,
make2_beam_fn=cuda_function('make2', beam.real.dtype),
beam_sqrt_fn=cuda_function('sqrt', beam.real.dtype),
beam_rsqrt_fn=cuda_function('rsqrt', beam.real.dtype),
# Coordinate type and manipulation functions
FT=_get_typename(coord_type),
floor_fn=cuda_function('floor', coord_type),
min_fn=cuda_function('min', coord_type),
max_fn=cuda_function('max', coord_type),
cos_fn=cuda_function('cos', coord_type),
sin_fn=cuda_function('sin', coord_type),
lm_ext_type=_get_typename(beam_lm_ext.dtype),
beam_freq_type=_get_typename(beam_freq_map.dtype),
lm_type=_get_typename(lm.dtype),
pa_type=_get_typename(parangles.dtype),
pe_type=_get_typename(pointing_errors.dtype),
as_type=_get_typename(antenna_scaling.dtype),
freq_type=_get_typename(frequencies.dtype),
dde_type=_get_typename(beam.real.dtype),
dde_dims=dde_dims)
code = code.encode('utf-8')
# Complex output type
return cp.RawKernel(code, name), block, dtype
@requires_optional('cupy', opt_import_error)
def beam_cube_dde(beam, beam_lm_ext, beam_freq_map,
lm, parangles, pointing_errors,
antenna_scaling, frequencies):
corrs = beam.shape[3:]
if beam.shape[2] >= BEAM_NUD_LIMIT:
raise ValueError("beam_nud exceeds %d" % BEAM_NUD_LIMIT)
nsrc = lm.shape[0]
ntime, na = parangles.shape
nchan = frequencies.shape[0]
ncorr = reduce(mul, corrs, 1)
nchancorr = nchan*ncorr
oshape = (nsrc, ntime, na, nchan) + corrs
if len(corrs) > 1:
# Flatten the beam correlation dims
fbeam = beam.reshape(beam.shape[:3] + (ncorr,))
else:
fbeam = beam
# Generate frequency interpolation kernel
ikernel, iblock, idt = _generate_interp_kernel(beam_freq_map, frequencies)
# Generate main beam cube kernel
kernel, block, dtype = _generate_main_kernel(fbeam, beam_lm_ext,
beam_freq_map,
lm, parangles,
pointing_errors,
antenna_scaling,
frequencies,
len(oshape),
ncorr)
# Call frequency interpolation kernel
igrid = grids((nchan, 1, 1), iblock)
freq_data = cp.empty((3, nchan), dtype=frequencies.dtype)
try:
ikernel(igrid, iblock, (frequencies, beam_freq_map, freq_data))
except CompileException:
log.exception(format_code(ikernel.code))
raise
# Call main beam cube kernel
out = cp.empty((nsrc, ntime, na, nchan) + (ncorr,), dtype=beam.dtype)
grid = grids((nchancorr, na, ntime), block)
try:
kernel(grid, block, (fbeam, beam_lm_ext, beam_freq_map,
lm, parangles, pointing_errors,
antenna_scaling, frequencies, freq_data,
nsrc, out))
except CompileException:
log.exception(format_code(kernel.code))
raise
return out.reshape(oshape)
try:
beam_cube_dde.__doc__ = BEAM_CUBE_DOCS.substitute(
array_type=":class:`cupy.ndarray`")
except AttributeError:
pass
```
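A hypothetical end-to-end call of the cupy `beam_cube_dde` above, assuming a CUDA-capable GPU with cupy installed; the random beam cube and coordinate arrays exist only to illustrate the expected shapes, which mirror the numpy beam cube usage earlier in this document.
```python
import cupy as cp
import numpy as np
from africanus.rime.cuda.beam import beam_cube_dde

nsrc, ntime, na, nchan, beam_nud = 10, 5, 7, 16, 32
beam_shape = (17, 17, beam_nud, 2, 2)
beam = (cp.random.random(beam_shape) +
        1j * cp.random.random(beam_shape)).astype(np.complex64)
beam_lm_ext = cp.asarray([[-1.0, 1.0], [-1.0, 1.0]])
beam_freq_map = cp.asarray(np.linspace(0.8e9, 1.8e9, beam_nud))
lm = cp.asarray(np.random.random((nsrc, 2)) * 1e-2)
parangles = cp.zeros((ntime, na), dtype=np.float64)
pointing_errors = cp.zeros((ntime, na, nchan, 2), dtype=np.float64)
antenna_scaling = cp.ones((na, nchan, 2), dtype=np.float64)
frequencies = cp.asarray(np.linspace(0.9e9, 1.7e9, nchan))
ddes = beam_cube_dde(beam, beam_lm_ext, beam_freq_map, lm, parangles,
                     pointing_errors, antenna_scaling, frequencies)
assert ddes.shape == (nsrc, ntime, na, nchan, 2, 2)
```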
#### File: rime/cuda/predict.py
```python
from functools import reduce
import logging
from operator import mul
from os.path import join as pjoin
import numpy as np
from africanus.rime.predict import (PREDICT_DOCS, predict_checks)
from africanus.util.code import format_code, memoize_on_key
from africanus.util.cuda import cuda_type, grids
from africanus.util.jinja2 import jinja_env
from africanus.util.requirements import requires_optional
try:
import cupy as cp
from cupy.cuda.compiler import CompileException
except ImportError as e:
opt_import_error = e
else:
opt_import_error = None
log = logging.getLogger(__name__)
_TEMPLATE_PATH = pjoin("rime", "cuda", "predict.cu.j2")
def _key_fn(*args):
""" Hash on array datatypes and rank """
return tuple((a.dtype, a.ndim)
if isinstance(a, (np.ndarray, cp.ndarray))
else a for a in args)
@memoize_on_key(_key_fn)
def _generate_kernel(time_index, antenna1, antenna2,
dde1_jones, source_coh, dde2_jones,
die1_jones, base_vis, die2_jones,
corrs, out_ndim):
tup = predict_checks(time_index, antenna1, antenna2,
dde1_jones, source_coh, dde2_jones,
die1_jones, base_vis, die2_jones)
(have_ddes1, have_coh, have_ddes2, have_dies1, have_bvis, have_dies2) = tup
# Check types
if time_index.dtype != np.int32:
raise TypeError("time_index.dtype != np.int32 '%s'" % time_index.dtype)
if antenna1.dtype != np.int32:
raise TypeError("antenna1.dtype != np.int32 '%s'" % antenna1.dtype)
if antenna2.dtype != np.int32:
raise TypeError("antenna2.dtype != np.int32 '%s'" % antenna2.dtype)
# Create template
render = jinja_env.get_template(_TEMPLATE_PATH).render
name = "predict_vis"
# Complex output type
out_dtype = np.result_type(dde1_jones, source_coh, dde2_jones,
die1_jones, base_vis, die2_jones)
ncorrs = reduce(mul, corrs, 1)
# corrs x channels, rows
blockdimx = 32
blockdimy = 24 if out_dtype == np.complex128 else 32
block = (blockdimx, blockdimy, 1)
code = render(kernel_name=name, blockdimx=blockdimx, blockdimy=blockdimy,
have_dde1=have_ddes1,
dde1_type=cuda_type(dde1_jones) if have_ddes1 else "int",
dde1_ndim=dde1_jones.ndim if have_ddes1 else 1,
have_dde2=have_ddes2,
dde2_type=cuda_type(dde2_jones) if have_ddes2 else "int",
dde2_ndim=dde2_jones.ndim if have_ddes2 else 1,
have_coh=have_coh,
coh_type=cuda_type(source_coh) if have_coh else "int",
coh_ndim=source_coh.ndim if have_coh else 1,
have_die1=have_dies1,
die1_type=cuda_type(die1_jones) if have_dies1 else "int",
die1_ndim=die1_jones.ndim if have_dies1 else 1,
have_base_vis=have_bvis,
base_vis_type=cuda_type(base_vis) if have_bvis else "int",
base_vis_ndim=base_vis.ndim if have_bvis else 1,
have_die2=have_dies2,
die2_type=cuda_type(die2_jones) if have_dies2 else "int",
die2_ndim=die2_jones.ndim if have_dies2 else 1,
out_type=cuda_type(out_dtype),
corrs=ncorrs,
out_ndim=out_ndim,
warp_size=32).encode('utf-8')
return cp.RawKernel(code, name), block, out_dtype
@requires_optional("cupy", opt_import_error)
def predict_vis(time_index, antenna1, antenna2,
dde1_jones=None, source_coh=None, dde2_jones=None,
die1_jones=None, base_vis=None, die2_jones=None):
""" Cupy implementation of the feed_rotation kernel. """
have_ddes = dde1_jones is not None and dde2_jones is not None
have_dies = die1_jones is not None and die2_jones is not None
have_coh = source_coh is not None
have_bvis = base_vis is not None
# Infer the output shape
if have_ddes:
row = time_index.shape[0]
chan = dde1_jones.shape[3]
corrs = dde1_jones.shape[4:]
elif have_coh:
row = time_index.shape[0]
chan = source_coh.shape[2]
corrs = source_coh.shape[3:]
elif have_dies:
row = time_index.shape[0]
chan = die1_jones.shape[2]
corrs = die1_jones.shape[3:]
elif have_bvis:
row = time_index.shape[0]
chan = base_vis.shape[1]
corrs = base_vis.shape[2:]
else:
raise ValueError("Insufficient inputs supplied for determining "
"the output shape")
ncorrs = len(corrs)
# Flatten correlations
if ncorrs == 2:
flat_corrs = reduce(mul, corrs, 1)
if have_ddes:
dde_shape = dde1_jones.shape[:-ncorrs] + (flat_corrs,)
dde1_jones = dde1_jones.reshape(dde_shape)
dde2_jones = dde2_jones.reshape(dde_shape)
if have_coh:
coh_shape = source_coh.shape[:-ncorrs] + (flat_corrs,)
source_coh = source_coh.reshape(coh_shape)
if have_dies:
die_shape = die1_jones.shape[:-ncorrs] + (flat_corrs,)
die1_jones = die1_jones.reshape(die_shape)
die2_jones = die2_jones.reshape(die_shape)
if have_bvis:
bvis_shape = base_vis.shape[:-ncorrs] + (flat_corrs,)
base_vis = base_vis.reshape(bvis_shape)
elif ncorrs == 1:
flat_corrs = corrs[0]
else:
raise ValueError("Invalid correlation setup %s" % (corrs,))
out_shape = (row, chan) + (flat_corrs,)
kernel, block, out_dtype = _generate_kernel(time_index,
antenna1,
antenna2,
dde1_jones,
source_coh,
dde2_jones,
die1_jones,
base_vis,
die2_jones,
corrs,
len(out_shape))
grid = grids((chan*flat_corrs, row, 1), block)
out = cp.empty(shape=out_shape, dtype=out_dtype)
# Normalise the time index
# TODO(sjperkins)
# Normalise the time index with a device-wide reduction
norm_time_index = time_index - time_index.min()
args = (norm_time_index, antenna1, antenna2,
dde1_jones, source_coh, dde2_jones,
die1_jones, base_vis, die2_jones,
out)
try:
kernel(grid, block, tuple(a for a in args if a is not None))
except CompileException:
log.exception(format_code(kernel.code))
raise
return out.reshape((row, chan) + corrs)
try:
predict_vis.__doc__ = PREDICT_DOCS.substitute(
array_type=":class:`cupy.ndarray`",
get_time_index=":code:`cp.unique(time, "
"return_inverse=True)[1]`",
extra_args="",
extra_notes="")
except AttributeError:
pass
```
#### File: cuda/tests/test_cuda_predict.py
```python
import numpy as np
import pytest
from africanus.rime.predict import predict_vis as np_predict_vis
from africanus.rime.cuda.predict import predict_vis
from africanus.rime.tests.test_predict import (corr_shape_parametrization,
die_presence_parametrization,
dde_presence_parametrization,
chunk_parametrization,
rc)
@corr_shape_parametrization
@dde_presence_parametrization
@die_presence_parametrization
@chunk_parametrization
def test_cuda_predict_vis(corr_shape, idm, einsum_sig1, einsum_sig2,
a1j, blj, a2j, g1j, bvis, g2j,
chunks):
np.random.seed(40)
cp = pytest.importorskip('cupy')
s = sum(chunks['source'])
t = sum(chunks['time'])
a = sum(chunks['antenna'])
c = sum(chunks['channels'])
r = sum(chunks['rows'])
a1_jones = rc((s, t, a, c) + corr_shape)
bl_jones = rc((s, r, c) + corr_shape)
a2_jones = rc((s, t, a, c) + corr_shape)
g1_jones = rc((t, a, c) + corr_shape)
base_vis = rc((r, c) + corr_shape)
g2_jones = rc((t, a, c) + corr_shape)
# Add 10 to the index to test time index normalisation
time_idx = np.concatenate([np.full(rows, i+10, dtype=np.int32)
for i, rows in enumerate(chunks['rows'])])
ant1 = np.concatenate([np.random.randint(0, a, rows, dtype=np.int32)
for rows in chunks['rows']])
ant2 = np.concatenate([np.random.randint(0, a, rows, dtype=np.int32)
for rows in chunks['rows']])
assert ant1.size == r
model_vis = predict_vis(cp.asarray(time_idx),
cp.asarray(ant1),
cp.asarray(ant2),
cp.asarray(a1_jones) if a1j else None,
cp.asarray(bl_jones) if blj else None,
cp.asarray(a2_jones) if a2j else None,
cp.asarray(g1_jones) if g1j else None,
cp.asarray(base_vis) if bvis else None,
cp.asarray(g2_jones) if g2j else None)
np_model_vis = np_predict_vis(time_idx,
ant1,
ant2,
a1_jones if a1j else None,
bl_jones if blj else None,
a2_jones if a2j else None,
g1_jones if g1j else None,
base_vis if bvis else None,
g2_jones if g2j else None)
np.testing.assert_array_almost_equal(cp.asnumpy(model_vis), np_model_vis)
```
#### File: rime/examples/predict.py
```python
import argparse
from collections import namedtuple
from functools import lru_cache
from operator import getitem
import weakref
import numpy as np
try:
from astropy.io import fits
import dask
import dask.array as da
from dask.diagnostics import ProgressBar
import Tigger
from daskms import xds_from_ms, xds_from_table, xds_to_table
except ImportError as e:
opt_import_error = e
else:
opt_import_error = None
from africanus.util.beams import beam_filenames, beam_grids
from africanus.coordinates.dask import radec_to_lm
from africanus.rime.dask import (phase_delay, predict_vis, parallactic_angles,
beam_cube_dde, feed_rotation)
from africanus.model.coherency.dask import convert
from africanus.model.spectral.dask import spectral_model
from africanus.model.shape.dask import gaussian as gaussian_shape
from africanus.util.requirements import requires_optional
_einsum_corr_indices = 'ijkl'
def _brightness_schema(corrs, index):
if corrs == 4:
return "sf" + _einsum_corr_indices[index:index + 2], index + 1
else:
return "sfi", index
def _phase_delay_schema(corrs, index):
return "srf", index
def _spi_schema(corrs, index):
return "s", index
def _gauss_shape_schema(corrs, index):
return "srf", index
def _bl_jones_output_schema(corrs, index):
if corrs == 4:
return "->srfi" + _einsum_corr_indices[index]
else:
return "->srfi"
_rime_term_map = {
'brightness': _brightness_schema,
'phase_delay': _phase_delay_schema,
'spi': _spi_schema,
'gauss_shape': _gauss_shape_schema,
}
def corr_schema(pol):
"""
Parameters
----------
pol : Dataset
Returns
-------
corr_schema : list of list
correlation schema from the POLARIZATION table,
`[[9, 10], [11, 12]]` for example
"""
# Select the single row out
corrs = pol.NUM_CORR.data[0]
corr_types = pol.CORR_TYPE.data[0]
if corrs == 4:
return [[corr_types[0], corr_types[1]],
[corr_types[2], corr_types[3]]] # (2, 2) shape
elif corrs == 2:
return [corr_types[0], corr_types[1]] # (2, ) shape
elif corrs == 1:
return [corr_types[0]] # (1, ) shape
else:
raise ValueError("corrs %d not in (1, 2, 4)" % corrs)
def baseline_jones_multiply(corrs, *args):
names = args[::2]
arrays = args[1::2]
input_einsum_schemas = []
corr_index = 0
for name, array in zip(names, arrays):
try:
# Obtain function for prescribing the input einsum schema
schema_fn = _rime_term_map[name]
except KeyError:
raise ValueError("Unknown RIME term '%s'" % name)
else:
# Extract it and the next corr index
einsum_schema, corr_index = schema_fn(corrs, corr_index)
input_einsum_schemas.append(einsum_schema)
if not len(einsum_schema) == array.ndim:
raise ValueError("%s len(%s) == %d != %s.ndim"
% (name, einsum_schema,
len(einsum_schema), array.shape))
output_schema = _bl_jones_output_schema(corrs, corr_index)
schema = ",".join(input_einsum_schemas) + output_schema
return da.einsum(schema, *arrays)
def create_parser():
p = argparse.ArgumentParser()
p.add_argument("ms")
p.add_argument("-sm", "--sky-model", default="sky-model.txt")
p.add_argument("-rc", "--row-chunks", type=int, default=10000)
p.add_argument("-mc", "--model-chunks", type=int, default=10)
p.add_argument("-b", "--beam", default=None)
p.add_argument("-l", "--l-axis", default="L")
p.add_argument("-m", "--m-axis", default="M")
p.add_argument("-iuvw", "--invert-uvw", action="store_true",
help="Invert UVW coordinates. Useful if we want "
"compare our visibilities against MeqTrees")
return p
@lru_cache(maxsize=16)
def load_beams(beam_file_schema, corr_types, l_axis, m_axis):
class FITSFile(object):
""" Exists so that fits file is closed when last ref is gc'd """
def __init__(self, filename):
self.hdul = hdul = fits.open(filename)
assert len(hdul) == 1
self.__del_ref = weakref.ref(self, lambda r: hdul.close())
# Open files and get headers
beam_files = []
headers = []
for corr, (re, im) in beam_filenames(beam_file_schema, corr_types).items():
re_f = FITSFile(re)
im_f = FITSFile(im)
beam_files.append((corr, (re_f, im_f)))
headers.append((corr, (re_f.hdul[0].header, im_f.hdul[0].header)))
# All FITS headers should agree (apart from DATE)
flat_headers = []
for corr, (re_header, im_header) in headers:
if "DATE" in re_header:
del re_header["DATE"]
if "DATE" in im_header:
del im_header["DATE"]
flat_headers.append(re_header)
flat_headers.append(im_header)
if not all(flat_headers[0] == h for h in flat_headers[1:]):
raise ValueError("BEAM FITS Header Files differ")
# Map FITS header type to NumPy type
BITPIX_MAP = {8: np.dtype('uint8').type, 16: np.dtype('int16').type,
32: np.dtype('int32').type, -32: np.dtype('float32').type,
-64: np.dtype('float64').type}
header = flat_headers[0]
bitpix = header['BITPIX']
try:
dtype = BITPIX_MAP[bitpix]
except KeyError:
raise ValueError("No mapping from BITPIX %s to a numpy type" % bitpix)
else:
dtype = np.result_type(dtype, np.complex64)
if not header['NAXIS'] == 3:
raise ValueError("FITS must have exactly three axes. "
"L or X, M or Y and FREQ. NAXIS != 3")
(l_ax, l_grid), (m_ax, m_grid), (nu_ax, nu_grid) = beam_grids(header,
l_axis,
m_axis)
# Shape of each correlation
shape = (l_grid.shape[0], m_grid.shape[0], nu_grid.shape[0])
    # Axis transpose, FITS is FORTRAN ordered
ax = (nu_ax - 1, m_ax - 1, l_ax - 1)
def _load_correlation(re, im, ax):
# Read real and imaginary for each correlation
return (re.hdul[0].data.transpose(ax) +
im.hdul[0].data.transpose(ax)*1j)
# Create delayed loads of the beam
beam_loader = dask.delayed(_load_correlation)
beam_corrs = [beam_loader(re, im, ax)
for c, (corr, (re, im)) in enumerate(beam_files)]
beam_corrs = [da.from_delayed(bc, shape=shape, dtype=dtype)
for bc in beam_corrs]
# Stack correlations and rechunk to one great big block
beam = da.stack(beam_corrs, axis=3)
beam = beam.rechunk(shape + (len(corr_types),))
# Dask arrays for the beam extents and beam frequency grid
beam_lm_ext = np.array([[l_grid[0], l_grid[-1]], [m_grid[0], m_grid[-1]]])
beam_lm_ext = da.from_array(beam_lm_ext, chunks=beam_lm_ext.shape)
beam_freq_grid = da.from_array(nu_grid, chunks=nu_grid.shape)
return beam, beam_lm_ext, beam_freq_grid
def parse_sky_model(filename, chunks):
"""
Parses a Tigger sky model
Parameters
----------
filename : str
Sky Model filename
chunks : tuple of ints or int
Source chunking strategy
Returns
-------
source_data : dict
Dictionary of source data,
:code:`{'point': (...), 'gauss': (...) }`
"""
sky_model = Tigger.load(filename, verbose=False)
_empty_spectrum = object()
point_radec = []
point_stokes = []
point_spi = []
point_ref_freq = []
gauss_radec = []
gauss_stokes = []
gauss_spi = []
gauss_ref_freq = []
gauss_shape = []
for source in sky_model.sources:
ra = source.pos.ra
dec = source.pos.dec
typecode = source.typecode.lower()
I = source.flux.I # noqa
Q = source.flux.Q
U = source.flux.U
V = source.flux.V
spectrum = (getattr(source, "spectrum", _empty_spectrum)
or _empty_spectrum)
try:
# Extract reference frequency
ref_freq = spectrum.freq0
except AttributeError:
ref_freq = sky_model.freq0
try:
# Extract SPI for I.
# Zero Q, U and V to get 1 on the exponential
spi = [[spectrum.spi]*4]
except AttributeError:
            # Default to a flat spectrum (zero spectral index)
spi = [[0, 0, 0, 0]]
if typecode == "gau":
emaj = source.shape.ex
emin = source.shape.ey
pa = source.shape.pa
gauss_radec.append([ra, dec])
gauss_stokes.append([I, Q, U, V])
gauss_spi.append(spi)
gauss_ref_freq.append(ref_freq)
gauss_shape.append([emaj, emin, pa])
elif typecode == "pnt":
point_radec.append([ra, dec])
point_stokes.append([I, Q, U, V])
point_spi.append(spi)
point_ref_freq.append(ref_freq)
else:
raise ValueError("Unknown source morphology %s" % typecode)
Point = namedtuple("Point", ["radec", "stokes", "spi", "ref_freq"])
Gauss = namedtuple("Gauss", ["radec", "stokes", "spi", "ref_freq",
"shape"])
source_data = {}
if len(point_radec) > 0:
source_data['point'] = Point(
da.from_array(point_radec, chunks=(chunks, -1)),
da.from_array(point_stokes, chunks=(chunks, -1)),
da.from_array(point_spi, chunks=(chunks, 1, -1)),
da.from_array(point_ref_freq, chunks=chunks))
if len(gauss_radec) > 0:
source_data['gauss'] = Gauss(
da.from_array(gauss_radec, chunks=(chunks, -1)),
da.from_array(gauss_stokes, chunks=(chunks, -1)),
da.from_array(gauss_spi, chunks=(chunks, 1, -1)),
da.from_array(gauss_ref_freq, chunks=chunks),
da.from_array(gauss_shape, chunks=(chunks, -1)))
return source_data
def support_tables(args):
"""
Parameters
----------
args : object
Script argument objects
Returns
-------
table_map : dict of Dataset
{name: dataset}
"""
n = {k: '::'.join((args.ms, k)) for k
in ("ANTENNA", "DATA_DESCRIPTION", "FIELD",
"SPECTRAL_WINDOW", "POLARIZATION")}
# All rows at once
lazy_tables = {"ANTENNA": xds_from_table(n["ANTENNA"])}
compute_tables = {
# Fixed shape rows
"DATA_DESCRIPTION": xds_from_table(n["DATA_DESCRIPTION"]),
# Variably shaped, need a dataset per row
"FIELD": xds_from_table(n["FIELD"],
group_cols="__row__"),
"SPECTRAL_WINDOW": xds_from_table(n["SPECTRAL_WINDOW"],
group_cols="__row__"),
"POLARIZATION": xds_from_table(n["POLARIZATION"],
group_cols="__row__"),
}
lazy_tables.update(dask.compute(compute_tables)[0])
return lazy_tables
def _zero_pes(parangles, frequency, dtype_):
""" Create zeroed pointing errors """
ntime, na = parangles.shape
nchan = frequency.shape[0]
return np.zeros((ntime, na, nchan, 2), dtype=dtype_)
def _unity_ant_scales(parangles, frequency, dtype_):
""" Create zeroed antenna scalings """
_, na = parangles[0].shape
nchan = frequency.shape[0]
return np.ones((na, nchan, 2), dtype=dtype_)
def dde_factory(args, ms, ant, field, pol, lm, utime, frequency):
if args.beam is None:
return None
# Beam is requested
corr_type = tuple(pol.CORR_TYPE.data[0])
if not len(corr_type) == 4:
raise ValueError("Need four correlations for DDEs")
parangles = parallactic_angles(utime, ant.POSITION.data,
field.PHASE_DIR.data[0][0])
corr_type_set = set(corr_type)
if corr_type_set.issubset(set([9, 10, 11, 12])):
pol_type = 'linear'
elif corr_type_set.issubset(set([5, 6, 7, 8])):
pol_type = 'circular'
else:
raise ValueError("Cannot determine polarisation type "
"from correlations %s. Constructing "
"a feed rotation matrix will not be "
"possible." % (corr_type,))
# Construct feed rotation
feed_rot = feed_rotation(parangles, pol_type)
dtype = np.result_type(parangles, frequency)
# Create zeroed pointing errors
zpe = da.blockwise(_zero_pes, ("time", "ant", "chan", "comp"),
parangles, ("time", "ant"),
frequency, ("chan",),
dtype, None,
new_axes={"comp": 2},
dtype=dtype)
    # Create unity antenna scaling factors
zas = da.blockwise(_unity_ant_scales, ("ant", "chan", "comp"),
parangles, ("time", "ant"),
frequency, ("chan",),
dtype, None,
new_axes={"comp": 2},
dtype=dtype)
# Load the beam information
beam, lm_ext, freq_map = load_beams(args.beam, corr_type,
args.l_axis, args.m_axis)
# Introduce the correlation axis
beam = beam.reshape(beam.shape[:3] + (2, 2))
beam_dde = beam_cube_dde(beam, lm_ext, freq_map, lm, parangles,
zpe, zas,
frequency)
# Multiply the beam by the feed rotation to form the DDE term
return da.einsum("stafij,tajk->stafik", beam_dde, feed_rot)
def vis_factory(args, source_type, sky_model,
ms, ant, field, spw, pol):
try:
source = sky_model[source_type]
except KeyError:
raise ValueError("Source type '%s' unsupported" % source_type)
# Select single dataset rows
corrs = pol.NUM_CORR.data[0]
frequency = spw.CHAN_FREQ.data[0]
phase_dir = field.PHASE_DIR.data[0][0] # row, poly
lm = radec_to_lm(source.radec, phase_dir)
uvw = -ms.UVW.data if args.invert_uvw else ms.UVW.data
# (source, row, frequency)
phase = phase_delay(lm, uvw, frequency)
# (source, spi, corrs)
# Apply spectral mode to stokes parameters
stokes = spectral_model(source.stokes,
source.spi,
source.ref_freq,
frequency,
base=0)
brightness = convert(stokes, ["I", "Q", "U", "V"],
corr_schema(pol))
bl_jones_args = ["phase_delay", phase]
# Add any visibility amplitude terms
if source_type == "gauss":
bl_jones_args.append("gauss_shape")
bl_jones_args.append(gaussian_shape(uvw, frequency, source.shape))
bl_jones_args.extend(["brightness", brightness])
# Unique times and time index for each row chunk
# The index is not global
meta = np.empty((0,), dtype=tuple)
utime_inv = ms.TIME.data.map_blocks(np.unique, return_inverse=True,
meta=meta, dtype=tuple)
# Need unique times for parallactic angles
nan_chunks = (tuple(np.nan for _ in utime_inv.chunks[0]),)
utime = utime_inv.map_blocks(getitem, 0,
chunks=nan_chunks,
dtype=ms.TIME.dtype)
time_idx = utime_inv.map_blocks(getitem, 1, dtype=np.int32)
jones = baseline_jones_multiply(corrs, *bl_jones_args)
dde = dde_factory(args, ms, ant, field, pol, lm, utime, frequency)
return predict_vis(time_idx, ms.ANTENNA1.data, ms.ANTENNA2.data,
dde, jones, dde, None, None, None)
@requires_optional("dask.array", "Tigger",
"daskms", opt_import_error)
def predict(args):
# Convert source data into dask arrays
sky_model = parse_sky_model(args.sky_model, args.model_chunks)
# Get the support tables
tables = support_tables(args)
ant_ds = tables["ANTENNA"]
field_ds = tables["FIELD"]
ddid_ds = tables["DATA_DESCRIPTION"]
spw_ds = tables["SPECTRAL_WINDOW"]
pol_ds = tables["POLARIZATION"]
# List of write operations
writes = []
# Construct a graph for each DATA_DESC_ID
for xds in xds_from_ms(args.ms,
columns=["UVW", "ANTENNA1", "ANTENNA2", "TIME"],
group_cols=["FIELD_ID", "DATA_DESC_ID"],
chunks={"row": args.row_chunks}):
# Perform subtable joins
ant = ant_ds[0]
field = field_ds[xds.attrs['FIELD_ID']]
ddid = ddid_ds[xds.attrs['DATA_DESC_ID']]
spw = spw_ds[ddid.SPECTRAL_WINDOW_ID.data[0]]
pol = pol_ds[ddid.POLARIZATION_ID.data[0]]
# Select single dataset row out
corrs = pol.NUM_CORR.data[0]
# Generate visibility expressions for each source type
source_vis = [vis_factory(args, stype, sky_model,
xds, ant, field, spw, pol)
for stype in sky_model.keys()]
# Sum visibilities together
vis = sum(source_vis)
# Reshape (2, 2) correlation to shape (4,)
if corrs == 4:
vis = vis.reshape(vis.shape[:2] + (4,))
# Assign visibilities to MODEL_DATA array on the dataset
xds = xds.assign(MODEL_DATA=(("row", "chan", "corr"), vis))
# Create a write to the table
write = xds_to_table(xds, args.ms, ['MODEL_DATA'])
# Add to the list of writes
writes.append(write)
# Submit all graph computations in parallel
with ProgressBar():
dask.compute(writes)
if __name__ == "__main__":
args = create_parser().parse_args()
predict(args)
```
#### File: africanus/rime/parangles_astropy.py
```python
from africanus.util.requirements import requires_optional
try:
from astropy.coordinates import (EarthLocation, SkyCoord,
AltAz, CIRS)
from astropy.time import Time
from astropy import units
except ImportError as e:
astropy_import_error = e
have_astropy_parangles = False
else:
astropy_import_error = None
have_astropy_parangles = True
@requires_optional('astropy', astropy_import_error)
def astropy_parallactic_angles(times, antenna_positions, field_centre):
"""
Computes parallactic angles per timestep for the given
reference antenna position and field centre.
"""
ap = antenna_positions
fc = field_centre
# Convert from MJD second to MJD
times = Time(times / 86400.00, format='mjd', scale='utc')
ap = EarthLocation.from_geocentric(
ap[:, 0], ap[:, 1], ap[:, 2], unit='m')
fc = SkyCoord(ra=fc[0], dec=fc[1], unit=units.rad, frame='fk5')
pole = SkyCoord(ra=0, dec=90, unit=units.deg, frame='fk5')
cirs_frame = CIRS(obstime=times)
pole_cirs = pole.transform_to(cirs_frame)
fc_cirs = fc.transform_to(cirs_frame)
altaz_frame = AltAz(location=ap[None, :], obstime=times[:, None])
pole_altaz = pole_cirs[:, None].transform_to(altaz_frame)
fc_altaz = fc_cirs[:, None].transform_to(altaz_frame)
return fc_altaz.position_angle(pole_altaz)
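# Illustrative usage sketch (not part of the original module); the shapes and
# units are assumptions based on the docstring above: `times` in MJD seconds,
# `antenna_positions` as an (n_ant, 3) ECEF array in metres and
# `field_centre` as an (ra, dec) pair in radians.
#
#   import numpy as np
#   times = np.array([4.9e9, 4.9e9 + 60.0])                     # two timesteps (MJD seconds)
#   ant_pos = np.array([[-5464588.0, -2493000.0, 2150653.0]])   # one antenna (metres, hypothetical)
#   field_centre = np.array([1.0, -0.5])                        # radians
#   pa = astropy_parallactic_angles(times, ant_pos, field_centre)
#   # pa has shape (n_time, n_ant) and holds astropy Angle position angles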
```
#### File: rime/tests/test_rime.py
```python
import numpy as np
import pytest
def rf(*a, **kw):
return np.random.random(*a, **kw)
def rc(*a, **kw):
return rf(*a, **kw) + 1j*rf(*a, **kw)
@pytest.mark.parametrize("convention, sign", [
('fourier', 1),
('casa', -1)
])
def test_phase_delay(convention, sign):
from africanus.rime import phase_delay
uvw = np.random.random(size=(100, 3))
lm = np.random.random(size=(10, 2))
frequency = np.linspace(.856e9, .856e9*2, 64, endpoint=True)
from africanus.constants import minus_two_pi_over_c
# Test complex phase at a particular index in the output
uvw_i, lm_i, freq_i = 2, 3, 5
u, v, w = [1, 2, 3]
l, m = [0.1, 0.2]
freq = 0.856e9
# Set up values in the input
uvw[uvw_i] = [u, v, w]
lm[lm_i] = [l, m]
frequency[freq_i] = freq
# Compute complex phase
complex_phase = phase_delay(lm, uvw, frequency, convention=convention)
# Test singular value vs a point in the output
n = np.sqrt(1.0 - l**2 - m**2) - 1.0
phase = sign*minus_two_pi_over_c*(u*l + v*m + w*n)*freq
assert np.all(np.exp(1j*phase) == complex_phase[lm_i, uvw_i, freq_i])
def test_feed_rotation():
import numpy as np
from africanus.rime import feed_rotation
parangles = np.random.random((10, 5))
pa_sin = np.sin(parangles)
pa_cos = np.cos(parangles)
fr = feed_rotation(parangles, feed_type='linear')
np_expr = np.stack([pa_cos, pa_sin, -pa_sin, pa_cos], axis=2)
assert np.allclose(fr, np_expr.reshape(10, 5, 2, 2))
fr = feed_rotation(parangles, feed_type='circular')
zeros = np.zeros_like(pa_sin)
np_expr = np.stack([pa_cos - 1j*pa_sin, zeros,
zeros, pa_cos + 1j*pa_sin], axis=2)
assert np.allclose(fr, np_expr.reshape(10, 5, 2, 2))
@pytest.mark.parametrize("convention, sign", [
('fourier', 1),
('casa', -1)
])
def test_dask_phase_delay(convention, sign):
da = pytest.importorskip('dask.array')
from africanus.rime import phase_delay as np_phase_delay
from africanus.rime.dask import phase_delay as dask_phase_delay
# So that 1 > 1 - l**2 - m**2 >= 0
lm = np.random.random(size=(10, 2))*0.01
uvw = np.random.random(size=(100, 3))
frequency = np.linspace(.856e9, .856e9*2, 64, endpoint=True)
dask_lm = da.from_array(lm, chunks=(5, 2))
dask_uvw = da.from_array(uvw, chunks=(25, 3))
dask_frequency = da.from_array(frequency, chunks=16)
dask_phase = dask_phase_delay(dask_lm, dask_uvw, dask_frequency,
convention=convention)
np_phase = np_phase_delay(lm, uvw, frequency, convention=convention)
# Should agree completely
assert np.all(np_phase == dask_phase.compute())
def test_dask_feed_rotation():
da = pytest.importorskip('dask.array')
import numpy as np
from africanus.rime import feed_rotation as np_feed_rotation
from africanus.rime.dask import feed_rotation
parangles = np.random.random((10, 5))
dask_parangles = da.from_array(parangles, chunks=(5, (2, 3)))
np_fr = np_feed_rotation(parangles, feed_type='linear')
assert np.all(np_fr == feed_rotation(dask_parangles, feed_type='linear'))
np_fr = np_feed_rotation(parangles, feed_type='circular')
assert np.all(np_fr == feed_rotation(dask_parangles, feed_type='circular'))
```
#### File: africanus/util/cuda.py
```python
import numpy as np
_array_types = [np.ndarray]
try:
import dask.array as da
except ImportError:
pass
else:
_array_types.append(da.Array)
try:
import cupy as cp
except ImportError:
pass
else:
_array_types.append(cp.ndarray)
_array_types = tuple(_array_types)
cuda_fns = {
np.dtype(np.float32): {
'abs': 'fabsf',
'cos': 'cosf',
'floor': 'floorf',
'make2': 'make_float2',
'max': 'fmaxf',
'min': 'fminf',
'rsqrt': 'rsqrtf',
'sqrt': 'sqrtf',
'sin': 'sinf',
'sincos': 'sincosf',
'sincospi': 'sincospif',
},
np.dtype(np.float64): {
'abs': 'fabs',
'cos': 'cos',
'floor': 'floor',
'make2': 'make_double2',
'max': 'fmax',
'min': 'fmin',
'rsqrt': 'rsqrt',
'sin': 'sin',
'sincos': 'sincos',
'sincospi': 'sincospi',
'sqrt': 'sqrt',
},
}
numpy_to_cuda_type_map = {
np.dtype('int8'): "char",
np.dtype('uint8'): "unsigned char",
np.dtype('int16'): "short",
np.dtype('uint16'): "unsigned short",
np.dtype('int32'): "int",
np.dtype('uint32'): "unsigned int",
np.dtype('float32'): "float",
np.dtype('float64'): "double",
np.dtype('complex64'): "float2",
np.dtype('complex128'): "double2"
}
# Also map the types
numpy_to_cuda_type_map.update({k.type: v
for k, v
in numpy_to_cuda_type_map.items()})
def grids(dims, blocks):
"""
Determine the grid size, given space dimensions sizes and blocks
Parameters
----------
    dims : tuple of ints
        `(x, y, z)` tuple of space dimension sizes
    blocks : tuple of ints
        `(x, y, z)` tuple of block sizes
Returns
-------
tuple
`(x, y, z)` grid size tuple
"""
if not len(dims) == 3:
raise ValueError("dims must be an (x, y, z) tuple. "
"CUDA dimension ordering is inverted compared "
"to NumPy")
if not len(blocks) == 3:
raise ValueError("blocks must be an (x, y, z) tuple. "
"CUDA dimension ordering is inverted compared "
"to NumPy")
return tuple((d + b - 1) // b for d, b in zip(dims, blocks))
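# Worked example for grids(): with a 1024 x 768 x 1 problem space and
# 32 x 24 x 1 thread blocks, each dimension is rounded up to a whole number
# of blocks:
#   grids((1024, 768, 1), (32, 24, 1))  ->  (32, 32, 1)
# since (1024 + 31) // 32 = 32, (768 + 23) // 24 = 32 and (1 + 0) // 1 = 1.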
def cuda_function(function_name, dtype):
try:
type_map = cuda_fns[dtype]
except KeyError:
raise ValueError("No registered functions for type %s" % dtype)
try:
return type_map[function_name]
except KeyError:
raise ValueError("Unknown CUDA function %s" % function_name)
def cuda_type(dtype):
if isinstance(dtype, _array_types):
dtype = dtype.dtype
try:
return numpy_to_cuda_type_map[dtype]
except KeyError:
raise ValueError("No registered map for type %s" % dtype)
```
#### File: africanus/util/docs.py
```python
import os
import re
from string import Template
_on_rtd = bool(os.environ.get("READTHEDOCS"))
def on_rtd():
return _on_rtd
def mod_docs(docstring, replacements):
for search, replace in replacements:
docstring = docstring.replace(search, replace)
return docstring
def doc_tuple_to_str(doc_tuple, replacements=None):
fields = getattr(doc_tuple, "_fields", None)
if fields is not None:
fields = (getattr(doc_tuple, f) for f in doc_tuple._fields)
elif isinstance(doc_tuple, dict):
fields = doc_tuple.values()
if replacements is not None:
fields = (mod_docs(f, replacements) for f in fields)
return ''.join(fields)
class DefaultOut(object):
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return self.arg
__str__ = __repr__
class DocstringTemplate(Template):
"""
    Overrides the ${identifier} braced pattern in the string Template
with a $(identifier) braced pattern
"""
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
\((?P<braced>%(id)s)\) | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
""" % {'delim': re.escape(Template.delimiter),
'id': Template.idpattern}
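    # Illustrative substitution (not part of the original module): with the
    # $(identifier) pattern above, braced parameters use parentheses instead
    # of curly braces, e.g.
    #   DocstringTemplate("shape $(dims) of type $(type)").substitute(
    #       dims="(row, chan)", type="float64")
    #   ->  'shape (row, chan) of type float64'
    # A literal "$$" still escapes to a single "$".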
```
#### File: africanus/util/nvcc.py
```python
import ast
import contextlib
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from os.path import join as pjoin
from pkg_resources import resource_filename
import distutils
from distutils import errors
from distutils import msvccompiler
from distutils import unixccompiler
from africanus.util.code import format_code
from africanus.util.requirements import requires_optional
try:
import cupy as cp
except ImportError as e:
cupy_import_error = e
else:
cupy_import_error = None
log = logging.getLogger(__name__)
def get_path(key):
return os.environ.get(key, '').split(os.pathsep)
def search_on_path(filenames):
for p in get_path('PATH'):
for filename in filenames:
full = os.path.join(p, filename)
if os.path.exists(full):
return os.path.abspath(full)
return None
PLATFORM_DARWIN = sys.platform.startswith('darwin')
PLATFORM_LINUX = sys.platform.startswith('linux')
PLATFORM_WIN32 = sys.platform.startswith('win32')
minimum_cuda_version = 8000
minimum_cudnn_version = 5000
maximum_cudnn_version = 7999
_cuda_path = 'NOT_INITIALIZED'
_compiler_base_options = None
_cuda_info = None
@contextlib.contextmanager
def _tempdir():
temp_dir = tempfile.mkdtemp()
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def get_cuda_path():
global _cuda_path
# Use a magic word to represent the cache not filled because None is a
# valid return value.
if _cuda_path != 'NOT_INITIALIZED':
return _cuda_path
nvcc_path = search_on_path(('nvcc', 'nvcc.exe'))
cuda_path_default = None
if nvcc_path is None:
log.warn('nvcc not in path. Please set path to nvcc.')
else:
cuda_path_default = os.path.normpath(
os.path.join(os.path.dirname(nvcc_path), '..'))
cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows
if len(cuda_path) > 0 and cuda_path != cuda_path_default:
log.warn('nvcc path != CUDA_PATH')
log.warn('nvcc path: %s' % cuda_path_default)
log.warn('CUDA_PATH: %s' % cuda_path)
if os.path.exists(cuda_path):
_cuda_path = cuda_path
elif cuda_path_default is not None:
_cuda_path = cuda_path_default
elif os.path.exists('/usr/local/cuda'):
_cuda_path = '/usr/local/cuda'
else:
_cuda_path = None
return _cuda_path
def get_nvcc_path():
nvcc = os.environ.get('NVCC', None)
if nvcc:
return distutils.split_quoted(nvcc)
cuda_path = get_cuda_path()
if cuda_path is None:
return None
if PLATFORM_WIN32:
nvcc_bin = 'bin/nvcc.exe'
else:
nvcc_bin = 'bin/nvcc'
nvcc_path = os.path.join(cuda_path, nvcc_bin)
if os.path.exists(nvcc_path):
return [nvcc_path]
else:
return None
def get_compiler_setting():
cuda_path = get_cuda_path()
include_dirs = []
library_dirs = []
define_macros = []
if cuda_path:
include_dirs.append(os.path.join(cuda_path, 'include'))
if PLATFORM_WIN32:
library_dirs.append(os.path.join(cuda_path, 'bin'))
library_dirs.append(os.path.join(cuda_path, 'lib', 'x64'))
else:
library_dirs.append(os.path.join(cuda_path, 'lib64'))
library_dirs.append(os.path.join(cuda_path, 'lib'))
if PLATFORM_DARWIN:
library_dirs.append('/usr/local/cuda/lib')
if PLATFORM_WIN32:
nvtoolsext_path = os.environ.get('NVTOOLSEXT_PATH', '')
if os.path.exists(nvtoolsext_path):
include_dirs.append(os.path.join(nvtoolsext_path, 'include'))
library_dirs.append(os.path.join(nvtoolsext_path, 'lib', 'x64'))
else:
define_macros.append(('CUPY_NO_NVTX', '1'))
return {
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'define_macros': define_macros,
'language': 'c++',
}
def _match_output_lines(output_lines, regexs):
# Matches regular expressions `regexs` against `output_lines` and finds the
# consecutive matching lines from `output_lines`.
# `None` is returned if no match is found.
if len(output_lines) < len(regexs):
return None
matches = [None] * len(regexs)
for i in range(len(output_lines) - len(regexs)):
for j in range(len(regexs)):
m = re.match(regexs[j], output_lines[i + j])
if not m:
break
matches[j] = m
else:
# Match found
return matches
# No match
return None
def get_compiler_base_options():
"""Returns base options for nvcc compiler.
"""
global _compiler_base_options
if _compiler_base_options is None:
_compiler_base_options = _get_compiler_base_options()
return _compiler_base_options
def _get_compiler_base_options():
# Try compiling a dummy code.
# If the compilation fails, try to parse the output of compilation
# and try to compose base options according to it.
nvcc_path = get_nvcc_path()
with _tempdir() as temp_dir:
test_cu_path = os.path.join(temp_dir, 'test.cu')
test_out_path = os.path.join(temp_dir, 'test.out')
with open(test_cu_path, 'w') as f:
f.write('int main() { return 0; }')
proc = subprocess.Popen(
nvcc_path + ['-o', test_out_path, test_cu_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate()
stderrlines = stderrdata.split(b'\n')
if proc.returncode != 0:
# No supported host compiler
matches = _match_output_lines(
stderrlines,
[
b'^ERROR: No supported gcc/g\\+\\+ host compiler found, '
b'but .* is available.$',
b'^ *Use \'nvcc (.*)\' to use that instead.$',
])
if matches is not None:
base_opts = matches[1].group(1)
base_opts = base_opts.decode('utf8').split(' ')
return base_opts
# Unknown error
raise RuntimeError(
'Encountered unknown error while testing nvcc:\n' +
stderrdata.decode('utf8'))
return []
def _get_cuda_info():
nvcc_path = get_nvcc_path()
code = '''
#include <cuda.h>
#include <stdio.h>
int main(int argc, char* argv[]) {
int nDevices;
cudaGetDeviceCount(&nDevices);
printf("{\\n");
printf("'cuda_version': %d,\\n", CUDA_VERSION);
printf("'devices': [\\n");
for(int d=0; d < nDevices; ++d) {
cudaDeviceProp props;
cudaGetDeviceProperties(&props, d);
printf("{\\n");
printf("'name' :'%s',\\n", props.name);
printf("'major' : %d,\\n", props.major);
printf("'minor' : %d,\\n", props.minor);
printf("'total_global_mem' : %u,\\n", props.totalGlobalMem);
printf("'warp_size' : %u,\\n", props.warpSize);
printf("'max_threads_per_block' : %u,\\n", props.maxThreadsPerBlock);
printf("'max_thread_size' : (%u,%u,%u),\\n",
props.maxThreadsDim[0],
props.maxThreadsDim[1],
props.maxThreadsDim[2]);
printf("'max_grid_size' : (%u,%u,%u),\\n",
props.maxGridSize[0],
props.maxGridSize[1],
props.maxGridSize[2]);
printf("'device_overlap' : %s,\\n", props.deviceOverlap ? "True" : "False");
printf("'async_engines' : %u,\\n", props.asyncEngineCount);
printf("'multiprocessors' : %u,\\n", props.multiProcessorCount);
printf("},\\n");
}
printf("]\\n");
printf("}\\n");
return 0;
}
''' # noqa
with _tempdir() as temp_dir:
test_cu_path = os.path.join(temp_dir, 'test.cu')
test_out_path = os.path.join(temp_dir, 'test.out')
with open(test_cu_path, 'w') as f:
f.write(code)
proc = subprocess.Popen(
nvcc_path + ['-o', test_out_path, test_cu_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate()
if proc.returncode != 0:
raise RuntimeError("Cannot determine "
"compute architecture {0}"
.format(stderrdata))
try:
out = subprocess.check_output(test_out_path)
except Exception as e:
msg = 'Cannot execute a stub file.\nOriginal error: {0}'.format(e)
raise Exception(msg)
        return ast.literal_eval(out.decode('utf8'))
def get_cuda_info():
global _cuda_info
if _cuda_info is None:
_cuda_info = _get_cuda_info()
return _cuda_info
def _format_cuda_version(version):
return str(version)
def get_cuda_version(formatted=False):
"""Return CUDA Toolkit version cached in check_cuda_version()."""
_cuda_version = get_cuda_info()['cuda_version']
if _cuda_version < minimum_cuda_version:
        raise ValueError('CUDA version is too old: %d. '
                         'CUDA v8.0 or newer is required' % _cuda_version)
return str(_cuda_version) if formatted else _cuda_version
def get_gencode_options():
return ["--generate-code=arch=compute_{a},code=sm_{a}".format(
a=dev['major']*10 + dev['minor'])
for dev in get_cuda_info()['devices']]
class _UnixCCompiler(unixccompiler.UnixCCompiler):
src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
src_extensions.append('.cu')
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
# For sources other than CUDA C ones, just call the super class method.
if os.path.splitext(src)[1] != '.cu':
return unixccompiler.UnixCCompiler._compile(
self, obj, src, ext, cc_args, extra_postargs, pp_opts)
# For CUDA C source files, compile them with NVCC.
_compiler_so = self.compiler_so
try:
nvcc_path = get_nvcc_path()
base_opts = get_compiler_base_options()
self.set_executable('compiler_so', nvcc_path)
cuda_version = get_cuda_version() # noqa: triggers cuda inspection
postargs = get_gencode_options() + [
'-O2', '--compiler-options="-fPIC"']
postargs += extra_postargs
# print('NVCC options:', postargs)
return unixccompiler.UnixCCompiler._compile(
self, obj, src, ext, base_opts + cc_args, postargs, pp_opts)
finally:
self.compiler_so = _compiler_so
class _MSVCCompiler(msvccompiler.MSVCCompiler):
_cu_extensions = ['.cu']
    src_extensions = list(msvccompiler.MSVCCompiler.src_extensions)
src_extensions.extend(_cu_extensions)
def _compile_cu(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
# Compile CUDA C files, mainly derived from UnixCCompiler._compile().
macros, objects, extra_postargs, pp_opts, _build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compiler_so = get_nvcc_path()
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
cuda_version = get_cuda_version() # noqa: triggers cuda inspection
postargs = get_gencode_options() + ['-O2']
postargs += ['-Xcompiler', '/MD']
postargs += extra_postargs
# print('NVCC options:', postargs)
for obj in objects:
try:
src, ext = _build[obj]
except KeyError:
continue
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] + postargs)
except errors.DistutilsExecError as e:
raise errors.CompileError(str(e))
return objects
def compile(self, sources, **kwargs):
# Split CUDA C sources and others.
cu_sources = []
other_sources = []
for source in sources:
if os.path.splitext(source)[1] == '.cu':
cu_sources.append(source)
else:
other_sources.append(source)
# Compile source files other than CUDA C ones.
other_objects = msvccompiler.MSVCCompiler.compile(
self, other_sources, **kwargs)
# Compile CUDA C sources.
cu_objects = self._compile_cu(cu_sources, **kwargs)
# Return compiled object filenames.
return other_objects + cu_objects
_compiler = None
def get_compiler():
global _compiler
if _compiler is None:
if not PLATFORM_WIN32:
_compiler = _UnixCCompiler()
else:
_compiler = _MSVCCompiler()
return _compiler
@contextlib.contextmanager
def stdchannel_redirected(stdchannel, dest_filename):
"""
A context manager to temporarily redirect stdout or stderr
e.g.:
with stdchannel_redirected(sys.stderr, os.devnull):
if compiler.has_function('clock_gettime', libraries=['rt']):
libraries.append('rt')
"""
    oldstdchannel = dest_file = None
    try:
oldstdchannel = os.dup(stdchannel.fileno())
dest_file = open(dest_filename, 'w')
os.dup2(dest_file.fileno(), stdchannel.fileno())
yield
finally:
if oldstdchannel is not None:
os.dup2(oldstdchannel, stdchannel.fileno())
if dest_file is not None:
dest_file.close()
@requires_optional("cupy", cupy_import_error)
def compile_using_nvcc(source, options=None, arch=None, filename='kern.cu'):
options = options or []
if arch is None:
cuda_info = get_cuda_info()
arch = min([dev['major']*10 + dev['minor']
for dev in cuda_info['devices']])
cc = get_compiler()
settings = get_compiler_setting()
arch = "--generate-code=arch=compute_{a},code=sm_{a}".format(a=arch)
options += ['-cubin']
cupy_path = resource_filename("cupy", pjoin("core", "include"))
settings['include_dirs'].append(cupy_path)
with _tempdir() as tmpdir:
tmpfile = pjoin(tmpdir, filename)
with open(tmpfile, "w") as f:
f.write(source)
try:
stderr_file = pjoin(tmpdir, "stderr.txt")
with stdchannel_redirected(sys.stderr, stderr_file):
objects = cc.compile([tmpfile],
include_dirs=settings['include_dirs'],
macros=settings['define_macros'],
extra_postargs=options)
except errors.CompileError as e:
with open(stderr_file, "r") as f:
errs = f.read()
lines = ["The following source code",
format_code(source),
"",
"created the following compilation errors",
"",
errs.strip(),
str(e).strip()]
ex = errors.CompileError("\n".join(lines))
            raise ex.with_traceback(sys.exc_info()[2])
assert len(objects) == 1
mod = cp.cuda.function.Module()
mod.load_file(objects[0])
return mod
```
#### File: africanus/util/requirements.py
```python
import importlib
from decorator import decorate
from africanus.util.docs import on_rtd
from africanus.util.testing import in_pytest, force_missing_pkg_exception
def _missing_packages(fn, packages, import_errors):
if len(import_errors) > 0:
import_err_str = "\n".join((str(e) for e in import_errors))
return ("%s requires installation of "
"the following packages: %s.\n"
"%s" % (fn, packages, import_err_str))
else:
return ("%s requires installation of the following packages: %s. "
% (fn, tuple(packages)))
class MissingPackageException(Exception):
pass
def requires_optional(*requirements):
"""
Decorator returning either the original function,
or a dummy function raising a
:class:`MissingPackageException` when called,
depending on whether the supplied ``requirements``
are present.
If packages are missing and called within a test, the
dummy function will call :func:`pytest.skip`.
Used in the following way:
.. code-block:: python
try:
from scipy import interpolate
except ImportError as e:
# https://stackoverflow.com/a/29268974/1611416, pep 3110 and 344
scipy_import_error = e
else:
scipy_import_error = None
@requires_optional('scipy', scipy_import_error)
def function(*args, **kwargs):
return interpolate(...)
Parameters
----------
requirements : iterable of string, None or ImportError
Sequence of package names required by the decorated function.
ImportError exceptions (or None, indicating their absence)
may also be supplied and will be immediately re-raised within
the decorator. This is useful for tracking down problems
in user import logic.
Returns
-------
callable
Either the original function if all ``requirements``
are available or a dummy function that throws
a :class:`MissingPackageException` or skips a pytest.
"""
# Return a bare wrapper if we're on readthedocs
if on_rtd():
def _function_decorator(fn):
def _wrapper(*args, **kwargs):
pass
return decorate(fn, _wrapper)
return _function_decorator
have_requirements = True
missing_requirements = []
honour_pytest_marker = True
actual_imports = []
import_errors = []
# Try imports
for requirement in requirements:
# Ignore
if requirement is None:
continue
# Reraise any supplied ImportErrors
elif isinstance(requirement, ImportError):
import_errors.append(requirement)
# An actual package, try to import it
elif isinstance(requirement, str):
try:
importlib.import_module(requirement)
except ImportError:
missing_requirements.append(requirement)
have_requirements = False
else:
actual_imports.append(requirement)
# We should force exceptions, even if we're in a pytest test case
elif requirement == force_missing_pkg_exception:
honour_pytest_marker = False
# Just wrong
else:
raise TypeError("requirements must be "
"None, strings or ImportErrors. "
"Received %s" % requirement)
# Requested requirement import succeeded, but there were user
# import errors that we now re-raise
if have_requirements and len(import_errors) > 0:
raise ImportError("Successfully imported %s "
"but the following user-supplied "
"ImportErrors ocurred: \n%s" %
(actual_imports,
'\n'.join((str(e) for e in import_errors))))
def _function_decorator(fn):
# We have requirements, return the original function
if have_requirements:
return fn
# We don't have requirements, produce a failing wrapper
def _wrapper(*args, **kwargs):
""" Empty docstring """
# We're running test cases
if honour_pytest_marker and in_pytest():
try:
import pytest
except ImportError as e:
raise ImportError("Marked as in a pytest "
"test case, but pytest cannot "
"be imported! %s" % str(e))
else:
msg = _missing_packages(
fn.__name__, missing_requirements, import_errors)
pytest.skip(msg)
# Raise the exception
else:
msg = _missing_packages(
fn.__name__, missing_requirements, import_errors)
raise MissingPackageException(msg)
return decorate(fn, _wrapper)
return _function_decorator
``` |
{
"source": "JoshW-7/AdventOfCode",
"score": 3
} |
#### File: 2019/Day 2/part1.py
```python
class CPU:
def __init__(self):
self.running = True
self.position = 0
with open("input.txt") as file:
self.bytecode = [int(num) for num in file.readline().split(",")]
def fix(self):
self.bytecode[1] = 12
self.bytecode[2] = 2
def run(self):
while self.running:
self.decode()
def decode(self):
opcode = self.bytecode[self.position]
if opcode == 99:
self.running = False
elif opcode == 1:
location_1 = self.bytecode[self.position+1]
location_2 = self.bytecode[self.position+2]
location_3 = self.bytecode[self.position+3]
self.bytecode[location_3] = self.bytecode[location_1] + self.bytecode[location_2]
elif opcode == 2:
location_1 = self.bytecode[self.position+1]
location_2 = self.bytecode[self.position+2]
location_3 = self.bytecode[self.position+3]
self.bytecode[location_3] = self.bytecode[location_1] * self.bytecode[location_2]
self.position += 4
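# Example from the puzzle description (for reference, since this CPU loads its
# program from input.txt): running [1,0,0,0,99] executes opcode 1 (add) with
# the values at positions 0 and 0 (1 + 1), stores the result at position 0 and
# then halts on 99, leaving [2,0,0,0,99] in memory.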
cpu = CPU()
cpu.fix()
cpu.run()
print(cpu.bytecode[0])
```
#### File: 2019/Day 2/part2.py
```python
class CPU:
def __init__(self):
self.reset()
def reset(self):
self.running = True
self.position = 0
with open("input.txt") as file:
self.bytecode = [int(num) for num in file.readline().split(",")]
def set_noun(self, value):
self.bytecode[1] = value
def set_verb(self, value):
self.bytecode[2] = value
def get_result(self):
while self.running:
self.decode()
return self.bytecode[0]
def decode(self):
opcode = self.bytecode[self.position]
if opcode == 99:
self.running = False
elif opcode == 1:
location_1 = self.bytecode[self.position+1]
location_2 = self.bytecode[self.position+2]
location_3 = self.bytecode[self.position+3]
self.bytecode[location_3] = self.bytecode[location_1] + self.bytecode[location_2]
elif opcode == 2:
location_1 = self.bytecode[self.position+1]
location_2 = self.bytecode[self.position+2]
location_3 = self.bytecode[self.position+3]
self.bytecode[location_3] = self.bytecode[location_1] * self.bytecode[location_2]
self.position += 4
cpu = CPU()
result = 0
done = False
while not done:
for i in range(0, 100):
if done:
break
for j in range(0, 100):
cpu.reset()
cpu.set_noun(i)
cpu.set_verb(j)
result = cpu.get_result()
if result == 19690720:
done = True
break
print(100 * cpu.bytecode[1] + cpu.bytecode[2])
```
#### File: 2019/Day 3/part1.py
```python
def relative_coordinates(move):
return {
"L": (-1*int(move[1:]), 0),
"R": (int(move[1:]), 0),
"U": (0, -1*int(move[1:])),
"D": (0, int(move[1:])),
}.get(move[0])
def create_wire(wire, name):
global coordinates
global crossings
position = [0, 0]
for move in wire:
movement = relative_coordinates(move)
if movement[0]:
for x in range(1, abs(movement[0])+1):
if movement[0] < 0:
x *= -1
current_value = coordinates.get((position[0]+x, position[1]))
if current_value not in [None, name]:
crossings.append((position[0]+x, position[1]))
coordinates[(position[0]+x, position[1])] = name
elif movement[1]:
for y in range(1, abs(movement[1])+1):
if movement[1] < 0:
y *= -1
current_value = coordinates.get((position[0], position[1]+y))
if current_value not in [None, name]:
crossings.append((position[0], position[1]+y))
coordinates[(position[0], position[1]+y)] = name
position[0] += movement[0]
position[1] += movement[1]
def manhattan_distance(coordinate):
return abs(coordinate[0]) + abs(coordinate[1])
with open("input.txt") as file:
wires = []
for line in file.readlines():
wires.append(line.split(","))
coordinates = {}
crossings = []
create_wire(wires[0], "w1")
create_wire(wires[1], "w2")
print(min([manhattan_distance(coordinate) for coordinate in crossings]))
```
#### File: 2020/Day 18/part1.py
```python
import re
with open("input.txt") as file:
lines = file.read().split("\n")
def evaluate(statement):
while not statement.isdigit():
if m := re.match(r"[0-9]* [+\-*] [0-9]*", statement):
s = m.group(0)
result = eval(s)
statement = statement[0:statement.find(s)] + str(result) + statement[statement.find(s) + len(s):]
return statement
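# evaluate() applies operators strictly left to right with no precedence,
# e.g. evaluate("1 + 2 * 3 + 4") reduces "1 + 2" -> 3, then "3 * 3" -> 9,
# then "9 + 4" -> 13, and returns "13".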
#lines = ["((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2"]
results = []
for line in lines:
# Remove parentheses
while True:
if ps := re.findall(r"\(([^)()]*)\)", line):
for p in ps:
new_str = evaluate(p)
print(line)
line = line[0:line.find(p)-1] + new_str + line[1 + line.find(p) + len(p):]
print(line)
print()
else:
break
#print(line)
results.append(int(evaluate(line)))
print(sum(results))
```
#### File: 2020/Day 24/part2.py
```python
import re
import copy
def parse(text):
moves = []
while len(text) > 0:
if m := re.match(r"([e|w]{1})", text):
moves.append(m.group(1))
text = text[m.end():]
elif m := re.match(r"([se|sw|ne|nw]{2})", text):
moves.append(m.group(1))
text = text[m.end():]
return moves
def setup():
global tiles
for moves in tile_locations:
x = 0
y = 0
for move in moves:
dx,dy = {
"ne": (0, -1),
"e": (1, 0),
"se": (1, 1),
"sw": (-1, 1),
"w": (-1, 0),
"nw": (-1, -1),
}[move]
x += dx
y += dy
if (x, y) not in tiles:
tiles[(x, y)] = "white"
if tiles[(x, y)] == "white":
tiles[(x, y)] = "black"
else:
tiles[(x, y)] = "white"
def get_adjacent(x, y):
global tiles
relative_coords = [(0, 1), (1, 0), (1, -1), (0, -1), (-1, 0), (-1, 1)]
neighbors = []
for relative_coord in relative_coords:
neighbors.append((x + relative_coord[0], y + relative_coord[1]))
return neighbors
def count_black():
global tiles
count = 0
for color in tiles.values():
if color == "black":
count += 1
return count
with open("input.txt") as file:
lines = file.read().split("\n")
tile_locations = []
for line in lines:
tile_locations.append(parse(line))
tiles = {}
setup()
print(f"Day 0: {count_black()}")
for day in range(1, 101):
changes = {}
new_tiles = []
for (x,y),color in tiles.items():
black_count = 0
for coord in get_adjacent(x, y):
if coord not in tiles:
new_tiles.append(coord)
else:
if tiles[coord] == "black":
black_count += 1
if color == "white" and black_count == 2:
changes[(x, y)] = "black"
if color == "black":
if black_count == 0 or black_count > 2:
changes[(x, y)] = "white"
for new_coord in new_tiles:
tiles[new_coord] = "white"
black_count = 0
        for coord in get_adjacent(*new_coord):
if coord in tiles and tiles[coord] == "black":
black_count += 1
if black_count == 2:
changes[new_coord] = "black"
for coord,color in changes.items():
tiles[coord] = color
print(f"Day {day}: {count_black()}")
```
#### File: 2020/Day 5/part2.py
```python
def get_id(seat_encoding):
row_data = seat_encoding[0:7]
col_data = seat_encoding[7:]
# Get row
row = 0
for i,c in enumerate(reversed(row_data)):
if c == "B":
row |= 1 << i
# Get col
col = 0
for i,c in enumerate(reversed(col_data)):
if c == "R":
col |= 1 << i
return row * 8 + col
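# Boarding passes are effectively 10-bit binary numbers: B/R are 1 bits and
# F/L are 0 bits. Worked example from the puzzle: "FBFBBFFRLR" gives row 44
# and column 5, so get_id("FBFBBFFRLR") == 44 * 8 + 5 == 357.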
with open("input.txt") as file:
lines = file.read().split("\n")
seats = []
for line in lines:
seat = get_id(line)
seats.append(seat)
minimum = min(seats)
for i in range(len(seats)):
if i > minimum:
if i not in seats:
print(i)
break
```
#### File: 2020/Day 8/part2.py
```python
from copy import deepcopy
class CPU:
def __init__(self, program=[]):
self.pc = 0
self.accumulator = 0
self.program = program
self.memory_size = len(self.program)
self.used_indexes = set()
self.running = True
self.valid = False
def run(self):
while self.running:
self.decode()
def decode(self):
self.used_indexes.add(self.pc)
op, args = self.program[self.pc]
        if func := getattr(self, op, None):
func(args)
else:
print(f"Unsupported opcode: {op}")
if self.pc in self.used_indexes:
self.running = False
elif self.pc >= self.memory_size:
self.valid = True
self.running = False
def nop(self, args):
self.pc += 1
def acc(self, args):
self.accumulator += int(args[0])
self.pc += 1
def jmp(self, args):
self.pc += int(args[0])
with open("input.txt") as file:
lines = file.read().split("\n")
program = []
for line in lines:
op, *args = line.split(" ")
program.append([op, args])
jmp_lines = [i for i,line in enumerate(program) if line[0] == "jmp"]
for index in jmp_lines:
temp_program = deepcopy(program)
temp_program[index][0] = "nop"
cpu = CPU(program=temp_program)
cpu.run()
if cpu.valid:
print(cpu.accumulator)
nop_lines = [i for i,line in enumerate(program) if line[0] == "nop"]
for index in nop_lines:
temp_program = deepcopy(program)
temp_program[index][0] = "jmp"
cpu = CPU(program=temp_program)
cpu.run()
if cpu.valid:
print(cpu.accumulator)
``` |
{
"source": "joshwalawender/CSU_initializer",
"score": 2
} |
#### File: CSU_initializer/CSU_initializer_plugin/CSU_initializer.py
```python
from ginga import GingaPlugin
from ginga.gw import Widgets
# import any other modules you want here--it's a python world!
import os
from datetime import datetime as dt
import numpy as np
from ginga import GingaPlugin, RGBImage, colors
from ginga.gw import Widgets
from ginga.misc import ParamSet, Bunch
from ginga.util import dp
from ginga.gw.GwHelp import FileSelection
from astropy.io import fits
from astropy.modeling import models, fitting
from scipy import ndimage
import socket
class CSU_initializer(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
"""
This method is called when the plugin is loaded for the first
time. ``fv`` is a reference to the Ginga (reference viewer) shell
and ``fitsimage`` is a reference to the specific ImageViewCanvas
object associated with the channel on which the plugin is being
invoked.
You need to call the superclass initializer and then do any local
initialization.
"""
super(CSU_initializer, self).__init__(fv, fitsimage)
# Load plugin preferences
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_CSU_initializer')
self.settings.setDefaults(ibar_num=1,
mbar_num=1,
ebar_num=1,
move_to_open=False,
bar_dest=0.0,
bar_pos=137.0,
)
self.settings.load(onError='silent')
self.instrument_hosts = ['vm-mosfire', 'nuu', 'vm-mosfirebld']
self.hostname = socket.gethostname().split('.')[0].lower()
self.bars_analysis = None
self.state_analysis = None
self.bars_file = None
self.state_file = None
self.bars_header = None
self.state_header = None
self.layertag = 'bars-canvas'
self.dc = fv.get_draw_classes()
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(False)
canvas.set_surface(self.fitsimage)
self.canvas = canvas
self.colornames = colors.get_colors()
self.canvas_img = None
self.mfilesel = FileSelection(self.fv.w.root.get_widget())
## Fit relationship between bar position and pixels
tick = dt.now()
pixels, physical = self.get_data()
self.fit_transforms(pixels, physical)
tock = dt.now()
elapsed = (tock-tick).total_seconds()
# print('Completed fit of transforms in {:.3f} s'.format(elapsed))
## Determine slit angle and bar center to center distance in pixels
## from the transformation and the known longslit positions
## in longslit, bar 02 is at 145.472
## in longslit, bar 92 is at 129.480
physical = [ [145.472, self.bar_to_slit(2)],
[129.480, self.bar_to_slit(92)] ]
pixels = self.physical_to_pixel(physical)
dx = pixels[1][0] - pixels[0][0]
dy = pixels[0][1] - pixels[1][1]
self.slit_angle_pix = np.arctan(dx/dy)
# print("Slit Angle on CCD = {:.3f} deg".format(self.slit_angle_pix * 180./np.pi))
self.slit_height_pix = dy / (self.bar_to_slit(92) - self.bar_to_slit(2))
# print("Slit Height on CCD = {:.3f} pix".format(self.slit_height_pix))
def build_gui(self, container):
"""
This method is called when the plugin is invoked. It builds the
GUI used by the plugin into the widget layout passed as
``container``.
This method may be called many times as the plugin is opened and
closed for modal operations. The method may be omitted if there
is no GUI for the plugin.
This specific example uses the GUI widget set agnostic wrappers
to build the GUI, but you can also just as easily use explicit
toolkit calls here if you only want to support one widget set.
"""
top = Widgets.VBox()
top.set_border_width(4)
# this is a little trick for making plugins that work either in
# a vertical or horizontal orientation. It returns a box container,
# a scroll widget and an orientation ('vertical', 'horizontal')
vbox, sw, orientation = Widgets.get_oriented_box(container)
vbox.set_border_width(4)
vbox.set_spacing(2)
self.msg_font = self.fv.get_font("sansFont", 12)
## -----------------------------------------------------
## Acquire or Load Image
## -----------------------------------------------------
fr = Widgets.Frame("Image the CSU Mask")
vbox.add_widget(fr, stretch=0)
btns1 = Widgets.HBox()
btns1.set_spacing(1)
btn_acq_im = Widgets.Button("Acquire Mask Image")
btn_acq_im.add_callback('activated', lambda w: self.acq_mask_image())
btns1.add_widget(btn_acq_im, stretch=0)
btns1.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns1, stretch=0)
## -----------------------------------------------------
## Analyze Image
## -----------------------------------------------------
fr = Widgets.Frame("Analyze CSU Mask Image")
vbox.add_widget(fr, stretch=0)
btns2 = Widgets.HBox()
btns2.set_spacing(3)
btn_analyze = Widgets.Button("Analyze Mask Image")
btn_analyze.add_callback('activated', lambda w: self.analyze_mask_image())
btns2.add_widget(btn_analyze, stretch=0)
btns2.add_widget(Widgets.Label(''), stretch=1)
btn_overlay = Widgets.Button("Overlay Analysis Results")
btn_overlay.add_callback('activated', lambda w: self.overlay_analysis_results())
btns2.add_widget(btn_overlay, stretch=0)
btns2.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns2, stretch=0)
## -----------------------------------------------------
## Edit Analysis Results
## -----------------------------------------------------
fr = Widgets.Frame("Edit Analysis Results")
captions = [
("Set Bar Number", 'label',\
'set_ebar_num', 'entry',),\
("Set Position", 'label',\
'set_bar_pos', 'entry'),\
("Edit Bar #", 'label',\
'ebar_num', 'llabel',
'to', 'label',
'bar_pos', 'llabel',
"mm", 'label',\
"Edit Bar", 'button'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
ebar_num = int(self.settings.get('ebar_num', 1))
b.ebar_num.set_text('{:2d}'.format(ebar_num))
b.set_ebar_num.set_text('{:2d}'.format(ebar_num))
b.set_ebar_num.add_callback('activated', self.set_ebar_num_cb)
b.set_ebar_num.set_tooltip("Set bar number to move")
bar_pos = float(self.settings.get('bar_pos', 0.0))
b.bar_pos.set_text('{:+.1f}'.format(bar_pos))
b.set_bar_pos.set_text('{:+.1f}'.format(bar_pos))
b.set_bar_pos.add_callback('activated', self.set_bar_pos_cb)
b.set_bar_pos.set_tooltip("Set distance to move bar")
b.edit_bar.add_callback('activated', lambda w: self.edit_bar())
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
## -----------------------------------------------------
## Bar Overlay
## -----------------------------------------------------
fr = Widgets.Frame("Bar Positions Overlay")
vbox.add_widget(fr, stretch=0)
btns1 = Widgets.HBox()
btns1.set_spacing(1)
btn_csu_bar_state = Widgets.Button("From csu_bar_state")
btn_csu_bar_state.add_callback('activated', lambda w: self.overlaybars_from_file())
btns1.add_widget(btn_csu_bar_state, stretch=0)
btns1.add_widget(Widgets.Label(''), stretch=1)
btn_fits_header = Widgets.Button("From FITS Header")
btn_fits_header.add_callback('activated', lambda w: self.overlaybars_from_header())
btns1.add_widget(btn_fits_header, stretch=0)
btns1.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns1, stretch=0)
btns2 = Widgets.HBox()
btns2.set_spacing(1)
btn_clear = Widgets.Button("Clear Overlays")
btn_clear.add_callback('activated', lambda w: self.clear_canvas())
btns2.add_widget(btn_clear, stretch=0)
btns2.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns2, stretch=0)
## -----------------------------------------------------
## Initialize Bar
## -----------------------------------------------------
fr = Widgets.Frame("Individual Bar Initialization")
captions = [
("Set Bar Number", 'label',\
'set_ibar_num', 'entry',),\
("Initialize Bar #", 'label',\
'ibar_num', 'llabel',\
"Initialize Bar", 'button',\
"Open Before Init", 'checkbutton'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
ibar_num = int(self.settings.get('ibar_num', 1))
b.ibar_num.set_text('{:2d}'.format(ibar_num))
b.set_ibar_num.set_text('{:2d}'.format(ibar_num))
b.set_ibar_num.add_callback('activated', self.set_ibar_num_cb)
b.set_ibar_num.set_tooltip("Set bar number to initialize")
b.open_before_init.set_tooltip("Move bar to open position before initialization")
open_before_init = self.settings.get('move_to_open', False)
b.open_before_init.set_state(open_before_init)
b.open_before_init.add_callback('activated', self.open_before_init_cb)
b.initialize_bar.add_callback('activated', lambda w: self.initialize_bar())
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
## -----------------------------------------------------
## Move Bar
## -----------------------------------------------------
        # Frame for instructions and add the text widget with another
        # blank widget to stretch as needed to fill empty space
fr = Widgets.Frame("Individual Bar Control")
captions = [
("Set Bar Number", 'label',\
'set_mbar_num', 'entry',),\
("Set Destination", 'label',\
'set_bar_dest', 'entry'),\
("Move Bar #", 'label',\
'mbar_num', 'llabel',
'to', 'label',
'bar_dest', 'llabel',
"mm", 'label',\
"Move Bar", 'button'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
mbar_num = int(self.settings.get('mbar_num', 1))
b.mbar_num.set_text('{:2d}'.format(mbar_num))
b.set_mbar_num.set_text('{:2d}'.format(mbar_num))
b.set_mbar_num.add_callback('activated', self.set_mbar_num_cb)
b.set_mbar_num.set_tooltip("Set bar number to move")
bar_dest = float(self.settings.get('bar_dest', 0.0))
b.bar_dest.set_text('{:+.1f}'.format(bar_dest))
b.set_bar_dest.set_text('{:+.1f}'.format(bar_dest))
b.set_bar_dest.add_callback('activated', self.set_bar_dest_cb)
b.set_bar_dest.set_tooltip("Set distance to move bar")
b.move_bar.add_callback('activated', lambda w: self.move_bar())
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
## -----------------------------------------------------
## Spacer
## -----------------------------------------------------
# Add a spacer to stretch the rest of the way to the end of the
# plugin space
spacer = Widgets.Label('')
vbox.add_widget(spacer, stretch=1)
# scroll bars will allow lots of content to be accessed
top.add_widget(sw, stretch=1)
## -----------------------------------------------------
## Bottom
## -----------------------------------------------------
# A button box that is always visible at the bottom
btns_close = Widgets.HBox()
btns_close.set_spacing(3)
# Add a close button for the convenience of the user
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns_close.add_widget(btn, stretch=0)
btns_close.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns_close, stretch=0)
# Add our GUI to the container
container.add_widget(top, stretch=1)
# NOTE: if you are building a GUI using a specific widget toolkit
# (e.g. Qt) GUI calls, you need to extract the widget or layout
# from the non-toolkit specific container wrapper and call on that
# to pack your widget, e.g.:
#cw = container.get_widget()
#cw.addWidget(widget, stretch=1)
def close(self):
"""
Example close method. You can use this method and attach it as a
callback to a button that you place in your GUI to close the plugin
as a convenience to the user.
"""
self.fv.stop_local_plugin(self.chname, str(self))
return True
def start(self):
"""
This method is called just after ``build_gui()`` when the plugin
is invoked. This method may be called many times as the plugin is
opened and closed for modal operations. This method may be omitted
in many cases.
"""
# start ruler drawing operation
p_canvas = self.fitsimage.get_canvas()
try:
obj = p_canvas.get_object_by_tag(self.layertag)
except KeyError:
# Add ruler layer
p_canvas.add(self.canvas, tag=self.layertag)
self.resume()
def pause(self):
"""
This method is called when the plugin loses focus.
It should take any actions necessary to stop handling user
interaction events that were initiated in ``start()`` or
``resume()``.
This method may be called many times as the plugin is focused
or defocused. It may be omitted if there is no user event handling
to disable.
"""
pass
def resume(self):
"""
This method is called when the plugin gets focus.
It should take any actions necessary to start handling user
interaction events for the operations that it does.
This method may be called many times as the plugin is focused or
defocused. The method may be omitted if there is no user event
handling to enable.
"""
pass
def stop(self):
"""
This method is called when the plugin is stopped.
It should perform any special clean up necessary to terminate
the operation. The GUI will be destroyed by the plugin manager
so there is no need for the stop method to do that.
This method may be called many times as the plugin is opened and
closed for modal operations, and may be omitted if there is no
special cleanup required when stopping.
"""
pass
def redo(self):
"""
This method is called when the plugin is active and a new
image is loaded into the associated channel. It can optionally
redo the current operation on the new image. This method may be
called many times as new images are loaded while the plugin is
active. This method may be omitted.
"""
pass
def __str__(self):
"""
This method should be provided and should return the lower case
name of the plugin.
"""
return 'CSU Initializer Plugin'
## ------------------------------------------------------------------
## Coordinate Transformation Utilities
## ------------------------------------------------------------------
def slit_to_bars(self, slit):
'''Given a slit number (1-46), return the two bar numbers associated
with that slit.
'''
return (slit*2-1, slit*2)
def bar_to_slit(self, bar):
        '''Given a bar number, return the slit associated with that bar.
'''
return int((bar+1)/2)
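    # Mapping examples: slit 1 is formed by bars (1, 2) and slit 46 by bars
    # (91, 92); conversely bar_to_slit(91) == bar_to_slit(92) == 46.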
def pad(self, x):
'''Pad array for affine transformation.
'''
return np.hstack([x, np.ones((x.shape[0], 1))])
def unpad(self, x):
'''Unpad array for affine transformation.
'''
return x[:,:-1]
def fit_transforms(self, pixels, physical):
'''Given a set of pixel coordinates (X, Y) and a set of physical
coordinates (mm, slit), fit the affine transformations (forward and
backward) to convert between the two coordinate systems.
'''
assert pixels.shape[1] == 2
assert physical.shape[1] == 2
assert pixels.shape[0] == physical.shape[0]
# Pad the data with ones, so that our transformation can do translations too
n = pixels.shape[0]
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:,:-1]
X = pad(pixels)
Y = pad(physical)
# Solve the least squares problem X * A = Y
# to find our transformation matrix A
A, res, rank, s = np.linalg.lstsq(X, Y)
Ainv, res, rank, s = np.linalg.lstsq(Y, X)
A[np.abs(A) < 1e-10] = 0
        Ainv[np.abs(Ainv) < 1e-10] = 0
self.Apixel_to_physical = A
self.Aphysical_to_pixel = Ainv
def pixel_to_physical(self, x):
'''Using the affine transformation determined by `fit_transforms`,
convert a set of pixel coordinates (X, Y) to physical coordinates (mm,
slit).
'''
x = np.array(x)
result = self.unpad(np.dot(self.pad(x), self.Apixel_to_physical))
return result
def physical_to_pixel(self, x):
'''Using the affine transformation determined by `fit_transforms`,
convert a set of physical coordinates (mm, slit) to pixel coordinates
(X, Y).
'''
x = np.array(x)
result = self.unpad(np.dot(self.pad(x), self.Aphysical_to_pixel))
return result
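    # Illustrative round trip (values are made up, not calibration data):
    # after fit_transforms() has been run,
    #   phys = np.array([[137.0, 23.0]])       # (mm, slit)
    #   pix = self.physical_to_pixel(phys)     # -> 1x2 array of (X, Y) pixels
    #   back = self.pixel_to_physical(pix)     # recovers ~[[137.0, 23.0]]
    # since the two fitted affine transforms are (approximate) inverses.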
## ------------------------------------------------------------------
## Analyze Image to Determine Bar Positions
## ------------------------------------------------------------------
def analyze_mask_image(self, filtersize=7):
'''Loop over all slits in the image and using the affine transformation
determined by `fit_transforms`, select the Y pixel range over which this
slit should be found. Take a median filtered version of that image and
determine the X direction gradient (derivative). Then collapse it in
the Y direction to form a 1D profile.
Using the `find_bar_edges` method, determine the X pixel positions of
each bar forming the slit.
Convert those X pixel position to physical coordinates using the
`pixel_to_physical` method and then call the `compare_to_csu_bar_state`
method to determine the bar state.
'''
## Get image
try:
channel = self.fv.get_channel(self.chname)
image = channel.get_current_image()
data = image._get_data()
except:
print('Failed to load image data')
return
# median X pixels only (preserve Y structure)
medimage = ndimage.median_filter(data, size=(1, filtersize))
self.bars_analysis = {}
self.state_analysis = {}
for slit in range(1,47):
b1, b2 = self.slit_to_bars(slit)
## Determine y pixel range
y1 = int(np.ceil((self.physical_to_pixel(np.array([(4.0, slit+0.5)])))[0][1]))
y2 = int(np.floor((self.physical_to_pixel(np.array([(270.4, slit-0.5)])))[0][1]))
gradx = np.gradient(medimage[y1:y2,:], axis=1)
horizontal_profile = np.sum(gradx, axis=0)
x1, x2 = self.find_bar_edges(horizontal_profile)
if x1 is None:
self.bars_analysis[b1] = None
self.state_analysis[b1] = 'UNKNOWN'
else:
mm1 = (self.pixel_to_physical(np.array([(x1, (y1+y2)/2.)])))[0][0]
self.bars_analysis[b1] = mm1
self.state_analysis[b1] = 'ANALYZED'
if x2 is None:
self.bars_analysis[b2] = None
self.state_analysis[b2] = 'UNKNOWN'
else:
mm2 = (self.pixel_to_physical(np.array([(x2, (y1+y2)/2.)])))[0][0]
self.bars_analysis[b2] = mm2
self.state_analysis[b2] = 'ANALYZED'
self.compare_to_csu_bar_state()
def find_bar_edges(self, horizontal_profile):
        '''Given a 1D profile, determine the X position of each bar that forms
a single slit. The slit edges are found by fitting one positive and
one negative gaussian function to the profile.
'''
fitter = fitting.LevMarLSQFitter()
        amp1_est = horizontal_profile[horizontal_profile == min(horizontal_profile)][0]
        mean1_est = np.argmin(horizontal_profile)
        amp2_est = horizontal_profile[horizontal_profile == max(horizontal_profile)][0]
        mean2_est = np.argmax(horizontal_profile)
g_init1 = models.Gaussian1D(amplitude=amp1_est, mean=mean1_est, stddev=2.)
g_init1.amplitude.max = 0
g_init2 = models.Gaussian1D(amplitude=amp2_est, mean=mean2_est, stddev=2.)
g_init2.amplitude.min = 0
model = g_init1 + g_init2
fit = fitter(model, range(0,horizontal_profile.shape[0]), horizontal_profile)
# Check Validity of Fit
if abs(fit.stddev_0.value) < 3 and abs(fit.stddev_1.value) < 3\
and fit.amplitude_0.value < -1 and fit.amplitude_1.value > 1\
and fit.mean_0.value > fit.mean_1.value:
x1 = fit.mean_0.value
x2 = fit.mean_1.value
else:
x1 = None
x2 = None
return (x1, x2)
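    # Illustrative sketch with synthetic data (not part of the plugin): the X
    # gradient of a bright slit between two dark bars rises at the left edge and
    # falls at the right edge, which is what the positive + negative Gaussian pair
    # above is fitting.  For example:
    #
    #     x = np.arange(100, dtype=float)
    #     profile = (50. * np.exp(-0.5 * ((x - 30.) / 2.)**2)
    #                - 50. * np.exp(-0.5 * ((x - 70.) / 2.)**2))
    #     g_neg = models.Gaussian1D(amplitude=-50., mean=70., stddev=2.)
    #     g_pos = models.Gaussian1D(amplitude=50., mean=30., stddev=2.)
    #     fit = fitting.LevMarLSQFitter()(g_neg + g_pos, x, profile)
    #     # fit.mean_0 ~ 70 and fit.mean_1 ~ 30 recover the two edge positions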
def compare_to_csu_bar_state(self, tol=0.3):
if self.bars_analysis is None:
return
# Read csu_bar_state file
if self.hostname in self.instrument_hosts:
self.read_csu_bar_state('')
else:
print('Not running on permitted host. Loading dummy file.')
file = os.path.expanduser('~/MOSFIRE_Test_Data/csu_bar_state')
self.read_csu_bar_state(file)
# Set state for bars
for b in range(1,93):
# Set state_analysis to be same as state_file if not OK
if self.state_file[b] != 'OK':
self.state_analysis[b] = self.state_file[b]
# otherwise check if analysis matches csu_bar_state position
else:
self.check_safe(b)
def check_safe(self, b, tol=0.3):
try:
diff = self.bars_analysis[b] - self.bars_file[b]
if abs(diff) < tol:
self.state_analysis[b] = 'OK'
else:
if b % 2 == 0:
if diff > tol:
self.state_analysis[b] = 'DANGEROUS'
else:
self.state_analysis[b] = 'DISCREPANT'
elif b % 2 == 1:
if diff < -tol:
self.state_analysis[b] = 'DANGEROUS'
else:
self.state_analysis[b] = 'DISCREPANT'
        except Exception:
self.state_analysis[b] = 'UNKNOWN'
## ------------------------------------------------------------------
## Read Bar Positions and Overlay
## ------------------------------------------------------------------
def read_csu_bar_state(self, filename):
with open(filename, 'r') as FO:
lines = FO.readlines()
self.bars_file = {}
self.state_file = {}
state_trans = {0: 'OK', 1: 'SETUP', 2: 'MOVING', -3: 'ERROR'}
for line in lines:
barno, pos, statestr = line.strip('\n').split(',')
self.bars_file[int(barno)] = float(pos)
self.state_file[int(barno)] = state_trans[int(statestr)]
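    # The csu_bar_state file parsed above is assumed to be plain text with one
    # comma-separated record per bar, "<bar number>,<position in mm>,<state code>",
    # e.g. a line such as "46,137.005,0".  The integer state code is translated by
    # state_trans (0=OK, 1=SETUP, 2=MOVING, -3=ERROR).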
def read_bars_from_header(self, header):
self.bars_header = {}
self.state_header = {}
for i in range(1,93):
self.bars_header[i] = float(header['B{:02d}POS'.format(i)])
self.state_header[i] = 'FROM_FITS'
def overlaybars(self, bars, state=None, alpha=0.8):
colormap = {'OK': 'green',
'ERROR': 'red',
'DANGEROUS': 'red',
'DISCREPANT': 'yellow',
'UNKNOWN': 'orange',
'FROM_FITS': 'seagreen'}
draw_height = 0.45
for j in range(1, 47):
b1, b2 = self.slit_to_bars(j)
physical1 = [ [-2.0, j-draw_height],
[-2.0, j+draw_height],
[bars[b1], j+draw_height],
[bars[b1], j-draw_height] ]
physical1 = np.array(physical1)
pixels1 = self.physical_to_pixel(physical1)
pixels1[2][0] += draw_height * self.slit_height_pix * np.sin(self.slit_angle_pix)
pixels1[3][0] -= draw_height * self.slit_height_pix * np.sin(self.slit_angle_pix)
physical2 = [ [270.4+4.0, j-draw_height],
[270.4+4.0, j+draw_height],
[bars[b2], j+draw_height],
[bars[b2], j-draw_height] ]
physical2 = np.array(physical2)
pixels2 = self.physical_to_pixel(physical2)
pixels2[2][0] += draw_height * self.slit_height_pix * np.sin(self.slit_angle_pix)
pixels2[3][0] -= draw_height * self.slit_height_pix * np.sin(self.slit_angle_pix)
b1color = colormap.get(state[b1], 'gray')
b2color = colormap.get(state[b2], 'gray')
self.canvas.add(self.dc.Polygon(pixels1, color=b1color, alpha=alpha))
self.canvas.add(self.dc.Polygon(pixels2, color=b2color, alpha=alpha))
x1, y1 = self.physical_to_pixel([[7.0, j+0.3]])[0]
self.canvas.add(self.dc.Text(x1, y1, '{:d}'.format(b1),
fontsize=10, color=b1color))
x2, y2 = self.physical_to_pixel([[270.4-0.0, j+0.3]])[0]
self.canvas.add(self.dc.Text(x2, y2, '{:d}'.format(b2),
fontsize=10, color=b2color))
def overlay_analysis_results(self):
if self.bars_analysis is None:
return
self.overlaybars(self.bars_analysis, state=self.state_analysis)
def overlaybars_from_file(self):
if self.hostname in self.instrument_hosts:
            self.read_csu_bar_state('')
else:
print('Not running on permitted host. Loading dummy file.')
file = os.path.expanduser('~/MOSFIRE_Test_Data/csu_bar_state')
self.read_csu_bar_state(file)
self.overlaybars(self.bars_file, state=self.state_file)
def overlaybars_from_header(self):
## Get header
try:
channel = self.fv.get_channel(self.chname)
image = channel.get_current_image()
header = image.get_header()
        except Exception:
print('Failed to load header from image')
else:
self.read_bars_from_header(header)
self.overlaybars(self.bars_header, state=self.state_header)
def clear_canvas(self):
self.canvas.delete_all_objects()
## ------------------------------------------------------------------
## Bar Control
## ------------------------------------------------------------------
def move_bar_to_open(self, b):
try:
state = self.state_analysis[b]
        except Exception:
            state = ''
        if state == 'UNKNOWN':
print('Cannot move to open from unknown position. No action taken.')
return
if b % 2 == 0:
destination = 270.400
elif b % 2 == 1:
destination = 4.0
print('Moving bar #{:02d} to {:+.2f} mm'.format(b, destination))
cmd = 'csuMoveBar {:02d} {:.1f}'.format(b, destination)
print(cmd)
if self.hostname in self.instrument_hosts:
pass
else:
print('Not running on permitted host. No action taken.')
def move_bar(self):
bar = self.settings.get('mbar_num')
destination = self.settings.get('bar_dest')
print('Moving bar #{:02d} to {:+.2f} mm'.format(bar, destination))
cmd = 'csuMoveBar {:02d} {:.1f}'.format(bar, destination)
print(cmd)
if self.hostname in self.instrument_hosts:
pass
else:
print('Not running on permitted host. No action taken.')
def initialize_bar(self):
bar = self.settings.get('ibar_num')
if self.settings.get('move_to_open'):
self.move_bar_to_open(bar)
print('Initializing bar #{:02d}'.format(bar))
cmd = 'm csuinitbar={:02d}'.format(bar)
print(cmd)
if self.hostname in self.instrument_hosts:
pass
else:
print('Not running on permitted host. No action taken.')
## ------------------------------------------------------------------
## Button Callbacks
## ------------------------------------------------------------------
def acq_mask_image(self):
if self.hostname in self.instrument_hosts:
pass
else:
print('Not running on permitted host. No action taken.')
def set_ebar_num_cb(self, w):
ebar_num = int(w.get_text())
self.settings.set(ebar_num=ebar_num)
self.w.ebar_num.set_text('{:2d}'.format(ebar_num))
def set_bar_pos_cb(self, w):
bar_pos = float(w.get_text())
self.settings.set(bar_pos=bar_pos)
self.w.bar_pos.set_text('{:+.1f}'.format(bar_pos))
def edit_bar(self):
bar = self.settings.get('ebar_num')
destination = self.settings.get('bar_pos')
self.bars_analysis[bar] = destination
self.state_analysis[bar] = 'DISCREPANT'
self.clear_canvas()
self.overlay_analysis_results()
def set_ibar_num_cb(self, w):
ibar_num = int(w.get_text())
self.settings.set(ibar_num=ibar_num)
self.w.ibar_num.set_text('{:2d}'.format(ibar_num))
def set_mbar_num_cb(self, w):
mbar_num = int(w.get_text())
self.settings.set(mbar_num=mbar_num)
self.w.mbar_num.set_text('{:2d}'.format(mbar_num))
def set_bar_dest_cb(self, w):
bar_dest = float(w.get_text())
self.settings.set(bar_dest=bar_dest)
self.w.bar_dest.set_text('{:+.1f}'.format(bar_dest))
def open_before_init_cb(self, widget, tf):
self.settings.set(move_to_open=tf)
## ------------------------------------------------------------------
## Data to Fit Affine Transformation
## ------------------------------------------------------------------
def get_data(self):
pixels = np.array([ (1026.6847023205248, 31.815757489924671),
(1031.1293065907989, 31.815757489924671),
(1100.0527926274958, 76.568051304306408),
(1104.4723170387663, 76.568051304306408),
(869.79921202733158, 119.71402079180322),
(874.17468615739256, 119.71402079180322),
(790.04504261037619, 163.97941699869187),
(794.38269316256697, 163.97941699869187),
(844.76764696920873, 208.45498973235158),
(849.06840834451555, 208.45498973235158),
(918.16119587182891, 253.46863795483193),
(922.57167115281891, 253.46863795483193),
(667.1708458173706, 296.83477802171569),
(671.58750566149126, 296.83477802171569),
(1210.6743343816352, 342.85304935109269),
(1215.1047501727178, 342.85304935109269),
(1037.1504738673596, 386.56200191364559),
(1041.5376839155629, 386.56200191364559),
(1380.9733624348846, 431.75478066748974),
(1385.3923546613969, 431.75478066748974),
(1392.3137244788115, 476.40898670973735),
(1396.5838727543558, 476.40898670973735),
(701.99737614209846, 518.12290417047029),
(706.31972548163674, 518.12290417047029),
(775.43118955263321, 562.76481942553085),
(779.76336695630744, 562.76481942553085),
(695.39446696825667, 606.9386852721824),
(699.68592870194686, 606.9386852721824),
(1225.8966927438423, 652.79237015375304),
(1230.2681865131638, 652.79237015375304),
(1299.3047613957535, 697.52305237026349),
(1303.6542557465727, 697.52305237026349),
(953.60567493512144, 740.39597570556316),
(957.91890612112604, 740.39597570556316),
(1027.0080928255736, 784.70486151318767),
(1031.3650789520013, 784.70486151318767),
(1241.625753053888, 830.10892664282756),
(1245.9181149708163, 830.10892664282756),
(1266.796600696397, 874.17188807394371),
(1271.1082253968038, 874.17188807394371),
(1404.8881828516335, 919.85774261912377),
(1409.9449171925908, 919.85774261912377),
(1325.0207484270156, 963.32163630950686),
(1329.3681702175545, 963.32163630950686),
(1185.9570564396361, 1007.0164717446025),
(1190.2368155733498, 1007.0164717446025),
(1306.6628878384579, 1051.9073888851103),
(1310.9679069215179, 1051.9073888851103),
(1151.3860791138529, 1095.4860726831637),
(1155.7367238283309, 1095.4860726831637),
(1224.7162502034391, 1140.436681012593),
(1229.0598756552718, 1140.436681012593),
(904.70409145100268, 1183.267412335555),
(908.99297982589781, 1183.267412335555),
(978.00762214758913, 1227.9731804278615),
(982.41054057239705, 1227.9731804278615),
(869.65543493075677, 1271.3564678397893),
(873.95299108698168, 1271.3564678397893),
(942.99396243198464, 1316.2391922602001),
(947.36667894787513, 1316.2391922602001),
(1256.7806430753744, 1361.195495916817),
(1261.0847133245632, 1361.195495916817),
(1330.1305637595844, 1406.3795550431571),
(1334.3960288420271, 1406.3795550431571),
(1060.9423305503171, 1449.3586376395574),
(1065.3182032594575, 1449.3586376395574),
(1108.6465868246237, 1493.9756362677167),
(1112.9382994207679, 1493.9756362677167),
(662.84522896384874, 1536.9734554153649),
(667.12956877347722, 1536.9734554153649),
(712.5287834914659, 1581.2712766110319),
(716.80585127180609, 1581.2712766110319),
(956.48762939159371, 1626.1728182002655),
(960.9581522740466, 1626.1728182002655),
(723.23974640617337, 1670.0165354200499),
(727.67208274341931, 1670.0165354200499),
(1172.3594885486252, 1715.8650599984883),
(1176.8341929555718, 1715.8650599984883),
(1015.7329598422145, 1759.5446833817025),
(1020.1920698607528, 1759.5446833817025),
(935.82358262678224, 1803.5644982617907),
(940.3126440130676, 1803.5644982617907),
(989.98752991018682, 1847.9507718487364),
(994.40511955530712, 1847.9507718487364),
(1278.2218422583971, 1892.8072028048214),
(1282.7070969966558, 1892.8072028048214),
(1351.5377751257745, 1938.5923374638328),
(1355.9221844080257, 1938.5923374638328),
(1171.5812780061251, 1981.4914424153424),
(1176.0817255338613, 1981.4914424153424),
])
physical = np.array([ (139.917, self.bar_to_slit(92)),
(139.41, self.bar_to_slit(91)),
(130.322, self.bar_to_slit(90)),
(129.815, self.bar_to_slit(89)),
(160.334, self.bar_to_slit(88)),
(159.827, self.bar_to_slit(87)),
(170.738, self.bar_to_slit(86)),
(170.231, self.bar_to_slit(85)),
(163.579, self.bar_to_slit(84)),
(163.072, self.bar_to_slit(83)),
(153.983, self.bar_to_slit(82)),
(153.476, self.bar_to_slit(81)),
(186.718, self.bar_to_slit(80)),
(186.211, self.bar_to_slit(79)),
(115.773, self.bar_to_slit(78)),
(115.266, self.bar_to_slit(77)),
(138.413, self.bar_to_slit(76)),
(137.906, self.bar_to_slit(75)),
(93.508, self.bar_to_slit(74)),
(93.001, self.bar_to_slit(73)),
(92.021, self.bar_to_slit(72)),
(91.514, self.bar_to_slit(71)),
(182.097, self.bar_to_slit(70)),
(181.59, self.bar_to_slit(69)),
(172.502, self.bar_to_slit(68)),
(171.995, self.bar_to_slit(67)),
(182.905, self.bar_to_slit(66)),
(182.398, self.bar_to_slit(65)),
(113.665, self.bar_to_slit(64)),
(113.158, self.bar_to_slit(63)),
(104.069, self.bar_to_slit(62)),
(103.562, self.bar_to_slit(61)),
(149.161, self.bar_to_slit(60)),
(148.654, self.bar_to_slit(59)),
(139.566, self.bar_to_slit(58)),
(139.059, self.bar_to_slit(57)),
(111.528, self.bar_to_slit(56)),
(111.021, self.bar_to_slit(55)),
(108.22, self.bar_to_slit(54)),
(107.713, self.bar_to_slit(53)),
(90.189, self.bar_to_slit(52)),
(89.681, self.bar_to_slit(51)),
(100.593, self.bar_to_slit(50)),
(100.086, self.bar_to_slit(49)),
(118.731, self.bar_to_slit(48)),
(118.223, self.bar_to_slit(47)),
(102.94, self.bar_to_slit(46)),
(102.432, self.bar_to_slit(45)),
(123.212, self.bar_to_slit(44)),
(122.704, self.bar_to_slit(43)),
(113.615, self.bar_to_slit(42)),
(113.108, self.bar_to_slit(41)),
(155.354, self.bar_to_slit(40)),
(154.847, self.bar_to_slit(39)),
(145.759, self.bar_to_slit(38)),
(145.251, self.bar_to_slit(37)),
(159.887, self.bar_to_slit(36)),
(159.38, self.bar_to_slit(35)),
(150.292, self.bar_to_slit(34)),
(149.785, self.bar_to_slit(33)),
(109.338, self.bar_to_slit(32)),
(108.83, self.bar_to_slit(31)),
(99.742, self.bar_to_slit(30)),
(99.235, self.bar_to_slit(29)),
(134.842, self.bar_to_slit(28)),
(134.335, self.bar_to_slit(27)),
(128.616, self.bar_to_slit(26)),
(128.109, self.bar_to_slit(25)),
(186.778, self.bar_to_slit(24)),
(186.271, self.bar_to_slit(23)),
(180.272, self.bar_to_slit(22)),
(179.765, self.bar_to_slit(21)),
(148.417, self.bar_to_slit(20)),
(147.91, self.bar_to_slit(19)),
(178.822, self.bar_to_slit(18)),
(178.314, self.bar_to_slit(17)),
(120.197, self.bar_to_slit(16)),
(119.689, self.bar_to_slit(15)),
(140.601, self.bar_to_slit(14)),
(140.094, self.bar_to_slit(13)),
(151.005, self.bar_to_slit(12)),
(150.498, self.bar_to_slit(11)),
(143.947, self.bar_to_slit(10)),
(143.44, self.bar_to_slit(9)),
(106.313, self.bar_to_slit(8)),
(105.806, self.bar_to_slit(7)),
(96.717, self.bar_to_slit(6)),
(96.21, self.bar_to_slit(5)),
(120.202, self.bar_to_slit(4)),
(119.695, self.bar_to_slit(3)),
])
return pixels, physical
``` |
{
"source": "joshwalawender/IQMon",
"score": 2
} |
#### File: iqmon/pipelines/ingest.py
```python
from keckdrpframework.pipelines.base_pipeline import BasePipeline
from keckdrpframework.models.processing_context import ProcessingContext
# MODIFY THIS IMPORT to reflect the name of the module created in the primitives directory
from iqmon.primitives.file_handling import (ReadFITS,
PopulateAdditionalMetaData,
CopyFile,
DeleteOriginal)
from iqmon.primitives.database import RecordFile
class IngestPipeline(BasePipeline):
"""
This pipeline ingests files from their raw location on disk and rewrites
them to the destination directory with a checksum. It then (if spcified)
deletes the original file. Finally, some basic image information and
statistics are recorded to the mongo database.
This is meant to be a very quick sequence which moves the file and records
the file's existence to the database.
"""
event_table = {
"next_file": ("ReadFITS", "reading_file", "populate_metadata"),
"populate_metadata": ("PopulateAdditionalMetaData", "populating_metadata", "copy_file"),
"copy_file": ("CopyFile", "copying_file", "delete_original"),
"delete_original": ("DeleteOriginal", "deleting_original", "record_file"),
"record_file": ("RecordFile", "recording", None),
}
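    # Each event_table entry above maps an event name to a tuple of
    # (primitive to run, state label while it runs, next event to trigger), so the
    # ingest chain is ReadFITS -> PopulateAdditionalMetaData -> CopyFile ->
    # DeleteOriginal -> RecordFile, with None terminating the sequence.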
def __init__(self, context: ProcessingContext):
BasePipeline.__init__(self, context)
```
#### File: iqmon/primitives/image_reduction.py
```python
from pathlib import Path
from datetime import datetime, timedelta
import numpy as np
from astropy import units as u
from astropy import stats
from astropy.time import Time
from astropy.nddata import CCDData
import ccdproc
from keckdrpframework.primitives.base_primitive import BasePrimitive
from .utils import pre_condition, post_condition, find_master
##-----------------------------------------------------------------------------
## Primitive: SubtractBias
##-----------------------------------------------------------------------------
class SubtractBias(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
master_dir = self.cfg['Calibrations'].get('DirectoryForMasters', None)
self.master_bias_file = find_master(master_dir, 'Bias', action.args.meta)
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
checks = [pre_condition(self, 'Skip image is not set',
not self.action.args.skip),
pre_condition(self, 'master bias is available',
self.master_bias_file is not None),
pre_condition(self, 'Image type is not BIAS',
self.action.args.imtype != 'BIAS'),
]
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
self.log.info(f" Found master bias file: {self.master_bias_file.name}")
master_bias_ccddata = CCDData.read(self.master_bias_file, unit="adu")
self.log.info(f" Subtracting bias")
self.action.args.ccddata = ccdproc.subtract_bias(self.action.args.ccddata,
master_bias_ccddata)
return self.action.args
##-----------------------------------------------------------------------------
## Primitive: SubtractDark
##-----------------------------------------------------------------------------
class SubtractDark(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
master_dir = self.cfg['Calibrations'].get('DirectoryForMasters', None)
self.master_dark_file = find_master(master_dir, 'Dark', action.args.meta)
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
checks = [pre_condition(self, 'Skip image is not set',
not self.action.args.skip),
pre_condition(self, 'master dark is available',
self.master_dark_file is not None),
pre_condition(self, 'Image type is not DARK',
self.action.args.imtype != 'DARK'),
]
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
self.log.info(f" Found master dark file: {self.master_dark_file.name}")
master_dark_ccddata = CCDData.read(self.master_dark_file, unit="adu")
self.log.info(f" Subtracting dark")
self.action.args.ccddata = ccdproc.subtract_bias(self.action.args.ccddata,
master_dark_ccddata)
return self.action.args
##-----------------------------------------------------------------------------
## Primitive: GainCorrect
##-----------------------------------------------------------------------------
class GainCorrect(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
checks = [pre_condition(self, 'Skip image is not set',
not self.action.args.skip),
]
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
gain = self.action.args.meta.get('GAIN', None)
if gain is not None: self.log.debug(f' Using gain = {gain}')
if gain is None:
gain = self.cfg['Telescope'].getfloat('gain', None)
self.log.debug(f' Got gain from config: {gain}')
self.log.debug(' Gain correcting data')
self.action.args.ccddata = ccdproc.gain_correct(self.action.args.ccddata,
gain,
gain_unit=u.electron/u.adu)
return self.action.args
##-----------------------------------------------------------------------------
## Primitive: CreateDeviation
##-----------------------------------------------------------------------------
class CreateDeviation(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
checks = [pre_condition(self, 'Skip image is not set',
not self.action.args.skip),
]
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
read_noise = self.action.args.meta.get('read_noise', None)
if read_noise is not None: self.log.debug(f' Using read_noise = {read_noise}')
if read_noise is None:
read_noise = self.cfg['Telescope'].getfloat('read_noise', None)
self.log.debug(f' Got read_noise from config: {read_noise}')
self.action.args.ccddata = ccdproc.create_deviation(self.action.args.ccddata,
readnoise=read_noise*u.electron)
return self.action.args
##-----------------------------------------------------------------------------
## Primitive: MakeMasterCalFrame
##-----------------------------------------------------------------------------
class MakeMasterCalFrame(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
self.context = context
date_string = self.action.args.meta['UT date string']
# if not hasattr(self.context, 'date_string'):
# self.context[date_string] = {}
# if self.action.args.imtype not in self.context[date_string].keys():
# self.context[date_string][self.action.args.imtype] = []
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
imtype = self.action.args.imtype
date_string = self.action.args.meta['UT date string']
checks = [pre_condition(self, 'Skip image is not set',
not self.action.args.skip),
pre_condition(self, 'Image type is cal',
imtype in ['BIAS', 'DARK']),
# pre_condition(self, 'Connected to mongo',
# self.mongo_iqmon is not None),
pre_condition(self, f'do_{imtype}_subtraction is True',
self.cfg['Calibrations'].getboolean(f'do_{imtype}_subtraction', True) is True),
]
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
imtype = self.action.args.imtype
date_string = self.action.args.meta['UT date string']
self.context[date_string][self.action.args.imtype].append(self.action.args.ccddata)
n_cals = len(self.context[date_string][imtype])
self.log.info(f"Found {n_cals} {imtype} files for {date_string}")
if n_cals >= self.cfg['Calibrations'].getint(f"min_{imtype}_frames"):
self.log.info(f"Stacking {n_cals} {imtype} files")
combined = ccdproc.combine(self.context[date_string][imtype],
method='average',
sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,
sigma_clip_func=np.ma.median, sigma_clip_dev_func=stats.mad_std,
)
self.log.info(f" Combined.")
            combined.meta['combined'] = True
            combined.meta['ncomb'] = n_cals
combined_filename = f'Master{imtype}_{date_string}.fits'
combined_filepath = Path(self.cfg['Calibrations'].get('directory_for_masters'))
combined_file = combined_filepath.joinpath(combined_filename)
if combined_file.exists() is True:
self.log.debug(f" Deleting existing: {combined_file}")
combined_file.unlink()
self.log.info(f" Saving: {combined_file}")
combined.write(combined_file)
return self.action.args
```
#### File: iqmon/primitives/__init__.py
```python
##-----------------------------------------------------------------------------
## Primitive: Template
##-----------------------------------------------------------------------------
# class Template(BasePrimitive):
# """
# """
# def __init__(self, action, context):
# BasePrimitive.__init__(self, action, context)
# self.log = context.pipeline_logger
# self.cfg = self.context.config.instrument
#
# def _pre_condition(self):
# """Check for conditions necessary to run this process"""
# checks = []
# return np.all(checks)
#
# def _post_condition(self):
# """
# Check for conditions necessary to verify that the process ran
# correctly.
# """
# checks = []
# return np.all(checks)
#
# def _perform(self):
# """
# Returns an Argument() with the parameters that depend on this
# operation.
# """
# self.log.info(f"Running {self.__class__.__name__} action")
#
# return self.action.args
```
#### File: iqmon/primitives/photometry.py
```python
from pathlib import Path
from datetime import datetime, timedelta
import numpy as np
from astropy import units as u
from astropy import stats
from astropy.time import Time
from astropy.table import Table, Column
import photutils
import sep
from keckdrpframework.primitives.base_primitive import BasePrimitive
from .utils import pre_condition, post_condition, mode
##-----------------------------------------------------------------------------
## Primitive: MakeSourceMask
##-----------------------------------------------------------------------------
class MakeSourceMask(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
checks = [pre_condition(self, 'Skip image is not set',
not self.action.args.skip),
pre_condition(self, 'Extraction requested',
self.cfg['Extract'].getboolean('do_extraction', True) is True),
pre_condition(self, 'Image type is OBJECT',
self.action.args.imtype == 'OBJECT'),
]
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
snr = self.cfg['Extract'].getfloat('source_mask_snr', 5)
self.log.debug(f" Using snr = {snr}")
npixels = self.cfg['Extract'].getfloat('source_mask_npixels', 5)
self.log.debug(f" Using npixels = {npixels}")
source_mask = photutils.make_source_mask(self.action.args.ccddata,
snr, npixels)
self.action.args.source_mask = source_mask
return self.action.args
##-----------------------------------------------------------------------------
## Primitive: CreateBackground
##-----------------------------------------------------------------------------
class CreateBackground(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
checks = [pre_condition(self, 'Skip image is not set',
not self.action.args.skip),
pre_condition(self, 'Extraction requested',
self.cfg['Extract'].getboolean('do_extraction', True) is True),
pre_condition(self, 'Image type is OBJECT',
self.action.args.imtype == 'OBJECT'),
]
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
box_size = self.cfg['Extract'].getint('background_box_size', 128)
self.log.debug(f" Using box size = {box_size} pixels")
bkg = photutils.Background2D(self.action.args.ccddata,
box_size=box_size,
mask=self.action.args.source_mask,
sigma_clip=stats.SigmaClip())
self.action.args.background = bkg
return self.action.args
##-----------------------------------------------------------------------------
## Primitive: ExtractStars
##-----------------------------------------------------------------------------
class ExtractStars(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
checks = [pre_condition(self, 'Skip image is not set',
not self.action.args.skip),
pre_condition(self, 'Background has been generated',
self.action.args.background is not None),
pre_condition(self, 'Extraction requested',
self.cfg['Extract'].getboolean('do_extraction', True) is True),
pre_condition(self, 'Image type is OBJECT',
self.action.args.imtype == 'OBJECT'),
]
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
exptime = self.action.args.meta.get('exptime')
pixel_scale = self.cfg['Telescope'].getfloat('pixel_scale', 1)
thresh = self.cfg['Extract'].getint('extract_threshold', 9)
minarea = self.cfg['Extract'].getint('extract_minarea', 7)
mina = self.cfg['Extract'].getfloat('fwhm_mina', 1)
minb = self.cfg['Extract'].getfloat('fwhm_minb', 1)
faint_limit_pct = self.cfg['Extract'].getfloat('faint_limit_percentile', 0)
bright_limit_pct = self.cfg['Extract'].getfloat('bright_limit_percentile', 100)
radius_limit = self.cfg['Extract'].getfloat('radius_limit_pix', 4000)
bsub = self.action.args.ccddata.data - self.action.args.background.background
seperr = self.action.args.ccddata.uncertainty.array
sepmask = self.action.args.ccddata.mask
# Define quick utility function
def run_sep(bsub, seperr, sepmask, thresh, minarea):
try:
objects = sep.extract(bsub, err=seperr, mask=sepmask,
thresh=float(thresh), minarea=minarea)
return objects
except Exception as e:
if str(e)[:27] == 'internal pixel buffer full:':
return None
else:
raise SEPError(str(e))
objects = None
while objects is None:
try:
self.log.info(f'Invoking SEP with threshold: {thresh}')
objects = run_sep(bsub, seperr, sepmask, thresh, minarea)
thresh += 9
except SEPError as e:
self.log.error('Source extractor failed:')
self.log.error(e)
return self.action.args
t = Table(objects)
t['flux'] /= exptime
# Add radius (of star from center of image) to table
ny, nx = bsub.shape
r = np.sqrt((t['x']-nx/2.)**2 + (t['y']-ny/2.)**2)
        t.add_column(Column(data=r.data, name='r', dtype=float))
# Add FWHM to table
coef = 2*np.sqrt(2*np.log(2))
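        # 2*sqrt(2*ln(2)) converts a Gaussian sigma to a FWHM; the FWHMs along the
        # a and b axes are then combined in quadrature into a single size estimate.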
fwhm = np.sqrt((coef*t['a'])**2 + (coef*t['b'])**2)
        t.add_column(Column(data=fwhm.data, name='FWHM', dtype=float))
# Add ellipticities to table
ellipticities = t['a']/t['b']
        t.add_column(Column(data=ellipticities.data, name='ellipticity', dtype=float))
# Filter out stars based on bright and faint limits
faint_limit = np.percentile(t['flux'], faint_limit_pct)
bright_limit = np.percentile(t['flux'], bright_limit_pct)
self.log.info(f' Faintest {faint_limit_pct:.1f}% flux {faint_limit:f}')
self.log.info(f' Brightest {bright_limit_pct:.1f}% flux {bright_limit:f}')
filtered = (t['a'] < mina) | (t['b'] < minb) | (t['flag'] > 0) | (t['flux'] > bright_limit) | (t['flux'] < faint_limit) | (t['r'] > radius_limit)
self.log.debug(f' Removing {np.sum(filtered):d}/{len(filtered):d}'\
f' extractions from FWHM calculation')
self.log.debug(f" {np.sum( (t['a'] < mina) )} removed for fwhm_mina limit")
self.log.debug(f" {np.sum( (t['b'] < minb) )} removed for fwhm_minb limit")
self.log.debug(f" {np.sum( (t['flag'] > 0) )} removed for source extractor flags")
self.log.debug(f" {np.sum( (t['flux'] < faint_limit) )} removed for faint limit")
self.log.debug(f" {np.sum( (t['flux'] > bright_limit) )} removed for bright limit")
self.action.args.meta['n_objects'] = len(t[~filtered])
self.log.info(f' Found {self.action.args.meta.get("n_objects"):d} stars')
self.action.args.objects = t[~filtered]
self.action.args.objects.sort('flux')
self.action.args.objects.reverse()
if self.action.args.meta.get("n_objects") == 0:
self.log.warning('No stars found')
return self.action.args
else:
FWHM_pix = np.median(t['FWHM'][~filtered])
FWHM_mode_bin = pixel_scale*0.25
FWHM_pix_mode = mode(t['FWHM'][~filtered]/FWHM_mode_bin)*FWHM_mode_bin
self.log.info(f' Median FWHM = {FWHM_pix:.1f} pix ({FWHM_pix*pixel_scale:.2f} arcsec)')
self.log.info(f' Mode FWHM = {FWHM_pix_mode:.1f} pix ({FWHM_pix_mode*pixel_scale:.2f} arcsec)')
ellipticity = np.median(t['ellipticity'][~filtered])
ellipticity_mode_bin = 0.05
ellipticity_mode = mode(t['ellipticity'][~filtered]/ellipticity_mode_bin)*ellipticity_mode_bin
self.log.info(f' Median ellipticity = {ellipticity:.2f}')
self.log.info(f' Mode ellipticity = {ellipticity_mode:.2f}')
self.action.args.meta['fwhm'] = FWHM_pix_mode
self.action.args.meta['ellipticity'] = ellipticity_mode
## Do photutils photometry measurement
positions = [(det['x'], det['y']) for det in self.action.args.objects]
ap_radius = self.cfg['Photometry'].getfloat('aperture_radius', 2)*FWHM_pix
star_apertures = photutils.CircularAperture(positions, ap_radius)
sky_apertures = photutils.CircularAnnulus(positions,
r_in=int(np.ceil(1.5*ap_radius)),
r_out=int(np.ceil(2.0*ap_radius)))
phot_table = photutils.aperture_photometry(
self.action.args.ccddata,
[star_apertures, sky_apertures])
phot_table['sky'] = phot_table['aperture_sum_1'] / sky_apertures.area()
med_sky = np.median(phot_table['sky'])
self.log.info(f' Median Sky = {med_sky.value:.0f} e-/pix')
self.action.args.meta['sky_background'] = med_sky.value
self.action.args.objects.add_column(phot_table['sky'])
bkg_sum = phot_table['aperture_sum_1'] / sky_apertures.area() * star_apertures.area()
final_sum = (phot_table['aperture_sum_0'] - bkg_sum)
final_uncert = (bkg_sum + final_sum)**0.5 * u.electron**0.5
phot_table['apflux'] = final_sum/exptime
self.action.args.objects.add_column(phot_table['apflux'])
phot_table['apuncert'] = final_uncert/exptime
self.action.args.objects.add_column(phot_table['apuncert'])
phot_table['snr'] = final_sum/final_uncert
self.action.args.objects.add_column(phot_table['snr'])
where_bad = (final_sum <= 0)
self.log.info(f' {np.sum(where_bad)} stars rejected for flux < 0')
self.action.args.objects = self.action.args.objects[~where_bad]
self.action.args.meta['n_fluxes'] = len(self.action.args.objects)
self.log.info(f' Fluxes for {self.action.args.meta["n_fluxes"]:d} stars')
return self.action.args
##-----------------------------------------------------------------------------
## Primitive: GetCalibrationStars
##-----------------------------------------------------------------------------
class GetCalibrationStars(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
checks = []
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
return self.action.args
##-----------------------------------------------------------------------------
## Primitive: AssociateCalibratorStars
##-----------------------------------------------------------------------------
class AssociateCalibratorStars(BasePrimitive):
"""
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.log = context.pipeline_logger
self.cfg = self.context.config.instrument
def _pre_condition(self):
"""Check for conditions necessary to run this process"""
checks = []
return np.all(checks)
def _post_condition(self):
"""
Check for conditions necessary to verify that the process ran
correctly.
"""
checks = []
return np.all(checks)
def _perform(self):
"""
Returns an Argument() with the parameters that depend on this
operation.
"""
self.log.info(f"Running {self.__class__.__name__} action")
return self.action.args
```
#### File: iqmon/webpage/__init__.py
```python
from pathlib import Path
import logging
import pymongo
from datetime import datetime, timedelta
from time import sleep
from astroplan import Observer
from astropy import coordinates as c
from astropy.time import Time
##-------------------------------------------------------------------------
## Create logger object
##-------------------------------------------------------------------------
log = logging.getLogger('FlaskLogger')
log.setLevel(logging.DEBUG)
LogFormat = logging.Formatter('%(asctime)s %(levelname)8s: %(message)s')
## Set up console output
## Set up file output
LogFileName = '/usr/local/var/log/flask.log'
LogFileHandler = logging.FileHandler(LogFileName)
LogFileHandler.setLevel(logging.DEBUG)
LogFileHandler.setFormatter(LogFormat)
log.addHandler(LogFileHandler)
##-------------------------------------------------------------------------
## Function: mongo_query
##-------------------------------------------------------------------------
def mongo_query(collection, query_dict, cfg,
distinct=False, count=False, last=False,
sort=[('date', pymongo.ASCENDING)]):
log.debug(f'Connecting to mongo db, collection {collection}')
mongo_host = cfg['mongo'].get('host')
mongo_port = cfg['mongo'].getint('port')
mongo_db = cfg['mongo'].get('db')
mongoclient = pymongo.MongoClient(mongo_host, mongo_port)
mongo_iqmon = mongoclient[mongo_db][collection]
if distinct is True:
query_result = list(mongo_iqmon.distinct(query_dict))
elif count is True:
query_result = mongo_iqmon.find(query_dict).count()
elif last is True:
query_result = list(mongo_iqmon.find(query_dict,
sort=[('date', pymongo.DESCENDING)]).limit(1))
else:
query_result = list(mongo_iqmon.find(query_dict, sort=sort))
mongoclient.close()
return query_result
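# Illustrative usage sketch (the collection and field names below are hypothetical):
#
#     last_image = mongo_query('iqmon', {'telescope': 'V20'}, cfg, last=True)
#     n_images = mongo_query('iqmon', {}, cfg, count=True)
#
# cfg only needs a [mongo] section providing host, port, and db.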
##-------------------------------------------------------------------------
## Function: get_twilights
##-------------------------------------------------------------------------
def get_twilights(start, end, webcfg, nsample=256):
""" Determine sunrise and sunset times """
location = c.EarthLocation(
lat=webcfg['site'].getfloat('site_lat'),
lon=webcfg['site'].getfloat('site_lon'),
height=webcfg['site'].getfloat('site_elevation'),
)
obs = Observer(location=location, name=webcfg['site'].get('name', ''))
sunset = obs.sun_set_time(Time(start), which='next').datetime
sunrise = obs.sun_rise_time(Time(start), which='next').datetime
# Calculate and order twilights and set plotting alpha for each
twilights = [(start, 'start', 0.0),
(sunset, 'sunset', 0.0),
(obs.twilight_evening_civil(Time(start),
which='next').datetime, 'ec', 0.1),
(obs.twilight_evening_nautical(Time(start),
which='next').datetime, 'en', 0.2),
(obs.twilight_evening_astronomical(Time(start),
which='next').datetime, 'ea', 0.3),
(obs.twilight_morning_astronomical(Time(start),
which='next').datetime, 'ma', 0.5),
(obs.twilight_morning_nautical(Time(start),
which='next').datetime, 'mn', 0.3),
(obs.twilight_morning_civil(Time(start),
which='next').datetime, 'mc', 0.2),
(sunrise, 'sunrise', 0.1),
]
twilights.sort(key=lambda x: x[0])
final = {'sunset': 0.1, 'ec': 0.2, 'en': 0.3, 'ea': 0.5,
'ma': 0.3, 'mn': 0.2, 'mc': 0.1, 'sunrise': 0.0}
twilights.append((end, 'end', final[twilights[-1][1]]))
return twilights
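# get_twilights returns a time-sorted list of (datetime, label, shading_alpha)
# tuples spanning [start, end]; overplot_twilights below shades each interval
# between consecutive entries using the alpha of the later entry.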
##-------------------------------------------------------------------------
## Function: overplot_twilights
##-------------------------------------------------------------------------
def overplot_twilights(plot_list, plot_end, webcfg, plot_ndays=1, log=None):
for days in range(1,plot_ndays+1):
if log is not None: log.info(f'Getting twilights for {days} days ago')
twilights = get_twilights(plot_end-timedelta(days=days),
plot_end-timedelta(days=days-1),
webcfg)
for plot_info in plot_list:
for j in range(len(twilights)-1):
name, plot, top, bottom = plot_info
plot.quad(top=[top], bottom=[bottom],
left=[twilights[j][0]], right=[twilights[j+1][0]],
color="blue", alpha=twilights[j+1][2])
if log is not None: log.info(f' Added twilights for {days} days ago')
return
``` |
{
"source": "joshwalawender/KeckStarList",
"score": 2
} |
#### File: KeckStarList/mainland-observing/mainlandobs_stats.py
```python
import sys
import os
from datetime import datetime as dt
import pymysql
import pymysql.cursors
import numpy as np
from astropy.table import Table, Column, vstack
import matplotlib as mpl
mpl.rcParams['font.size'] = 24
import matplotlib.pyplot as plt
def main():
colormap = plt.cm.gist_ncar
semesters = {2005.5: ('2005-08-01', '2006-01-31'),
2006.0: ('2006-02-01', '2006-07-31'),
2006.5: ('2006-08-01', '2007-01-31'),
2007.0: ('2007-02-01', '2007-07-31'),
2007.5: ('2007-08-01', '2008-01-31'),
2008.0: ('2008-02-01', '2008-07-31'),
2008.5: ('2008-08-01', '2009-01-31'),
2009.0: ('2009-02-01', '2009-07-31'),
2009.5: ('2009-08-01', '2010-01-31'),
2010.0: ('2010-02-01', '2010-07-31'),
2010.5: ('2010-08-01', '2011-01-31'),
2011.0: ('2011-02-01', '2011-07-31'),
2011.5: ('2011-08-01', '2012-01-31'),
2012.0: ('2012-02-01', '2012-07-31'),
2012.5: ('2012-08-01', '2013-01-31'),
2013.0: ('2013-02-01', '2013-07-31'),
2013.5: ('2013-08-01', '2014-01-31'),
2014.0: ('2014-02-01', '2014-07-31'),
2014.5: ('2014-08-01', '2015-01-31'),
2015.0: ('2015-02-01', '2015-07-31'),
2015.5: ('2015-08-01', '2016-01-31'),
2016.0: ('2016-02-01', '2016-07-31'),
2016.5: ('2016-08-01', '2017-01-31'),
2017.0: ('2017-02-01', '2017-07-31'),
2017.5: ('2017-08-01', '2018-01-31'),
2018.0: ('2018-02-01', '2018-07-31'),
2018.5: ('2018-08-01', '2019-01-31'),
}
# table_file = 'MainlandObserving.csv'
# if not os.path.exists(table_file):
print('Querying SQL Database')
# Connect to the database
connection = pymysql.connect(host='mysqlserver',
user='sched',
password='<PASSWORD>',
db='schedules',
cursorclass=pymysql.cursors.DictCursor)
names = ['Status', 'Telescope', 'ReqNo', 'AllocInst', 'Site', 'Instrument', 'Portion', 'FromDate', 'Mode', 'NumNights', 'Principal']
dtypes = ['a20', 'a8', 'i8', 'a20', 'a20', 'a20', 'a20', 'a20', 'a20', 'i4', 'a40']
try:
tab = None
for semester in sorted(semesters.keys()):
fields = "ReqNo,FromDate,NumNights,Portion,Telescope,Instrument,AllocInst,Site,Mode,Principal,Status"
table = "mainlandObs"
date1, date2 = semesters[semester]
conditions = ["FromDate between '{}' and '{}'".format(date1, date2),
"status = 'approved'"]
condition = "where {}".format(" and ".join(conditions))
sql = "select {} from {} {}".format(fields, table, condition)
with connection.cursor() as cursor:
cursor.execute(sql)
result = cursor.fetchall()
print('{}: found {:d} mainland requests'.format(semester, len(result)))
if len(result) > 0:
new = Table(result, names=names, dtype=dtypes)
sem = Column([semester]*len(new), name='Semester', dtype='f4')
new.add_column(sem)
if not tab:
tab = new
else:
tab = vstack([tab, new])
finally:
connection.close()
# tab.write(table_file)
# else:
# print('Reading Local File')
# tab = Table.read(table_file)
## Do not use 2017A semester requests
# tab.remove_rows(tab['Semester'] == 2017.0)
## Weight
count = {'Full Night': 1, 'Full': 1, 'First Half': 0.5, 'Second Half': 0.5,
'Other': 0, 'K1': 1, 'K2': 1, 'K1+K2': 2, 'None': 0}
weight = [ count[x['Telescope']] * count[x['Portion']] * float(x['NumNights']) for x in tab ]
unscaledweight = [ count[x['Telescope']] * float(x['NumNights']) for x in tab ]
tab.add_column(Column(weight, name='Weight'))
tab.add_column(Column(unscaledweight, name='Unscaled Weight'))
# print(tab[tab['Site'] == 'ANU'])
## ------------------------------------------------------------------------
## Number of Sites Over Time
## ------------------------------------------------------------------------
tab.sort('FromDate')
sitestab = Table(names=('Site', 'Eavesdrop', 'Mainland Only'), dtype=('a20', 'a10', 'a10'))
for i,entry in enumerate(tab):
sites = entry['Site'].split(' ')
for site in sites:
if site not in sitestab['Site'].data.astype(str) and site != 'Other':
if entry['Mode'] == 'Mainland Only':
sitestab.add_row((site, '-', entry['FromDate']))
elif entry['Mode'] == 'Eavesdrop':
sitestab.add_row((site, entry['FromDate'], '-'))
elif entry['Mode'] in ['Eavesdrop', 'Mainland Only']:
if sitestab[np.where(sitestab['Site'].data.astype(str) == site)][entry['Mode']] == b'-':
sitestab[entry['Mode']][np.where(sitestab['Site'].data.astype(str) == site)] = entry['FromDate']
print('First use date by site and mode:')
print(sitestab)
print('')
## ------------------------------------------------------------------------
## Mainland Only and Eavesdrop Use by Semester
## ------------------------------------------------------------------------
stab = Table(names=('Semester', 'Eavesdrop Nights', 'Mainland Only Nights'),
dtype=('f4', 'f4', 'f4'))
bysemester = tab.group_by('Semester')
mode = {}
for i,val in enumerate(bysemester.groups):
thissemester = bysemester.groups[i]
mainlandonly = thissemester[thissemester['Mode'] == 'Mainland Only']
mainlandonly_sum = sum(mainlandonly['Weight'])
eavesdrop = thissemester[thissemester['Mode'] == 'Eavesdrop']
eavesdrop_sum = sum(eavesdrop['Weight'])
stab.add_row((thissemester[0]['Semester'], eavesdrop_sum, mainlandonly_sum))
plt.figure(figsize=(16,9), dpi=300)
ax1 = plt.gca()
plt.bar(stab['Semester'], stab['Mainland Only Nights'],
width=0.4, color='b', alpha=0.9, label='Mainland Only')
plt.bar(stab['Semester'], stab['Mainland Only Nights']+stab['Eavesdrop Nights'],
width=0.4, color='b', alpha=0.3, label='Eavesdrop')
plt.ylim(0,300)
plt.xticks(np.arange(2006, 2034), ["{:02d}".format(x-2000) for x in np.arange(2006, 2034)])
plt.xlim(2006, 2018.5)
plt.grid(axis='x')
plt.xlabel('Semester')
plt.ylabel('Nights')
plt.legend(loc='best')
ax2 = ax1.twinx()
plt.ylabel('Fraction of Total Nights')
plt.ylim(0,300./365.*2./2.)
plt.grid(axis='y')
plt.savefig('use_by_semester.png', dpi=300, bbox_inches='tight', pad_inches=0.1)
## ------------------------------------------------------------------------
## Use by Site
## ------------------------------------------------------------------------
# sitecounts = {}
# for i,entry in enumerate(tab):
# sites = entry['Site'].split(' ')
# weight = entry['Weight']
# for site in sites:
# if site not in sitecounts.keys():
# sitecounts[site] = weight
# else:
# sitecounts[site] += weight
#
# sitelist = sorted(sitecounts.keys())
# countlist = [sitecounts[site] for site in sitelist]
# labels = ['{}: {:.0f}%'.format(site, sitecounts[site]/sum(countlist)*100.)
# for site in sitelist]
# colors = colormap(np.arange(len(countlist))/len(countlist))
#
# UCcounts = [sitecounts[site] for site in ['UCB', 'UCD', 'UCI', 'UCLA', 'UCR', 'UCSB', 'UCSC', 'UCSD']]
# UCpct = sum(UCcounts) / sum(countlist)*100.
# print('UC System Use = {:.1f} ({:.1f} %)'.format(sum(UCcounts), UCpct))
#
#
# plt.figure(figsize=(12,9), dpi=300)
# ax = plt.gca()
# ax.set_aspect('equal')
# patches, plt_labels = plt.pie(countlist, labels=labels, colors=colors, startangle=90)
# plt_labels[3].get_position()
# plt_labels[3].get_rotation()
# plt_labels[3]._y += 0.02
# plt_labels[3]._x += 0.02
# plt_labels[4].get_position()
# plt_labels[4].get_rotation()
# plt_labels[4]._y -= 0.04
# plt_labels[4]._x += 0.10
# plt_labels[5].get_position()
# plt_labels[5].get_rotation()
# plt_labels[5]._y -= 0.09
# plt_labels[5]._x += 0.09
# plt_labels[13].get_position()
# plt_labels[13].get_rotation()
# plt_labels[13]._y -= 0.03
# plt.title('Use by Site')
# plt.savefig('use_by_site.png', dpi=300, bbox_inches='tight', pad_inches=0.1)
## ------------------------------------------------------------------------
## Use by Instrument
## ------------------------------------------------------------------------
# instruments = {'NIRSPAO': 0., 'NIRSPEC': 0., 'DEIMOS': 0., 'ESI': 0., 'NIRC2': 0.,
# 'LRIS': 0., 'MOSFIRE': 0., 'HIRES': 0., 'OSIRIS': 0.,
# 'Other': 0.}
#
# for i,entry in enumerate(tab):
# instlist = entry['Instrument'].split(',')
# weight = entry['Weight']
# for inst in instlist:
# if inst in instruments.keys():
# instruments[inst] += weight
# else:
# instruments['Other'] += weight
#
# instlist = ['NIRSPAO', 'NIRSPEC', 'DEIMOS', 'ESI', 'NIRC2',
# 'LRIS', 'MOSFIRE', 'HIRES', 'OSIRIS', 'Other']
# countlist = [instruments[inst] for inst in instlist]
# labels = ['{}: {:.0f}%'.format(inst, instruments[inst]/sum(countlist)*100.)
# for inst in instlist]
# colors = colormap(np.arange(len(countlist))/len(countlist))
#
# plt.figure(figsize=(12,9), dpi=300)
# ax = plt.gca()
# ax.set_aspect('equal')
# patches, plt_labels = plt.pie(countlist, labels=labels, colors=colors)
# plt_labels[3].get_position()
# plt_labels[3].get_rotation()
# plt_labels[3]._x += 0.11
# plt_labels[9].get_position()
# plt_labels[9].get_rotation()
# plt_labels[9]._y -= 0.03
# plt.title('Use by Instrument')
# plt.savefig('use_by_instrument.png', dpi=300, bbox_inches='tight', pad_inches=0.1)
#
# K1counts = [instruments[inst] for inst in ['LRIS', 'MOSFIRE', 'HIRES', 'OSIRIS']]
# K2counts = [instruments[inst] for inst in ['NIRSPAO', 'NIRSPEC', 'DEIMOS', 'ESI', 'NIRC2']]
# K1total = sum(K1counts)
# K2total = sum(K2counts)
# K1pct = K1total / sum(countlist)*100.
# K2pct = K2total / sum(countlist)*100.
# print('Total K1 nights = {:.1f} ({:.1f} %)'.format(K1total, K1pct))
# print('Total K2 nights = {:.1f} ({:.1f} %)'.format(K2total, K2pct))
if __name__ == '__main__':
main()
```
#### File: KeckStarList/SlitAlign/slitAlign.py
```python
import sys
import os
import argparse
import logging
from collections import OrderedDict
from matplotlib import pyplot as plt
from scipy import ndimage
import numpy as np
from astropy.io import fits
from astropy import units as u
from astropy.modeling import models, fitting, Fittable2DModel, Parameter
from astropy.table import Table
from ccdproc import CCDData, combine, Combiner, flat_correct, trim_image, median_filter
##-------------------------------------------------------------------------
## Create logger object
##-------------------------------------------------------------------------
log = logging.getLogger('MyLogger')
log.setLevel(logging.DEBUG)
## Set up console output
LogConsoleHandler = logging.StreamHandler()
LogConsoleHandler.setLevel(logging.DEBUG)
LogFormat = logging.Formatter('%(asctime)s %(levelname)8s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
LogConsoleHandler.setFormatter(LogFormat)
log.addHandler(LogConsoleHandler)
## Set up file output
# LogFileName = None
# LogFileHandler = logging.FileHandler(LogFileName)
# LogFileHandler.setLevel(logging.DEBUG)
# LogFileHandler.setFormatter(LogFormat)
# log.addHandler(LogFileHandler)
##-------------------------------------------------------------------------
## mosfireAlignmentBox
##-------------------------------------------------------------------------
class mosfireAlignmentBox(Fittable2DModel):
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
x_width = Parameter(default=1)
y_width = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
'''MOSFIRE Alignment Box.
Typical widths are 22.5 pix horizontally and 36.0 pix vertically.
Angle of slit relative to pixels is 3.78 degrees.
'''
slit_angle = -3.7 # in degrees
x0_of_y = x_0 + (y-y_0)*np.sin(slit_angle*np.pi/180)
x_range = np.logical_and(x >= x0_of_y - x_width / 2.,
x <= x0_of_y + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, u.Quantity):
            return u.Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['y']),
('x_width', inputs_unit['x']),
('y_width', inputs_unit['y']),
('amplitude', outputs_unit['z'])])
##-------------------------------------------------------------------------
## Transformations (copied from CSU initializer code)
##-------------------------------------------------------------------------
def pad(x):
'''Pad array for affine transformation.
'''
return np.hstack([x, np.ones((x.shape[0], 1))])
def unpad(x):
'''Unpad array for affine transformation.
'''
return x[:,:-1]
def slit_to_bars(slit):
'''Given a slit number (1-46), return the two bar numbers associated
with that slit.
'''
return (slit*2-1, slit*2)
def bar_to_slit(bar):
    '''Given a bar number, return the slit associated with that bar.
'''
return int((bar+1)/2)
def pixel_to_physical(x):
'''Using the affine transformation determined by `fit_transforms`,
convert a set of pixel coordinates (X, Y) to physical coordinates (mm,
slit).
'''
Apixel_to_physical = [[ -1.30490576e-01, 8.06611058e-05, 0.00000000e+00],
[ -4.19125389e-04, -2.25757176e-02, 0.00000000e+00],
[ 2.73934450e+02, 4.66399772e+01, 1.00000000e+00]]
x = np.array(x)
result = unpad(np.dot(pad(x), Apixel_to_physical))
return result
def physical_to_pixel(x):
'''Using the affine transformation determined by `fit_transforms`,
convert a set of physical coordinates (mm, slit) to pixel coordinates
(X, Y).
'''
Aphysical_to_pixel = [[ -7.66328913e+00, -2.73804045e-02, 0.00000000e+00],
[ 1.42268848e-01, -4.42948641e+01, 0.00000000e+00],
[ 2.09260502e+03, 2.07341206e+03, 1.00000000e+00]]
x = np.array(x)
result = unpad(np.dot(pad(x), Aphysical_to_pixel))
return result
def fit_transforms(pixels, targets):
'''Given a set of pixel coordinates (X, Y) and a set of target
coordinates (X, Y), fit the affine transformations (forward and
backward) to convert between the two coordinate systems.
'''
if type(pixels) == list:
pixels = np.array(pixels)
if type(targets) == list:
targets = np.array(targets)
assert pixels.shape[1] == 2
assert targets.shape[1] == 2
assert pixels.shape[0] == targets.shape[0]
# Pad the data with ones, so that our transformation can do translations too
n = pixels.shape[0]
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:,:-1]
X = pad(pixels)
Y = pad(targets)
# Solve the least squares problem X * A = Y
# to find our transformation matrix A
A, res, rank, s = np.linalg.lstsq(X, Y, rcond=None)
A[np.abs(A) < 1e-10] = 0
# Check Scale
thetas = np.array([np.arcsin(A[0,1])*180/np.pi, np.arcsin(A[1,0])*-180/np.pi])
thetadiff = np.abs(thetas[0] - thetas[1])
Sx = A[0,0]/np.cos(np.mean(thetas)*np.pi/180)
Sy = A[1,1]/np.cos(np.mean(thetas)*np.pi/180)
print(f"Scale Factor: {Sx:.4f}, {Sy:.4f}")
off_X = -A[2,0]
off_Y = -A[2,1]
off_R = -np.mean(thetas)
err_R = thetadiff/2
return (off_X, off_Y, off_R, err_R, A)
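# The decomposition above assumes the fitted affine matrix is approximately a
# rotation by theta combined with independent X/Y scales, i.e. its upper-left
# 2x2 block is
#
#     [[ Sx*cos(theta),  sin(theta)    ],
#      [ -sin(theta),    Sy*cos(theta) ]]
#
# so theta is recovered from the off-diagonal terms (arcsin), the scale factors
# from the diagonal terms divided by cos(theta), and the third row of the padded
# matrix carries the translation (sign-flipped into off_X / off_Y).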
##-------------------------------------------------------------------------
## Fit CSU Edges (copied from CSU initializer code)
##-------------------------------------------------------------------------
def fit_CSU_edges(profile):
fitter = fitting.LevMarLSQFitter()
amp1_est = profile[profile == min(profile)][0]
mean1_est = np.argmin(profile)
amp2_est = profile[profile == max(profile)][0]
mean2_est = np.argmax(profile)
g_init1 = models.Gaussian1D(amplitude=amp1_est, mean=mean1_est, stddev=2.)
g_init1.amplitude.max = 0
g_init1.amplitude.min = amp1_est*0.9
g_init1.stddev.max = 3
g_init2 = models.Gaussian1D(amplitude=amp2_est, mean=mean2_est, stddev=2.)
g_init2.amplitude.min = 0
g_init2.amplitude.min = amp2_est*0.9
g_init2.stddev.max = 3
model = g_init1 + g_init2
fit = fitter(model, range(0,profile.shape[0]), profile)
# Check Validity of Fit
if abs(fit.stddev_0.value) <= 3 and abs(fit.stddev_1.value) <= 3\
and fit.amplitude_0.value < -1 and fit.amplitude_1.value > 1\
and fit.mean_0.value > fit.mean_1.value:
x = [fit.mean_0.value, fit.mean_1.value]
x1 = int(np.floor(min(x)-1))
x2 = int(np.ceil(max(x)+1))
else:
x1 = None
x2 = None
return x1, x2
##-------------------------------------------------------------------------
## Create Master Flat
##-------------------------------------------------------------------------
def create_master_flat(filepath='../../../KeckData/MOSFIRE_FCS/',
flatfiles = ['m180130_0320.fits',
'm180130_0321.fits',
'm180130_0322.fits',
'm180130_0323.fits',
'm180130_0324.fits',],
darkfile = 'm180130_0001.fits',
):
dark = CCDData.read(os.path.join(filepath, darkfile), unit='adu')
flats = []
for i,file in enumerate(flatfiles):
flat = CCDData.read(os.path.join(filepath, file), unit='adu')
flat = flat.subtract(dark)
flats.append(flat)
flat_combiner = Combiner(flats)
flat_combiner.sigma_clipping()
scaling_func = lambda arr: 1/np.ma.average(arr)
flat_combiner.scaling = scaling_func
masterflat = flat_combiner.median_combine()
masterflat.write('masterflat.fits', overwrite=True)
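# Usage sketch (paths and file names are hypothetical; the defaults above
# point at a local MOSFIRE_FCS data directory):
#   create_master_flat(filepath='/path/to/MOSFIRE_FCS/',
#                      flatfiles=['flat1.fits', 'flat2.fits'],
#                      darkfile='dark.fits')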
##-------------------------------------------------------------------------
## Reduce Image
##-------------------------------------------------------------------------
def reduce_image(imagefile, dark=None, flat=None):
im = CCDData.read(imagefile, unit='adu')
if dark is not None:
dark = CCDData.read(dark, unit='adu')
im = im.subtract(dark)
if flat is not None:
# masterflat = CCDData.read(flat, unit='adu')
hdul = fits.open(flat)
masterflat = CCDData(data=hdul[0].data, uncertainty=None, meta=hdul[0].header, unit='adu')
im = flat_correct(im, masterflat)
return im
# im = fits.open(imagefile)
# if dark is not None:
# masterdark = fits.open(dark)
# im[0].data -= masterdark[0].data
# if flat is not None:
# masterflat = fits.open(flat)
# norm = np.nanmedian(masterflat[0].data)
# im[0].data /= (masterflat[0].data / norm)
# return im
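# Usage sketch (file names are hypothetical):
#   im = reduce_image('m180130_0330.fits', dark='m180130_0001.fits',
#                     flat='masterflat.fits')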
##-------------------------------------------------------------------------
## fit_alignment_box
##-------------------------------------------------------------------------
def fit_alignment_box(region, box_size=30, verbose=False, seeing=None,
medfilt=False):
pixelscale = u.pixel_scale(0.1798*u.arcsec/u.pixel)
if medfilt is True:
region = median_filter(region, size=(3,3))
# Estimate center of alignment box
threshold_pct = 80
window = region.data > np.percentile(region.data, threshold_pct)
alignment_box_position = ndimage.measurements.center_of_mass(window)
offset_val = np.median(region.data[~window])
offset = models.Const2D(offset_val)
# Determine fluctuations in sky
sky_amplitude = np.median(region.data[window])
sky_fluctuations = np.std(region.data[window])
# Detect box edges
gradx = np.gradient(region.data, axis=1)
horizontal_profile = np.sum(gradx, axis=0)
h_edges = fit_CSU_edges(horizontal_profile)
grady = np.gradient(region.data, axis=0)
vertical_profile = np.sum(grady, axis=1)
v_edges = fit_CSU_edges(vertical_profile)
# Estimate stellar position
maxr = np.max(region.data)
    starloc = (np.where(region.data == maxr)[0][0],
               np.where(region.data == maxr)[1][0])
# Build model of sky, star, & box
boxamplitude = 1
box = mosfireAlignmentBox(boxamplitude, alignment_box_position[1], alignment_box_position[0],\
abs(h_edges[0]-h_edges[1]), abs(v_edges[0]-v_edges[1]))
box.amplitude.fixed = True
box.x_width.min = 10
box.y_width.min = 10
sky = models.Const2D(sky_amplitude)
sky.amplitude.min = 0
star_amplitude = maxr - sky_amplitude
star_sigma = star_amplitude / sky_fluctuations
if star_sigma < 5:
if verbose: print(f'No star detected. sigma={star_sigma:.1f}')
return [None]*4
else:
if verbose: print(f'Detected peak pixel {star_sigma:.1f} sigma above sky.')
star = models.Gaussian2D(amplitude=star_amplitude,
x_mean=starloc[1], y_mean=starloc[0],
x_stddev=2, y_stddev=2)
# print(h_edges)
# print(v_edges)
# star.y_mean.min = v_edges[0]
# star.y_mean.max = v_edges[1]
# star.x_mean.min = h_edges[0]
# star.x_mean.max = h_edges[1]
star.amplitude.min = 5*sky_fluctuations
star.x_stddev.min = 1 # FWHM = 2.355*stddev = 0.42 arcsec FWHM
star.x_stddev.max = 4 # FWHM = 2.355*stddev = 1.47 arcsec FWHM
star.y_stddev.min = 1
star.y_stddev.max = 4
if seeing is not None and seeing > 0:
sigma = (seeing / 2.355 * u.arcsec).to(u.pixel, equivalencies=pixelscale)
star.x_stddev.min = max(2, sigma.value-1)
star.y_stddev.min = max(2, sigma.value-1)
star.x_stddev.max = min(sigma.value+1, 4)
star.y_stddev.max = min(sigma.value+1, 4)
# print(f"Using seeing value {seeing} arcsec. sigma limits {star.x_stddev.min}, {star.x_stddev.max} pix")
model = box*(sky + star) + offset
# modelim = np.zeros((61,61))
# fitim = np.zeros((61,61))
# for i in range(0,60):
# for j in range(0,60):
# modelim[j,i] = model(i,j)
# fitim[j,i] = model(i,j)
# residuals = region.data-fitim
# residualsum = np.sum(residuals)
# import pdb ; pdb.set_trace()
fitter = fitting.LevMarLSQFitter()
y, x = np.mgrid[:2*box_size+1, :2*box_size+1]
fit = fitter(model, x, y, region.data)
FWHMx = 2*(2*np.log(2))**0.5*fit.x_stddev_2.value * u.pix
FWHMy = 2*(2*np.log(2))**0.5*fit.y_stddev_2.value * u.pix
FWHM = (FWHMx**2 + FWHMy**2)**0.5/2**0.5
FWHMarcsec = FWHM.to(u.arcsec, equivalencies=pixelscale)
sky_amplitude = fit.amplitude_1.value
star_flux = 2*np.pi*fit.amplitude_2.value*fit.x_stddev_2.value*fit.y_stddev_2.value
star_amplitude = fit.amplitude_2.value
boxpos_x = fit.x_0_0.value
boxpos_y = fit.y_0_0.value
star_x = fit.x_mean_2.value
star_y = fit.y_mean_2.value
if verbose: print(f" Box X Center = {boxpos_x:.0f}")
if verbose: print(f" Box Y Center = {boxpos_y:.0f}")
if verbose: print(f" Sky Brightness = {fit.amplitude_1.value:.0f} ADU")
if verbose: print(f" Stellar FWHM = {FWHMarcsec:.2f}")
if verbose: print(f" Stellar Xpos = {star_x:.0f}")
if verbose: print(f" Stellar Xpos = {star_y:.0f}")
if verbose: print(f" Stellar Amplitude = {star_amplitude:.0f} ADU")
if verbose: print(f" Stellar Flux (fit) = {star_flux:.0f} ADU")
result = {'Star X': star_x,
'Star Y': star_y,
'Star Amplitude': star_amplitude,
'Sky Amplitude': sky_amplitude,
'FWHM pix': FWHM.value,
'FWHM arcsec': FWHMarcsec,
'Box X': boxpos_x,
'Box Y': boxpos_y,
# 'Residuals': residuals,
}
return result
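# Usage sketch (assumes `region` is a CCDData cutout roughly centered on an
# alignment box, e.g. from trim_image as in analyze_image below):
#   result = fit_alignment_box(region, box_size=30, seeing=0.8, medfilt=True)
#   if isinstance(result, dict):  # a list of Nones is returned when no star is found
#       print(result['FWHM arcsec'], result['Star X'], result['Star Y'])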
def analyze_image(imagefile, dark=None, flat=None, box_size=30, medfilt=False,
plot=False, seeing=0, pixelscale=0.1798, verbose=False):
im = reduce_image(imagefile, dark=dark, flat=flat)
hdul = fits.open(imagefile)
# Get info about alignment box positions
alignment_box_table = Table(hdul[4].data)
if plot == True:
plt.figure(figsize=(16,6))
pixels = []
targets = []
for i,box in enumerate(alignment_box_table):
result = None
slitno = int(box['Slit_Number'])
bar_nos = slit_to_bars(slitno)
bar_pos = [hdul[0].header.get(f'B{b:02d}POS') for b in bar_nos]
box_pos = np.mean(bar_pos)
box_pix = physical_to_pixel([[box_pos, slitno]])[0]
boxat = [int(box_pix[0]), int(box_pix[1])]
fits_section = f'[{boxat[0]-box_size:d}:{boxat[0]+box_size:d}, '\
f'{boxat[1]-box_size:d}:{boxat[1]+box_size:d}]'
region = trim_image(im, fits_section=fits_section)
targ_pos = float(box['Target_to_center_of_slit_distance'])/pixelscale
if plot == True:
plt.subplot(1,len(alignment_box_table),i+1, aspect='equal')
plt.title(f"Alignment Box {i+1}\n{fits_section}")
plt.imshow(region.data, origin='lower',
vmin=np.percentile(region.data, 85)*0.95,
vmax=region.data.max()*1.02)
# try:
result = fit_alignment_box(region, box_size=box_size, verbose=False,
seeing=seeing, medfilt=medfilt)
star_pix = np.array([result['Star X']+boxat[0]-box_size,
result['Star Y']+boxat[1]-box_size])
fitted_box_pix = np.array([result['Box X']+boxat[0]-box_size,
result['Box Y']+boxat[1]-box_size])
slitang = 0.22*np.pi/180
targ_pix_im = (result['Box X']-np.sin(slitang)*targ_pos,
result['Box Y']+np.cos(slitang)*targ_pos)
targ_pix = np.array([targ_pix_im[0]+boxat[0]-box_size,
targ_pix_im[1]+boxat[1]-box_size])
pixels.append(list(star_pix))
targets.append(list(targ_pix))
pix_err = targ_pix - star_pix
pos_err = pix_err*pixelscale
if plot == True:
cxy = (result['Star X'], result['Star Y'])
c = plt.Circle(cxy, result['FWHM pix'], linewidth=2, ec='g', fc='none', alpha=0.3)
ax = plt.gca()
ax.add_artist(c)
plt.plot(result['Star X'], result['Star Y'], 'g.')
# plt.plot(result['Box X'], result['Box Y'], 'y+', alpha=0.5, ms=10)
plt.plot(targ_pix_im[0], targ_pix_im[1], 'rx', alpha=0.5)
if verbose:
print(f"Alignment Box {i+1} results:")
print(f" Sky Amplitude: {result['Sky Amplitude']:.0f} ADU")
print(f" Star Amplitude: {result['Star Amplitude']:.0f} ADU")
print(f" Star FWHM: {result['FWHM arcsec']:.2f}")
print(f" Star Position: {star_pix[0]:.1f}, {star_pix[1]:.1f}")
print(f" Target Position: {targ_pix[0]:.1f}, {targ_pix[1]:.1f}")
print(f" Position Error: {pos_err[0]:+.2f}, {pos_err[1]:+.2f} arcsec")
# except:
# print(f'Alignment Box {i+1} failed: {result}')
if plot == True:
plt.xticks([], [])
plt.yticks([], [])
# Calculate Transformation
off_Xpix, off_Ypix, off_R, err_R, A = fit_transforms(pixels, targets)
off_X = off_Xpix * pixelscale
off_Y = off_Ypix * pixelscale
th_XY = 0.10
th_R = 0.030
send_X = off_X if abs(off_X) > th_XY else 0
send_Y = off_Y if abs(off_Y) > th_XY else 0
send_R = off_R if abs(off_R) > th_R else 0
print()
print(f" Calculated Err Send (Threshold)")
print(f"Offset X = {off_X:+.2f} {send_X:+.2f} arcsec ({th_XY:.2f})")
print(f"Offset Y = {off_Y:+.2f} {send_Y:+.2f} arcsec ({th_XY:.2f})")
print(f"Rotation = {off_R:+.3f} {err_R:.3f} {send_R:+.3f} deg ({th_R:.3f})")
if plot == True:
plt.show()
if __name__ == '__main__':
##-------------------------------------------------------------------------
## Parse Command Line Arguments
##-------------------------------------------------------------------------
## create a parser object for understanding command-line arguments
p = argparse.ArgumentParser(description='''
''')
## add flags
p.add_argument("-v", "--verbose", dest="verbose",
default=False, action="store_true",
help="Be verbose! (default = False)")
p.add_argument("-m", "--medfilt", dest="medfilt",
default=False, action="store_true",
help="Median filter images?")
p.add_argument("-p", "--plot", dest="plot",
default=False, action="store_true",
help="Generate plots?")
## add options
p.add_argument("-d", "--dark", dest="dark", type=str,
help="Dark file to use.")
p.add_argument("-f", "--flat", dest="flat", type=str,
help="Master flat file to use.")
p.add_argument("-s", "--seeing", dest="seeing", type=float,
default=0,
help="Seeing in arcsec.")
## add arguments
p.add_argument('image', type=str,
help="Image file to analyze")
# p.add_argument('allothers', nargs='*',
# help="All other arguments")
args = p.parse_args()
if args.dark is not None:
args.dark = os.path.expanduser(args.dark)
if args.flat is not None:
args.flat = os.path.expanduser(args.flat)
args.image = os.path.expanduser(args.image)
    analyze_image(args.image, dark=args.dark, flat=args.flat, box_size=30,
                  medfilt=args.medfilt, plot=args.plot, seeing=args.seeing,
                  verbose=args.verbose)
``` |
{
"source": "joshwalawender/KeckUtilities",
"score": 2
} |
#### File: KeckUtilities/telescopeSchedule/site_use.py
```python
import sys
from pathlib import Path
import numpy as np
from astropy.table import Table, Column, vstack
from datetime import datetime, timedelta
from telescopeSchedule import get_telsched
from matplotlib import pyplot as plt
site_list = sorted(['ANU', 'CIT', 'UCB', 'UCD', 'UCLA', 'UCSD', 'UCI', 'UCR', 'Yale',
'USRA', 'NU', 'HQ', 'IfA', 'Stanford', 'Swinburne', 'UCSB', 'UCSC'])
site_list.append('Other')
group_list = ['UC', 'CIT', 'IfA+US', 'Australia', 'HQ', 'Other']
group_members = {'UC': ['UCB', 'UCD', 'UCLA', 'UCSD', 'UCI', 'UCR', 'UCSB', 'UCSC', 'USCS'],
'IfA+US': ['Yale', 'USRA', 'NU', 'IfA', 'Stanford', 'Northwestern'],
'Australia': ['ANU', 'Swinburne', 'Swin'],
'CIT': ['CIT'],
'Other': ['Other'],
'HQ': ['HQ']}
##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
def get_site_table_single_query(from_date=None, ndays=5):
if ndays > 100:
ndays = 100
sched = get_telsched(from_date=from_date, ndays=ndays, telnr=None)
# site_list.append('Group: UC')
# site_list.append('Group: US')
# site_list.append('Group: Australia')
# site_list.append('Group: CIT')
# site_list.append('Group: Other')
# site_list.append('Group: HQ')
t = Table(names=['Date'] + site_list,
dtype=['a10'] + [int]*len(site_list))
for prog in sched:
if prog['Date'] not in list(t['Date']):
# print(f"Adding {prog['Date']}")
row = {'Date': prog['Date']}
for site in site_list:
row[site] = 0
t.add_row(row)
tonights_sites = prog['Location'].split(',')
# print(prog['ProjCode'], tonights_sites)
t.add_index('Date')
rowid = t.loc_indices[prog['Date']]
for entry in tonights_sites:
if entry in site_list:
t[rowid][entry] += 1
elif entry == 'Swin':
t[rowid]['Swinburne'] += 1
elif entry == 'Northwestern':
t[rowid]['NU'] += 1
elif entry == 'USCS':
t[rowid]['UCSC'] += 1
elif entry == '':
pass
else:
print(f'Unmatched entry: "{entry}"')
return t
def get_site_table(from_date=None, ndays=5):
t = get_site_table_single_query(from_date=from_date, ndays=ndays)
last_date = datetime.strptime(t['Date'][-1], '%Y-%m-%d')
while len(t) < ndays:
need_more_days = ndays - len(t)
print(f"At {last_date.strftime('%Y-%m-%d')}, Need {need_more_days} more days")
new_t = get_site_table_single_query(
from_date=last_date.strftime('%Y-%m-%d'),
ndays=need_more_days)
t = vstack([t, new_t])
last_date = datetime.strptime(t['Date'][-1], '%Y-%m-%d')
return t
def analyze_site_table(t):
# Prepare table with sites grouped by partner
g = Table(names=['Date'] + group_list,
dtype=['a10'] + [int]*len(group_list))
# Add Observer Count Column
observer_count = []
for row in t:
c = 0
for col in row.colnames:
if type(row[col]) == np.int64 and row[col] > 0:
c += row[col]
observer_count.append(c)
grow = [row['Date']]
grow.extend([0]*len(group_list))
g.add_row(grow)
for col in row.colnames:
if type(row[col]) == np.int64 and row[col] > 0:
for group in group_list:
if col in group_members[group]:
g[-1][group] += row[col]
gc = 0
for group in group_list:
if g[-1][group] > 0:
gc += g[-1][group]
if c != gc:
print(c, gc)
t.add_column(Column(data=observer_count, name='Observer Count'))
g.add_column(Column(data=observer_count, name='Observer Count'))
for group in group_list:
frac = g[group]/g['Observer Count']
g.add_column(Column(data=frac, name=f'{group} fraction'))
# Bin groups data
b = Table(names=['Date'] + group_list + ['Observer Count'],
dtype=['a10'] + [int]*(len(group_list)+1))
binsize = 29
nbinnedrows = int(np.floor(len(g)/binsize))
for i in np.arange(0, nbinnedrows, 1):
grows = g[i*binsize:(i+1)*binsize]
brow = [grows[0]['Date']]
brow.extend([0]*(len(group_list)+1))
for j,group in enumerate(group_list):
brow[j+1] = np.sum(grows[group])
brow[-1] = np.sum(grows['Observer Count'])
b.add_row(brow)
for group in group_list:
frac = b[group]/b['Observer Count']
b.add_column(Column(data=frac, name=f'{group} fraction'))
return t, g, b
def plot_grouped_site_use(g):
dates = [datetime.strptime(d, '%Y-%m-%d') for d in g['Date']]
plt.figure(figsize=(16,8))
plt.title('Site Use Over Time')
colors = ['b', 'y', 'k', 'k', 'r', 'g']
alphas = [0.4, 0.4, 0.4, 0.2, 0.2, 0.4]
previous_fracs = np.zeros(len(g))
for i,group in enumerate(group_list):
plt.fill_between(dates,
previous_fracs,
previous_fracs+g[f'{group} fraction'],
facecolor=colors[i], alpha=alphas[i],
step='post',
label=group)
previous_fracs += g[f'{group} fraction']
plt.grid()
plt.ylim(0, 1.0)
plt.ylabel('Fraction of Observers')
plt.legend(loc='upper right')
cax = plt.gca().twinx()
cax.plot_date(dates, g['Observer Count'], 'k-',
label='N Observers',
drawstyle='steps-post')
cax.set_ylabel('Number of Observers')
cax.set_ylim(100, 350)
margin_frac = 0.15
margin_days = (max(dates) - min(dates)).days*margin_frac
plt.xlim(dates[0], dates[-1]+timedelta(days=margin_days))
plt.legend(loc='center right')
plt.savefig('Site_Use_Over_Time.png', bbox_inches='tight')
# plt.show()
if __name__ == '__main__':
from_date = '2018-02-01'
file = Path(f'site_use_from_{from_date}.csv')
ndays = (datetime.now() - datetime.strptime(from_date, '%Y-%m-%d')).days
if file.exists() is False:
print('Querying database')
t = get_site_table(from_date=from_date, ndays=ndays)
t.write(file, format='ascii.csv')
else:
print('Reading file on disk')
t = Table.read(file)
t, g, b = analyze_site_table(t)
plot_grouped_site_use(b)
``` |
{
"source": "joshwalawender/PypeIt",
"score": 2
} |
#### File: doc/scripts/build_specobj_rst.py
```python
import os
import time
import numpy
from pkg_resources import resource_filename
from pypeit.par.parset import ParSet
from pypeit import specobj
from IPython import embed
def link_string(p):
return '`{0} Keywords`_'.format(type(p).__name__)
#-----------------------------------------------------------------------------
if __name__ == '__main__':
t = time.perf_counter()
# Read the baseline file that is not changed and must be edited by
# the person building the documentation as necessary.
pypeit_root = os.path.dirname(resource_filename('pypeit', ''))
input_base = os.path.join(pypeit_root, 'doc', 'scripts', 'base_specobj_rst.txt')
with open(input_base, 'r') as f:
lines = [ l.replace('\n','') for l in f.readlines() ]
lines += ['']
# Start to append the automatically generated documentation
lines += ['Current SpecObj Data Model']
lines += ['++++++++++++++++++++++++++']
lines += ['']
data_model = specobj.data_model
keys = list(data_model.keys())
keys.sort()
data_table = numpy.empty((len(data_model)+1, 4), dtype=object)
data_table[0,:] = ['Key', 'Obj Type', 'Array Type', 'Description']
for i,k in enumerate(keys):
# Key
data_table[i+1,0] = ParSet._data_string(k, use_repr=False, verbatum=True)
# Object Type
if isinstance(data_model[k]['otype'], (list,tuple)):
data_table[i+1,1] = ', '.join([t.__name__ for t in data_model[k]['otype']])
else:
data_table[i+1,1] = data_model[k]['otype'].__name__
# Array type
if 'atype' in data_model[k].keys():
data_table[i+1,2] = data_model[k]['atype'].__name__
else:
data_table[i+1,2] = ' '
# Description
data_table[i+1,3] = ParSet._data_string(data_model[k]['desc'])
lines += [ParSet._data_table_string(data_table, delimeter='rst')]
# Finish
output_rst = os.path.join(pypeit_root, 'doc', 'specobj.rst')
with open(output_rst, 'w') as f:
f.write('\n'.join(lines))
print('Wrote: {}'.format(output_rst))
print('Elapsed time: {0} seconds'.format(time.perf_counter() - t))
```
#### File: PypeIt/pypeit/biasframe.py
```python
import numpy as np
import os
from IPython import embed
from pypeit import msgs
from pypeit import masterframe
from pypeit.par import pypeitpar
from pypeit.images import calibrationimage
from pypeit.images import pypeitimage
class BiasFrame(calibrationimage.CalibrationImage, masterframe.MasterFrame):
"""
Class to generate/load the Bias image or instructions on how to deal
with the bias.
This class is primarily designed to generate a Bias frame for bias
subtraction. It also contains I/O methods for the Master frames of
PypeIt. The build_master() method will return a simple command
(str) if that is the specified parameter (`par['useframe']`).
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
Spectrograph used to take the data.
files (:obj:`list`, optional):
List of filenames to process.
det (:obj:`int`, optional):
The 1-indexed detector number to process.
par (:class:`pypeit.par.pypeitpar.FrameGroupPar`, optional):
The parameters used to process the frames. If None, set
to::
pypeitpar.FrameGroupPar('bias')
master_key (:obj:`str`, optional):
The string identifier for the instrument configuration. See
:class:`pypeit.masterframe.MasterFrame`.
master_dir (:obj:`str`, optional):
Path to master frames
reuse_masters (:obj:`bool`, optional):
Load from disk if possible
"""
# Frame type is a class attribute
frametype = 'bias'
master_type = 'Bias'
@classmethod
def from_master_file(cls, master_file, par=None):
"""
Instantiate from a master file
Args:
master_file (str):
par (:class:`pypeit.par.pypeitpar.FrameGroupPar`, optional):
Returns:
biasframe.BiasFrame:
The PypeItImage is loaded into self.pypeitImage
"""
# Spectrograph
spectrograph, extras = masterframe.items_from_master_file(master_file)
head0 = extras[0]
# Master info
master_dir = head0['MSTRDIR']
master_key = head0['MSTRKEY']
# Instantiate
slf = cls(spectrograph, par=par, master_dir=master_dir, master_key=master_key,
reuse_masters=True)
slf.pypeitImage = slf.load(ifile=master_file)
# Return
return slf
# Keep order same as processimages (or else!)
def __init__(self, spectrograph, files=None, det=1, par=None, master_key=None,
master_dir=None, reuse_masters=False):
# Parameters
self.par = pypeitpar.FrameGroupPar(self.frametype) if par is None else par
# Start us up
calibrationimage.CalibrationImage.__init__(self, spectrograph, det, self.par['process'], files=files)
# MasterFrames: Specifically pass the ProcessImages-constructed
# spectrograph even though it really only needs the string name
masterframe.MasterFrame.__init__(self, self.master_type, master_dir=master_dir,
master_key=master_key, reuse_masters=reuse_masters)
# Processing steps
self.process_steps = []
if self.par['process']['overscan'].lower() != 'none':
self.process_steps.append('subtract_overscan')
self.process_steps += ['trim']
self.process_steps += ['orient']
def build_image(self, overwrite=False, trim=True):
"""
Grab the bias files (as needed) and then process the input bias
frames with :func:`pypeit.processimages.ProcessImages.process`.
Args:
            overwrite (:obj:`bool`, optional):
Regenerate the combined image
trim (:obj:`bool`, optional):
If True, trim the image
Returns:
`numpy.ndarray`_: Combined, processed image.
"""
# Nothing?
if self.par['useframe'].lower() == 'none':
msgs.info("Bias image subtraction not activated.")
return None
if self.nfiles == 0:
msgs.info("No bias frames provided. No bias image will be generated or used")
return None
# Build
        self.pypeitImage = super(BiasFrame, self).build_image(ignore_saturation=True)
        self.pypeitImage.ivar = None  # Zero this out as it is nonsensical
        # Return
        return self.pypeitImage
def save(self, outfile=None, overwrite=True):
"""
Save the bias master data.
Args:
outfile (:obj:`str`, optional):
Name for the output file. Defaults to
:attr:`file_path`.
overwrite (:obj:`bool`, optional):
Overwrite any existing file.
"""
# Some checks
if self.pypeitImage is None:
msgs.warn('No MasterBias to save!')
return
if not self.pypeitImage.validate():
msgs.warn('MasterBias is not a proper image.')
return
# Proceed
_outfile = self.master_file_path if outfile is None else outfile
# Check if it exists
if os.path.exists(_outfile) and not overwrite:
msgs.warn('Master file exists: {0}'.format(_outfile) + msgs.newline()
+ 'Set overwrite=True to overwrite it.')
return
# Save
hdr = self.build_master_header(steps=self.process_steps, raw_files=self.file_list)
self.pypeitImage.write(_outfile, hdr=hdr, iext='BIAS')
msgs.info('Master frame written to {0}'.format(_outfile))
#super(BiasFrame, self).save(self.pypeitImage, 'BIAS', outfile=outfile, overwrite=overwrite,
# raw_files=self.file_list, steps=self.process_steps)
def load(self, ifile=None):
"""
Load the bias frame according to how par['useframe'] is set.
Args:
ifile (:obj:`str`, optional):
Name of the master frame file. Defaults to
:attr:`file_path`.
Returns:
Returns either the `numpy.ndarray`_ with the bias image
or None if no bias is to be subtracted.
"""
# How are we treating biases?
# 1) No bias subtraction
if self.par['useframe'].lower() == 'none':
msgs.info("Will not perform bias/dark subtraction")
return None
# 2) Use overscan
if self.par['useframe'] == 'overscan':
msgs.error("useframe=overscan was Deprecated. Remove it from your pypeit file")
# 3) User wants bias subtractions
if self.par['useframe'] in ['bias', 'dark']:
# Check on whether to reuse and whether the file exists
master_file = self.chk_load_master(ifile)
if master_file is None:
return
else: # Load
self.pypeitImage = pypeitimage.PypeItImage.from_file(master_file)
return self.pypeitImage
#return super(BiasFrame, self).load('BIAS', ifile=ifile, is_pypeitImage=True)
```
#### File: pypeit/core/coadd2d.py
```python
import os
import copy
from IPython import embed
import numpy as np
import scipy
from matplotlib import pyplot as plt
from astropy.io import fits
from pypeit import msgs
from pypeit import utils
from pypeit import ginga
from pypeit import specobjs
from pypeit.masterframe import MasterFrame
from pypeit.waveimage import WaveImage
from pypeit.wavetilts import WaveTilts
from pypeit import specobjs
from pypeit import edgetrace
from pypeit import reduce
from pypeit.core import extract
from pypeit.core import load, coadd1d, pixels
from pypeit.core import parse
from pypeit.core import combine
from pypeit.images import scienceimage
from pypeit.spectrographs import util
from pypeit import calibrations
#def reference_trace_stack(slitid, stack_dict, offsets=None, objid=None):
# """
# Utility function for determining the reference trace about which 2d coadds are performed.
# There are two modes of operation to determine the reference trace for the 2d coadd of a given slit/order:
#
# 1) offsets: we stack about the center of the slit for the slit in question with the input offsets added
#     2) objid: we stack about the trace of a reference object for this slit given for each exposure by the input objid
#
# Either offsets or objid must be provided, but the code will raise an exception if both are provided.
#
# Args:
# slitid (int):
# The slit or order that we are currently considering
# stack_dict (dict):
#         Dictionary containing all the images and keys required for performing 2d coadds.
#     offsets (list or np.ndarray):
#         An array of offsets with the same dimensionality as nexp, the number of images being coadded.
# objid: (list or np.ndarray):
# An array of objids with the same dimensionality as the nexp, the number of images being coadded.
#
# Returns:
# ref_trace_stack
#
# ref_trace_stack (np.ndarray):
# An array with shape (nspec, nexp) containing the reference trace for each of the nexp exposures.
#
# """
#
# if offsets is not None and objid is not None:
#         msgs.error('You can only input offsets or an objid, but not both')
# nexp = len(offsets) if offsets is not None else len(objid)
# if offsets is not None:
# tslits_dict_list = stack_dict['tslits_dict_list']
# nspec, nslits = tslits_dict_list[0]['slit_left'].shape
# ref_trace_stack = np.zeros((nspec, nexp))
# for iexp, tslits_dict in enumerate(tslits_dict_list):
# ref_trace_stack[:, iexp] = (tslits_dict[:, slitid]['slit_left'] + tslits_dict[:, slitid]['slit_righ'])/2.0 + offsets[iexp]
# elif objid is not None:
# specobjs_list = stack_dict['specobjs_list']
# nspec = specobjs_list[0][0].TRACE_SPAT.shape[0]
# # Grab the traces, flux, wavelength and noise for this slit and objid.
# ref_trace_stack = np.zeros((nspec, nexp), dtype=float)
# for iexp, sobjs in enumerate(specobjs_list):
# # TODO Should be as in optimal_weights
# ithis = (sobjs.SLITID == slitid) & (sobjs.OBJID == objid[iexp])
# ref_trace_stack[:, iexp] = sobjs[ithis].TRACE_SPAT
# else:
# msgs.error('You must input either offsets or an objid to determine the stack of reference traces')
#
# return ref_trace_stack
#def optimal_weights(specobjs_list, slitid, objid, sn_smooth_npix, const_weights=False):
# """
# Determine optimal weights for 2d coadds. This script grabs the information from SpecObjs list for the
# object with specified slitid and objid and passes to coadd.sn_weights to determine the optimal weights for
# each exposure.
#
# Args:
# specobjs_list (list):
#             list of SpecObjs objects containing the objects that were extracted from each frame that will contribute
# to the coadd.
# slitid (int):
# The slitid that has the brightest object whose S/N will be used to determine the weight for each frame.
# objid (int):
# The objid index of the brightest object whose S/N will be used to determine the weight for each frame.
# sn_smooth_npix (float):
# Number of pixels used for determining smoothly varying S/N ratio weights.
# const_weights (bool):
# Use constant weights for coadding the exposures. Default=False
#
# Returns:
# rms_sn, weights
#
# rms_sn : ndarray, shape = (len(specobjs_list),)
# Root mean square S/N value for each input spectra
# weights : ndarray, shape (len(specobjs_list),)
# Weights to be applied to the spectra. These are signal-to-noise squared weights.
# """
#
# nexp = len(specobjs_list)
# nspec = specobjs_list[0][0].TRACE_SPAT.shape[0]
# # Grab the traces, flux, wavelength and noise for this slit and objid.
# flux_stack = np.zeros((nspec, nexp), dtype=float)
# ivar_stack = np.zeros((nspec, nexp), dtype=float)
# wave_stack = np.zeros((nspec, nexp), dtype=float)
# mask_stack = np.zeros((nspec, nexp), dtype=bool)
#
# for iexp, sobjs in enumerate(specobjs_list):
# embed()
# try:
# ithis = (sobjs.SLITID == slitid) & (sobjs.OBJID == objid[iexp])
# except AttributeError:
# ithis = (sobjs.ECH_ORDERINDX == slitid) & (sobjs.ECH_OBJID == objid[iexp])
# try:
# flux_stack[:,iexp] = sobjs[ithis][0].OPT_COUNTS
# except:
# embed(header='104')
# ivar_stack[:,iexp] = sobjs[ithis][0].OPT_COUNTS_IVAR
# wave_stack[:,iexp] = sobjs[ithis][0].OPT_WAVE
# mask_stack[:,iexp] = sobjs[ithis][0].OPT_MASK
#
# # TODO For now just use the zero as the reference for the wavelengths? Perhaps we should be rebinning the data though?
# rms_sn, weights = coadd1d.sn_weights(wave_stack, flux_stack, ivar_stack, mask_stack, sn_smooth_npix,
# const_weights=const_weights)
# return rms_sn, weights.T
def det_error_msg(exten, sdet):
# Print out error message if extension is not found
msgs.error("Extension {:s} for requested detector {:s} was not found.\n".format(exten) +
" Maybe you chose the wrong detector to coadd? "
"Set with --det= or check file contents with pypeit_show_2dspec Science/spec2d_XXX --list".format(sdet))
def get_wave_ind(wave_grid, wave_min, wave_max):
"""
Utility routine used by coadd2d to determine the starting and ending indices of a wavelength grid.
Args:
wave_grid: float ndarray
Wavelength grid.
wave_min: float
Minimum wavelength covered by the data in question.
wave_max: float
Maximum wavelength covered by the data in question.
Returns:
tuple: Returns (ind_lower, ind_upper), Integer lower and upper
indices into the array wave_grid that cover the interval
(wave_min, wave_max)
"""
diff = wave_grid - wave_min
diff[diff > 0] = np.inf
if not np.any(diff < 0):
ind_lower = 0
msgs.warn('Your wave grid does not extend blue enough. Taking bluest point')
else:
ind_lower = np.argmin(np.abs(diff))
diff = wave_max - wave_grid
diff[diff > 0] = np.inf
if not np.any(diff < 0):
ind_upper = wave_grid.size-1
msgs.warn('Your wave grid does not extend red enough. Taking reddest point')
else:
ind_upper = np.argmin(np.abs(diff))
return ind_lower, ind_upper
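# Usage sketch (grid and wavelength limits are hypothetical):
#   wave_grid = np.linspace(8000.0, 25000.0, 5000)
#   ind_lower, ind_upper = get_wave_ind(wave_grid, 9500.0, 11200.0)
#   wave_subset = wave_grid[ind_lower:ind_upper + 1]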
def get_wave_bins(thismask_stack, waveimg_stack, wave_grid):
# Determine the wavelength grid that we will use for the current slit/order
# TODO This cut on waveimg_stack should not be necessary
wavemask = thismask_stack & (waveimg_stack > 1.0)
wave_lower = waveimg_stack[wavemask].min()
wave_upper = waveimg_stack[wavemask].max()
ind_lower, ind_upper = get_wave_ind(wave_grid, wave_lower, wave_upper)
wave_bins = wave_grid[ind_lower:ind_upper + 1]
return wave_bins
def get_spat_bins(thismask_stack, trace_stack):
nimgs, nspec, nspat = thismask_stack.shape
# Create the slit_cen_stack and determine the minimum and maximum
# spatial offsets that we need to cover to determine the spatial
# bins
spat_img = np.outer(np.ones(nspec), np.arange(nspat))
dspat_stack = np.zeros_like(thismask_stack,dtype=float)
spat_min = np.inf
spat_max = -np.inf
for img in range(nimgs):
# center of the slit replicated spatially
slit_cen_img = np.outer(trace_stack[:, img], np.ones(nspat))
dspat_iexp = (spat_img - slit_cen_img)
dspat_stack[img, :, :] = dspat_iexp
thismask_now = thismask_stack[img, :, :]
spat_min = np.fmin(spat_min, dspat_iexp[thismask_now].min())
spat_max = np.fmax(spat_max, dspat_iexp[thismask_now].max())
spat_min_int = int(np.floor(spat_min))
spat_max_int = int(np.ceil(spat_max))
dspat_bins = np.arange(spat_min_int, spat_max_int + 1, 1,dtype=float)
return dspat_bins, dspat_stack
def compute_coadd2d(ref_trace_stack, sciimg_stack, sciivar_stack, skymodel_stack, inmask_stack, tilts_stack,
thismask_stack, waveimg_stack, wave_grid, weights='uniform'):
"""
Construct a 2d co-add of a stack of PypeIt spec2d reduction outputs.
Slits are 'rectified' onto a spatial and spectral grid, which
encompasses the spectral and spatial coverage of the image stacks.
The rectification uses nearest grid point interpolation to avoid
covariant errors. Dithering is supported as all images are centered
relative to a set of reference traces in trace_stack.
Args:
        ref_trace_stack (`numpy.ndarray`_):
Stack of reference traces about which the images are
rectified and coadded. If the images were not dithered then
this reference trace can simply be the center of the slit::
slitcen = (slit_left + slit_righ)/2
If the images were dithered, then this object can either be
the slitcen appropriately shifted with the dither pattern,
or it could be the trace of the object of interest in each
exposure determined by running PypeIt on the individual
images. Shape is (nimgs, nspec).
sciimg_stack (`numpy.ndarray`_):
Stack of science images. Shape is (nimgs, nspec, nspat).
sciivar_stack (`numpy.ndarray`_):
Stack of inverse variance images. Shape is (nimgs, nspec,
nspat).
skymodel_stack (`numpy.ndarray`_):
Stack of the model sky. Shape is (nimgs, nspec, nspat).
inmask_stack (`numpy.ndarray`_):
Boolean array with the input masks for each image; `True`
values are *good*, `False` values are *bad*. Shape is
(nimgs, nspec, nspat).
tilts_stack (`numpy.ndarray`_):
Stack of the wavelength tilts traces. Shape is (nimgs,
nspec, nspat).
waveimg_stack (`numpy.ndarray`_):
Stack of the wavelength images. Shape is (nimgs, nspec,
nspat).
thismask_stack (`numpy.ndarray`_):
Boolean array with the masks indicating which pixels are on
the slit in question. `True` values are on the slit;
`False` values are off the slit. Shape is (nimgs, nspec,
nspat).
weights (`numpy.ndarray`_, optional):
The weights used when combining the rectified images (see
:func:`weighted_combine`). If no weights are provided,
            uniform weighting is used. Weights are broadcast to the
correct size of the image stacks (see
:func:`broadcast_weights`), as necessary. Shape must be
(nimgs,), (nimgs, nspec), or (nimgs, nspec, nspat).
loglam_grid (`numpy.ndarray`_, optional):
Wavelength grid in log10(wave) onto which the image stacks
will be rectified. The code will automatically choose the
subset of this grid encompassing the wavelength coverage of
the image stacks provided (see :func:`waveimg_stack`).
Either `loglam_grid` or `wave_grid` must be provided.
wave_grid (`numpy.ndarray`_, optional):
Same as `loglam_grid` but in angstroms instead of
log(angstroms). (TODO: Check units...)
Returns:
tuple: Returns the following (TODO: This needs to be updated):
- sciimg: float ndarray shape = (nspec_coadd, nspat_coadd):
Rectified and coadded science image
- sciivar: float ndarray shape = (nspec_coadd, nspat_coadd):
Rectified and coadded inverse variance image with correct
error propagation
- imgminsky: float ndarray shape = (nspec_coadd,
nspat_coadd): Rectified and coadded sky subtracted image
- outmask: bool ndarray shape = (nspec_coadd, nspat_coadd):
Output mask for rectified and coadded images. True = Good,
False=Bad.
- nused: int ndarray shape = (nspec_coadd, nspat_coadd):
Image of integers indicating the number of images from the
image stack that contributed to each pixel
- tilts: float ndarray shape = (nspec_coadd, nspat_coadd):
The averaged tilts image corresponding to the rectified
and coadded data.
- waveimg: float ndarray shape = (nspec_coadd, nspat_coadd):
The averaged wavelength image corresponding to the
rectified and coadded data.
- dspat: float ndarray shape = (nspec_coadd, nspat_coadd):
The average spatial offsets in pixels from the reference
trace trace_stack corresponding to the rectified and
coadded data.
- thismask: bool ndarray shape = (nspec_coadd, nspat_coadd):
Output mask for rectified and coadded images. True = Good,
False=Bad. This image is trivial, and is simply an image
of True values the same shape as the rectified and coadded
data.
- tslits_dict: dict: tslits_dict dictionary containing the
information about the slits boundaries. The slit
boundaries are trivial and are simply vertical traces at 0
and nspat_coadd-1.
"""
nimgs, nspec, nspat = sciimg_stack.shape
if 'uniform' in weights:
msgs.info('No weights were provided. Using uniform weights.')
weights = np.ones(nimgs)/float(nimgs)
weights_stack = combine.broadcast_weights(weights, sciimg_stack.shape)
# Determine the wavelength grid that we will use for the current slit/order
wave_bins = get_wave_bins(thismask_stack, waveimg_stack, wave_grid)
dspat_bins, dspat_stack = get_spat_bins(thismask_stack, ref_trace_stack)
sci_list = [weights_stack, sciimg_stack, sciimg_stack - skymodel_stack, tilts_stack,
waveimg_stack, dspat_stack]
var_list = [utils.calc_ivar(sciivar_stack)]
sci_list_rebin, var_list_rebin, norm_rebin_stack, nsmp_rebin_stack \
= rebin2d(wave_bins, dspat_bins, waveimg_stack, dspat_stack, thismask_stack,
inmask_stack, sci_list, var_list)
# Now compute the final stack with sigma clipping
sigrej = 3.0
maxiters = 10
# sci_list_rebin[0] = rebinned weights image stack
# sci_list_rebin[1:] = stacks of images that we want to weighted combine
# sci_list_rebin[2] = rebinned sciimg-sky_model images that we used for the sigma clipping
sci_list_out, var_list_out, outmask, nused \
= combine.weighted_combine(sci_list_rebin[0], sci_list_rebin[1:], var_list_rebin,
norm_rebin_stack != 0, sigma_clip=True,
sigma_clip_stack=sci_list_rebin[2], sigrej=sigrej,
maxiters=maxiters)
sciimg, imgminsky, tilts, waveimg, dspat = sci_list_out
sciivar = utils.calc_ivar(var_list_out[0])
# Compute the midpoints vectors, and lower/upper bins of the rectified image
wave_mid = ((wave_bins + np.roll(wave_bins,1))/2.0)[1:]
wave_min = wave_bins[:-1]
wave_max = wave_bins[1:]
dspat_mid = ((dspat_bins + np.roll(dspat_bins,1))/2.0)[1:]
# Interpolate the dspat images wherever the coadds are masked
# because a given pixel was not sampled. This is done because the
# dspat image is not allowed to have holes if it is going to work
# with local_skysub_extract
nspec_coadd, nspat_coadd = imgminsky.shape
spat_img_coadd, spec_img_coadd = np.meshgrid(np.arange(nspat_coadd), np.arange(nspec_coadd))
if np.any(np.invert(outmask)):
points_good = np.stack((spec_img_coadd[outmask], spat_img_coadd[outmask]), axis=1)
points_bad = np.stack((spec_img_coadd[np.invert(outmask)],
spat_img_coadd[np.invert(outmask)]), axis=1)
values_dspat = dspat[outmask]
dspat_bad = scipy.interpolate.griddata(points_good, values_dspat, points_bad,
method='cubic')
dspat[np.invert(outmask)] = dspat_bad
# Points outside the convex hull of the data are set to nan. We
# identify those and simply assume them values from the
# dspat_img_fake, which is what dspat would be on a regular
# perfectly rectified image grid.
nanpix = np.isnan(dspat)
if np.any(nanpix):
dspat_img_fake = spat_img_coadd + dspat_mid[0]
dspat[nanpix] = dspat_img_fake[nanpix]
return dict(wave_bins=wave_bins, dspat_bins=dspat_bins, wave_mid=wave_mid, wave_min=wave_min,
wave_max=wave_max, dspat_mid=dspat_mid, sciimg=sciimg, sciivar=sciivar,
imgminsky=imgminsky, outmask=outmask, nused=nused, tilts=tilts, waveimg=waveimg,
dspat=dspat, nspec=imgminsky.shape[0], nspat=imgminsky.shape[1])
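# Usage sketch (all *_stack inputs are hypothetical per-exposure image stacks
# and wave_grid a 1D wavelength grid; see the docstring above):
#   coadd_dict = compute_coadd2d(ref_trace_stack, sciimg_stack, sciivar_stack,
#                                skymodel_stack, inmask_stack, tilts_stack,
#                                thismask_stack, waveimg_stack, wave_grid,
#                                weights='uniform')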
def rebin2d(spec_bins, spat_bins, waveimg_stack, spatimg_stack, thismask_stack, inmask_stack, sci_list, var_list):
"""
Rebin a set of images and propagate variance onto a new spectral and spatial grid. This routine effectively
"recitifies" images using np.histogram2d which is extremely fast and effectiveluy performs
nearest grid point interpolation.
Args:
spec_bins: float ndarray, shape = (nspec_rebin)
Spectral bins to rebin to.
spat_bins: float ndarray, shape = (nspat_rebin)
Spatial bins to rebin to.
waveimg_stack: float ndarray, shape = (nimgs, nspec, nspat)
Stack of nimgs wavelength images with shape = (nspec, nspat) each
spatimg_stack: float ndarray, shape = (nimgs, nspec, nspat)
Stack of nimgs spatial position images with shape = (nspec, nspat) each
thismask_stack: bool ndarray, shape = (nimgs, nspec, nspat)
        Stack of nimgs images with shape = (nspec, nspat) indicating the locations of the pixels on an image that
are on the slit in question.
inmask_stack: bool ndarray, shape = (nimgs, nspec, nspat)
Stack of nimgs images with shape = (nspec, nspat) indicating which pixels on an image are masked.
True = Good, False = Bad
sci_list: list
List of float ndarray images (each being an image stack with shape (nimgs, nspec, nspat))
which are to be rebinned onto the new spec_bins, spat_bins
var_list: list
List of float ndarray variance images (each being an image stack with shape (nimgs, nspec, nspat))
        which are to be rebinned with proper error propagation
Returns:
tuple: Returns the following:
- sci_list_out: list: The list of ndarray rebinned images
with new shape (nimgs, nspec_rebin, nspat_rebin)
- var_list_out: list: The list of ndarray rebinned variance
images with correct error propagation with shape (nimgs,
nspec_rebin, nspat_rebin)
- norm_rebin_stack: int ndarray, shape (nimgs, nspec_rebin,
nspat_rebin): An image stack indicating the integer
occupation number of a given pixel. In other words, this
number would be zero for empty bins, one for bins that
were populated by a single pixel, etc. This image takes
the input inmask_stack into account. The output mask for
            each image can be formed via outmask_rebin_stack =
(norm_rebin_stack > 0)
- nsmp_rebin_stack: int ndarray, shape (nimgs, nspec_rebin,
nspat_rebin): An image stack indicating the integer
occupation number of a given pixel taking only the
              thismask_stack into account, but not the inmask_stack.
              This image is mainly constructed for bookkeeping
              purposes, as it represents the number of times
each pixel in the rebin image was populated taking only
the "geometry" of the rebinning into account (i.e. the
thismask_stack), but not the masking (inmask_stack).
"""
shape = combine.img_list_error_check(sci_list, var_list)
nimgs = shape[0]
    # Allocate the output images
nspec_rebin = spec_bins.size - 1
nspat_rebin = spat_bins.size - 1
shape_out = (nimgs, nspec_rebin, nspat_rebin)
nsmp_rebin_stack = np.zeros(shape_out)
norm_rebin_stack = np.zeros(shape_out)
sci_list_out = []
for ii in range(len(sci_list)):
sci_list_out.append(np.zeros(shape_out))
var_list_out = []
for jj in range(len(var_list)):
var_list_out.append(np.zeros(shape_out))
for img in range(nimgs):
        # This first image is purely for bookkeeping purposes to determine the number of times each pixel
# could have been sampled
thismask = thismask_stack[img, :, :]
spec_rebin_this = waveimg_stack[img, :, :][thismask]
spat_rebin_this = spatimg_stack[img, :, :][thismask]
nsmp_rebin_stack[img, :, :], spec_edges, spat_edges = np.histogram2d(spec_rebin_this, spat_rebin_this,
bins=[spec_bins, spat_bins], density=False)
finmask = thismask & inmask_stack[img,:,:]
spec_rebin = waveimg_stack[img, :, :][finmask]
spat_rebin = spatimg_stack[img, :, :][finmask]
norm_img, spec_edges, spat_edges = np.histogram2d(spec_rebin, spat_rebin,
bins=[spec_bins, spat_bins], density=False)
norm_rebin_stack[img, :, :] = norm_img
# Rebin the science images
for indx, sci in enumerate(sci_list):
weigh_sci, spec_edges, spat_edges = np.histogram2d(spec_rebin, spat_rebin,
bins=[spec_bins, spat_bins], density=False,
weights=sci[img,:,:][finmask])
sci_list_out[indx][img, :, :] = (norm_img > 0.0) * weigh_sci/(norm_img + (norm_img == 0.0))
# Rebin the variance images, note the norm_img**2 factor for correct error propagation
for indx, var in enumerate(var_list):
weigh_var, spec_edges, spat_edges = np.histogram2d(spec_rebin, spat_rebin,
bins=[spec_bins, spat_bins], density=False,
weights=var[img, :, :][finmask])
var_list_out[indx][img, :, :] = (norm_img > 0.0)*weigh_var/(norm_img + (norm_img == 0.0))**2
return sci_list_out, var_list_out, norm_rebin_stack.astype(int), nsmp_rebin_stack.astype(int)
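# Usage sketch (inputs are the same hypothetical stacks used by
# compute_coadd2d above; var_list holds variance images):
#   sci_out, var_out, norm_stack, nsmp_stack = rebin2d(
#       wave_bins, dspat_bins, waveimg_stack, dspat_stack, thismask_stack,
#       inmask_stack, [sciimg_stack], [utils.calc_ivar(sciivar_stack)])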
# TODO Break up into separate methods?
class Coadd2d(object):
"""
Main routine to run the extraction for 2d coadds.
Algorithm steps are as follows:
- Fill this in.
This performs 2d coadd specific tasks, and then also performs some
of the tasks analogous to the pypeit.extract_one method. Docs coming
soon....
Args:
stack_dict:
master_dir:
det (int):
samp_fact: float
sampling factor to make the wavelength grid finer or coarser. samp_fact > 1.0 oversamples (finer),
samp_fact < 1.0 undersamples (coarser)
ir_redux:
par:
show:
show_peaks:
"""
def __init__(self, spec2d_files, spectrograph, det=1, offsets=None, weights='auto', sn_smooth_npix=None, par=None,
ir_redux=False, show=False, show_peaks=False, debug_offsets=False, debug=False, **kwargs_wave):
"""
TODO: These args should be in the previous doc string.
Args:
spec2d_files:
det:
offsets (ndarray): default=None
Spatial offsets to be applied to each image before coadding. For the default mode of None, images
are registered automatically using the trace of the brightest object.
weights (str, list or ndarray):
Mode for the weights used to coadd images. Options are 'auto' (default), 'uniform', or list/array of
weights with shape = (nexp,) can be input and will be applied to the image. Note 'auto' is not allowed
if offsets are input, and if set this will cause an exception.
sn_smooth_npix:
ir_redux:
par:
std:
show:
show_peaks:
debug:
**kwargs_wave:
"""
## Use Cases:
# 1) offsets is None -- auto compute offsets from brightest object, so then default to auto_weights=True
# 2) offsets not None, weights = None (uniform weighting) or weights is not None (input weights)
# 3) offsets not None, auto_weights=True (Do not support)
if offsets is not None and 'auto' in weights:
msgs.error("Automatic weights cannot be computed for input offsets. "
"Set weights='uniform' or input an array of weights with shape (nexp,)")
self.spec2d_files = spec2d_files
self.spectrograph = spectrograph
self.det = det
self.offsets = offsets
self.weights = weights
self.ir_redux = ir_redux
self.show = show
self.show_peaks = show_peaks
self.debug_offsets = debug_offsets
self.debug = debug
self.stack_dict = None
self.psuedo_dict = None
self.objid_bri = None
self.slitid_bri = None
self.snr_bar_bri = None
# Load the stack_dict
self.stack_dict = self.load_coadd2d_stacks(self.spec2d_files)
self.pypeline = self.spectrograph.pypeline
self.par = self.spectrograph.default_pypeit_par() if par is None else par
# Check that there are the same number of slits on every exposure
nslits_list = []
for tslits_dict in self.stack_dict['tslits_dict_list']:
nspec, nslits_now = tslits_dict['slit_left'].shape
nslits_list.append(nslits_now)
if not len(set(nslits_list))==1:
msgs.error('Not all of your exposures have the same number of slits. Check your inputs')
self.nslits = nslits_list[0]
self.nexp = len(self.stack_dict['specobjs_list'])
self.nspec = nspec
self.binning = np.array([self.stack_dict['tslits_dict_list'][0]['binspectral'],
self.stack_dict['tslits_dict_list'][0]['binspatial']])
# If smoothing is not input, smooth by 10% of the spectral dimension
self.sn_smooth_npix = sn_smooth_npix if sn_smooth_npix is not None else 0.1*self.nspec
def optimal_weights(self, slitorderid, objid, const_weights=False):
"""
Determine optimal weights for 2d coadds. This script grabs the information from SpecObjs list for the
object with specified slitid and objid and passes to coadd.sn_weights to determine the optimal weights for
each exposure.
Args:
slitorderid (int):
The slit or order id that has the brightest object whose S/N will be used to determine the weight for each frame.
objid (np.ndarray):
Array of object indices with shape = (nexp,) of the brightest object whose S/N will be used to determine the weight for each frame.
const_weights (bool):
Use constant weights for coadding the exposures. Default=False
Returns:
rms_sn : ndarray, shape = (len(specobjs_list),)
Root mean square S/N value for each input spectra
weights : ndarray, shape (len(specobjs_list),)
Weights to be applied to the spectra. These are signal-to-noise squared weights.
"""
nexp = len(self.stack_dict['specobjs_list'])
nspec = self.stack_dict['specobjs_list'][0][0].TRACE_SPAT.shape[0]
# Grab the traces, flux, wavelength and noise for this slit and objid.
flux_stack = np.zeros((nspec, nexp), dtype=float)
ivar_stack = np.zeros((nspec, nexp), dtype=float)
wave_stack = np.zeros((nspec, nexp), dtype=float)
mask_stack = np.zeros((nspec, nexp), dtype=bool)
for iexp, sobjs in enumerate(self.stack_dict['specobjs_list']):
ithis = sobjs.slitorder_objid_indices(slitorderid, objid[iexp])
flux_stack[:, iexp] = sobjs[ithis].OPT_COUNTS
ivar_stack[:, iexp] = sobjs[ithis].OPT_COUNTS_IVAR
wave_stack[:, iexp] = sobjs[ithis].OPT_WAVE
mask_stack[:, iexp] = sobjs[ithis].OPT_MASK
# TODO For now just use the zero as the reference for the wavelengths? Perhaps we should be rebinning the data though?
rms_sn, weights = coadd1d.sn_weights(wave_stack, flux_stack, ivar_stack, mask_stack, self.sn_smooth_npix,
const_weights=const_weights)
return rms_sn, weights.T
def coadd(self, only_slits=None):
only_slits = [only_slits] if (only_slits is not None and
isinstance(only_slits, (int, np.int, np.int64, np.int32))) else only_slits
good_slits = np.arange(self.nslits) if only_slits is None else only_slits
coadd_list = []
for islit in good_slits:
msgs.info('Performing 2d coadd for slit: {:d}/{:d}'.format(islit, self.nslits - 1))
ref_trace_stack = self.reference_trace_stack(islit, offsets=self.offsets, objid=self.objid_bri)
thismask_stack = self.stack_dict['slitmask_stack'] == islit
# TODO Can we get rid of this one line simply making the weights returned by parse_weights an
# (nslit, nexp) array?
            # This one line deals with the different weighting strategies between MultiSlit and Echelle. Otherwise, we
# would need to copy this method twice in the subclasses
if 'auto_echelle' in self.use_weights:
rms_sn, weights = self.optimal_weights(islit, self.objid_bri)
else:
weights = self.use_weights
# Perform the 2d coadd
coadd_dict = compute_coadd2d(ref_trace_stack, self.stack_dict['sciimg_stack'],
self.stack_dict['sciivar_stack'],
self.stack_dict['skymodel_stack'], self.stack_dict['mask_stack'] == 0,
self.stack_dict['tilts_stack'], thismask_stack,
self.stack_dict['waveimg_stack'],
self.wave_grid, weights=weights)
coadd_list.append(coadd_dict)
return coadd_list
def create_psuedo_image(self, coadd_list):
""" THIS UNDOCUMENTED CODE PROBABLY SHOULD GENERATE AND RETURN
        STANDARD PYPEIT OBJECTS INSTEAD OF SOME UNDEFINED DICT"""
nspec_vec = np.zeros(self.nslits,dtype=int)
nspat_vec = np.zeros(self.nslits,dtype=int)
for islit, cdict in enumerate(coadd_list):
nspec_vec[islit]=cdict['nspec']
nspat_vec[islit]=cdict['nspat']
# Determine the size of the psuedo image
nspat_pad = 10
nspec_psuedo = nspec_vec.max()
nspat_psuedo = np.sum(nspat_vec) + (self.nslits + 1)*nspat_pad
spec_vec_psuedo = np.arange(nspec_psuedo)
shape_psuedo = (nspec_psuedo, nspat_psuedo)
imgminsky_psuedo = np.zeros(shape_psuedo)
sciivar_psuedo = np.zeros(shape_psuedo)
waveimg_psuedo = np.zeros(shape_psuedo)
tilts_psuedo = np.zeros(shape_psuedo)
spat_img_psuedo = np.zeros(shape_psuedo)
nused_psuedo = np.zeros(shape_psuedo, dtype=int)
inmask_psuedo = np.zeros(shape_psuedo, dtype=bool)
wave_mid = np.zeros((nspec_psuedo, self.nslits))
wave_mask = np.zeros((nspec_psuedo, self.nslits),dtype=bool)
wave_min = np.zeros((nspec_psuedo, self.nslits))
wave_max = np.zeros((nspec_psuedo, self.nslits))
dspat_mid = np.zeros((nspat_psuedo, self.nslits))
spat_left = nspat_pad
slit_left = np.zeros((nspec_psuedo, self.nslits))
slit_righ = np.zeros((nspec_psuedo, self.nslits))
spec_min1 = np.zeros(self.nslits)
spec_max1 = np.zeros(self.nslits)
nspec_grid = self.wave_grid_mid.size
for islit, coadd_dict in enumerate(coadd_list):
spat_righ = spat_left + nspat_vec[islit]
ispec = slice(0,nspec_vec[islit])
ispat = slice(spat_left,spat_righ)
imgminsky_psuedo[ispec, ispat] = coadd_dict['imgminsky']
sciivar_psuedo[ispec, ispat] = coadd_dict['sciivar']
waveimg_psuedo[ispec, ispat] = coadd_dict['waveimg']
tilts_psuedo[ispec, ispat] = coadd_dict['tilts']
# spat_img_psuedo is the sub-pixel image position on the rebinned psuedo image
inmask_psuedo[ispec, ispat] = coadd_dict['outmask']
image_temp = (coadd_dict['dspat'] - coadd_dict['dspat_mid'][0] + spat_left)*coadd_dict['outmask']
spat_img_psuedo[ispec, ispat] = image_temp
nused_psuedo[ispec, ispat] = coadd_dict['nused']
wave_min[ispec, islit] = coadd_dict['wave_min']
wave_max[ispec, islit] = coadd_dict['wave_max']
wave_mid[ispec, islit] = coadd_dict['wave_mid']
wave_mask[ispec, islit] = True
# Fill in the rest of the wave_mid with the corresponding points in the wave_grid
#wave_this = wave_mid[wave_mask[:,islit], islit]
#ind_upper = np.argmin(np.abs(self.wave_grid_mid - wave_this.max())) + 1
#if nspec_vec[islit] != nspec_psuedo:
# wave_mid[nspec_vec[islit]:, islit] = self.wave_grid_mid[ind_upper:ind_upper + (nspec_psuedo-nspec_vec[islit])]
dspat_mid[ispat, islit] = coadd_dict['dspat_mid']
slit_left[:,islit] = np.full(nspec_psuedo, spat_left)
slit_righ[:,islit] = np.full(nspec_psuedo, spat_righ)
spec_max1[islit] = nspec_vec[islit]-1
spat_left = spat_righ + nspat_pad
slitcen = (slit_left + slit_righ)/2.0
tslits_dict_psuedo = dict(slit_left=slit_left, slit_righ=slit_righ, slitcen=slitcen,
nspec=nspec_psuedo, nspat=nspat_psuedo, pad=0,
nslits = self.nslits, binspectral=1, binspatial=1, spectrograph=self.spectrograph.spectrograph,
spec_min=spec_min1, spec_max=spec_max1,
maskslits=np.zeros(slit_left.shape[1], dtype=np.bool))
slitmask_psuedo = pixels.tslits2mask(tslits_dict_psuedo)
# This is a kludge to deal with cases where bad wavelengths result in large regions where the slit is poorly sampled,
# which wreaks havoc on the local sky-subtraction
min_slit_frac = 0.70
spec_min = np.zeros(self.nslits)
spec_max = np.zeros(self.nslits)
for islit in range(self.nslits):
slit_width = np.sum(inmask_psuedo*(slitmask_psuedo == islit),axis=1)
slit_width_img = np.outer(slit_width, np.ones(nspat_psuedo))
med_slit_width = np.median(slit_width_img[slitmask_psuedo == islit])
nspec_eff = np.sum(slit_width > min_slit_frac*med_slit_width)
nsmooth = int(np.fmax(np.ceil(nspec_eff*0.02),10))
slit_width_sm = scipy.ndimage.filters.median_filter(slit_width, size=nsmooth, mode='reflect')
igood = (slit_width_sm > min_slit_frac*med_slit_width)
spec_min[islit] = spec_vec_psuedo[igood].min()
spec_max[islit] = spec_vec_psuedo[igood].max()
bad_pix = (slit_width_img < min_slit_frac*med_slit_width) & (slitmask_psuedo == islit)
inmask_psuedo[bad_pix] = False
# Update with tslits_dict_psuedo
tslits_dict_psuedo['spec_min'] = spec_min
tslits_dict_psuedo['spec_max'] = spec_max
psuedo_dict = dict(nspec=nspec_psuedo, nspat=nspat_psuedo, imgminsky=imgminsky_psuedo, sciivar=sciivar_psuedo,
inmask=inmask_psuedo, tilts=tilts_psuedo,
waveimg=waveimg_psuedo, spat_img = spat_img_psuedo,
tslits_dict=tslits_dict_psuedo,
wave_mask=wave_mask, wave_mid=wave_mid, wave_min=wave_min, wave_max=wave_max)
return psuedo_dict
def reduce(self, psuedo_dict, show=None, show_peaks=None):
show = self.show if show is None else show
show_peaks = self.show_peaks if show_peaks is None else show_peaks
# Generate a ScienceImage
sciImage = scienceimage.ScienceImage(self.spectrograph, self.det,
self.par['scienceframe']['process'],
psuedo_dict['imgminsky'],
psuedo_dict['sciivar'],
np.zeros_like(psuedo_dict['inmask']), # Dummy bpm
rn2img=np.zeros_like(psuedo_dict['inmask']), # Dummy rn2img
crmask=np.invert(psuedo_dict['inmask']))
slitmask_psuedo = pixels.tslits2mask(psuedo_dict['tslits_dict'])
sciImage.build_mask(slitmask=slitmask_psuedo)
# Make changes to parset specific to 2d coadds
parcopy = copy.deepcopy(self.par)
parcopy['scienceimage']['findobj']['trace_npoly'] = 3 # Low order traces since we are rectified
#parcopy['scienceimage']['find_extrap_npoly'] = 1 # Use low order for trace extrapolation
# Instantiate Calibrations class
caliBrate = calibrations.MultiSlitCalibrations(None, parcopy['calibrations'], self.spectrograph)
caliBrate.tslits_dict = psuedo_dict['tslits_dict']
caliBrate.tilts_dict = dict(tilts=psuedo_dict['tilts'])
caliBrate.mswave = psuedo_dict['waveimg']
#
# redux = reduce.instantiate_me(sciImage, self.spectrograph, psuedo_dict['tslits_dict'], parcopy, psuedo_dict['tilts'],
redux=reduce.instantiate_me(sciImage, self.spectrograph, parcopy, caliBrate,
ir_redux=self.ir_redux, objtype='science_coadd2d',
det=self.det, binning=self.binning, show=show)
if show:
redux.show('image', image=psuedo_dict['imgminsky']*(sciImage.mask == 0), chname = 'imgminsky', slits=True, clear=True)
# Object finding
sobjs_obj, nobj, skymask_init = redux.find_objects(sciImage.image, show_peaks=show_peaks)
# Local sky-subtraction
global_sky_psuedo = np.zeros_like(psuedo_dict['imgminsky']) # No global sky for co-adds since we go straight to local
skymodel_psuedo, objmodel_psuedo, ivarmodel_psuedo, outmask_psuedo, sobjs = redux.local_skysub_extract(
caliBrate.mswave, global_sky_psuedo, sobjs_obj, spat_pix=psuedo_dict['spat_img'], model_noise=False,
show_profile=show, show=show)
if self.ir_redux:
sobjs.purge_neg()
# Add the information about the fixed wavelength grid to the sobjs
for spec in sobjs:
idx = spec.slit_orderindx
# Fill
spec.BOX_WAVE_GRID_MASK, spec.OPT_WAVE_GRID_MASK = [psuedo_dict['wave_mask'][:,idx]]*2
spec.BOX_WAVE_GRID, spec.OPT_WAVE_GRID = [psuedo_dict['wave_mid'][:,idx]]*2
spec.BOX_WAVE_GRID_MIN, spec.OPT_WAVE_GRID_MIN = [psuedo_dict['wave_min'][:,idx]]*2
spec.BOX_WAVE_GRID_MAX, spec.OPT_WAVE_GRID_MAX = [psuedo_dict['wave_max'][:,idx]]*2
# Add the rest to the psuedo_dict
psuedo_dict['skymodel'] = skymodel_psuedo
psuedo_dict['objmodel'] = objmodel_psuedo
psuedo_dict['ivarmodel'] = ivarmodel_psuedo
psuedo_dict['outmask'] = outmask_psuedo
psuedo_dict['sobjs'] = sobjs
self.psuedo_dict=psuedo_dict
return psuedo_dict['imgminsky'], psuedo_dict['sciivar'], skymodel_psuedo, objmodel_psuedo, ivarmodel_psuedo, outmask_psuedo, sobjs
def save_masters(self, master_dir):
# Write out the psuedo master files to disk
master_key_dict = self.stack_dict['master_key_dict']
# TODO: These saving operations are a temporary kludge
waveImage = WaveImage(None, None, None, self.spectrograph, # spectrograph is needed for header
None, None, master_key=master_key_dict['arc'],
master_dir=master_dir)
waveImage.save(image=self.psuedo_dict['waveimg'])
edges = edgetrace.EdgeTraceSet.from_tslits_dict(self.psuedo_dict['tslits_dict'],
master_key_dict['trace'], master_dir)
edges.save()
def snr_report(self, snr_bar, slitid=None):
# Print out a report on the SNR
msg_string = msgs.newline() + '-------------------------------------'
msg_string += msgs.newline() + ' Summary for highest S/N object'
if slitid is not None:
msg_string += msgs.newline() + ' found on slitid = {:d} '.format(slitid)
msg_string += msgs.newline() + '-------------------------------------'
msg_string += msgs.newline() + ' exp# S/N'
for iexp, snr in enumerate(snr_bar):
msg_string += msgs.newline() + ' {:d} {:5.2f}'.format(iexp, snr)
msg_string += msgs.newline() + '-------------------------------------'
msgs.info(msg_string)
def get_good_slits(self, only_slits):
only_slits = [only_slits] if (only_slits is not None and
                                      isinstance(only_slits, (int, np.integer))) else only_slits
good_slits = np.arange(self.nslits) if only_slits is None else only_slits
return good_slits
def offset_slit_cen(self, slitid, offsets):
nexp = len(offsets)
tslits_dict_list = self.stack_dict['tslits_dict_list']
nspec, nslits = tslits_dict_list[0]['slit_left'].shape
ref_trace_stack = np.zeros((nspec, nexp))
for iexp, tslits_dict in enumerate(tslits_dict_list):
ref_trace_stack[:, iexp] = (tslits_dict['slit_left'][:, slitid] +
tslits_dict['slit_righ'][:, slitid])/2.0 - offsets[iexp]
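        # Each column is that exposure's slit-center trace with its offset subtracted;
        # the 2d coadd is then performed about these reference traces.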
return ref_trace_stack
def get_wave_grid(self, **kwargs_wave):
"""
Routine to create a wavelength grid for 2d coadds using all
of the wavelengths of the extracted objects. Calls
coadd1d.get_wave_grid.
Args:
**kwargs_wave (dict):
                Optional arguments for the coadd1d.get_wave_grid function
Returns:
tuple: Returns the following:
- wave_grid (np.ndarray): New wavelength grid, not
masked
- wave_grid_mid (np.ndarray): New wavelength grid
evaluated at the centers of the wavelength bins, that
is this grid is simply offset from wave_grid by
dsamp/2.0, in either linear space or log10 depending
on whether linear or (log10 or velocity) was
requested. For iref or concatenate the linear
wavelength sampling will be calculated.
- dsamp (float): The pixel sampling for wavelength grid
created.
"""
nobjs_tot = np.array([len(spec) for spec in self.stack_dict['specobjs_list']]).sum()
waves = np.zeros((self.nspec, nobjs_tot))
masks = np.zeros_like(waves, dtype=bool)
indx = 0
for spec_this in self.stack_dict['specobjs_list']:
for spec in spec_this:
waves[:, indx] = spec.OPT_WAVE
masks[:, indx] = spec.OPT_MASK
indx += 1
wave_grid, wave_grid_mid, dsamp = coadd1d.get_wave_grid(waves, masks=masks, **kwargs_wave)
return wave_grid, wave_grid_mid, dsamp
def load_coadd2d_stacks(self, spec2d_files):
"""
Routine to read in required images for 2d coadds given a list of spec2d files.
Args:
spec2d_files: list
List of spec2d filenames
Returns:
dict: Dictionary containing all the images and keys required
            for performing 2d coadds.
"""
# Get the detector string
sdet = parse.get_dnum(self.det, prefix=False)
# Get the master dir
redux_path = os.getcwd()
# Grab the files
head2d_list = []
tracefiles = []
waveimgfiles = []
tiltfiles = []
spec1d_files = []
for f in spec2d_files:
head = fits.getheader(f)
if os.path.exists(head['PYPMFDIR']):
master_path = head['PYPMFDIR']
else:
master_dir = os.path.basename(head['PYPMFDIR'])
master_path = os.path.join(os.path.split(os.path.split(f)[0])[0], master_dir)
trace_key = '{0}_{1:02d}'.format(head['TRACMKEY'], self.det)
wave_key = '{0}_{1:02d}'.format(head['ARCMKEY'], self.det)
head2d_list.append(head)
spec1d_files.append(f.replace('spec2d', 'spec1d'))
tracefiles.append(os.path.join(master_path,
'{0}.gz'.format(MasterFrame.construct_file_name('Edges', trace_key))))
# MasterFrame.construct_file_name('Trace', trace_key)))
waveimgfiles.append(os.path.join(master_path,
MasterFrame.construct_file_name('Wave', wave_key)))
tiltfiles.append(os.path.join(master_path,
MasterFrame.construct_file_name('Tilts', wave_key)))
nfiles = len(spec2d_files)
specobjs_list = []
head1d_list = []
tslits_dict_list = []
# TODO Sort this out with the correct detector extensions etc.
# Read in the image stacks
waveimgfile, tiltfile, tracefile = None, None, None
for ifile in range(nfiles):
# Load up the calibs, if needed
if waveimgfiles[ifile] != waveimgfile:
waveimg = WaveImage.from_master_file(waveimgfiles[ifile]).image
if tiltfile != tiltfiles[ifile]:
tilts = WaveTilts.from_master_file(tiltfiles[ifile]).tilts_dict
# Save
waveimgfile = waveimgfiles[ifile]
tiltfile = tiltfiles[ifile]
#
hdu = fits.open(spec2d_files[ifile])
# One detector, sky sub for now
names = [hdu[i].name for i in range(len(hdu))]
# science image
try:
exten = names.index('DET{:s}-PROCESSED'.format(sdet))
            except ValueError:  # Backwards compatibility
det_error_msg(exten, sdet)
sciimg = hdu[exten].data
# skymodel
try:
exten = names.index('DET{:s}-SKY'.format(sdet))
            except ValueError:  # Backwards compatibility
det_error_msg(exten, sdet)
skymodel = hdu[exten].data
# Inverse variance model
try:
exten = names.index('DET{:s}-IVARMODEL'.format(sdet))
            except ValueError:  # Backwards compatibility
det_error_msg(exten, sdet)
sciivar = hdu[exten].data
# Mask
try:
exten = names.index('DET{:s}-MASK'.format(sdet))
            except ValueError:  # Backwards compatibility
det_error_msg(exten, sdet)
mask = hdu[exten].data
if ifile == 0:
                # the two shapes accommodate the possibility that waveimg and tilts are binned differently
shape_wave = (nfiles, waveimg.shape[0], waveimg.shape[1])
shape_sci = (nfiles, sciimg.shape[0], sciimg.shape[1])
waveimg_stack = np.zeros(shape_wave, dtype=float)
tilts_stack = np.zeros(shape_wave, dtype=float)
sciimg_stack = np.zeros(shape_sci, dtype=float)
skymodel_stack = np.zeros(shape_sci, dtype=float)
sciivar_stack = np.zeros(shape_sci, dtype=float)
mask_stack = np.zeros(shape_sci, dtype=float)
slitmask_stack = np.zeros(shape_sci, dtype=float)
# Slit Traces and slitmask
if tracefile != tracefiles[ifile]:
tslits_dict \
= edgetrace.EdgeTraceSet.from_file(tracefiles[ifile]).convert_to_tslits_dict()
tracefile = tracefiles[ifile]
#
tslits_dict_list.append(tslits_dict)
slitmask = pixels.tslits2mask(tslits_dict)
slitmask_stack[ifile, :, :] = slitmask
waveimg_stack[ifile, :, :] = waveimg
tilts_stack[ifile, :, :] = tilts['tilts']
sciimg_stack[ifile, :, :] = sciimg
sciivar_stack[ifile, :, :] = sciivar
mask_stack[ifile, :, :] = mask
skymodel_stack[ifile, :, :] = skymodel
# Specobjs
if os.path.isfile(spec1d_files[ifile]):
sobjs = specobjs.SpecObjs.from_fitsfile(spec1d_files[ifile])
head1d_list.append(sobjs.header)
this_det = sobjs.DET == self.det
specobjs_list.append(sobjs[this_det])
# slitmask_stack = np.einsum('i,jk->ijk', np.ones(nfiles), slitmask)
# Fill the master key dict
head2d = head2d_list[0]
master_key_dict = {}
master_key_dict['frame'] = head2d['FRAMMKEY'] + '_{:02d}'.format(self.det)
master_key_dict['bpm'] = head2d['BPMMKEY'] + '_{:02d}'.format(self.det)
master_key_dict['bias'] = head2d['BIASMKEY'] + '_{:02d}'.format(self.det)
master_key_dict['arc'] = head2d['ARCMKEY'] + '_{:02d}'.format(self.det)
master_key_dict['trace'] = head2d['TRACMKEY'] + '_{:02d}'.format(self.det)
master_key_dict['flat'] = head2d['FLATMKEY'] + '_{:02d}'.format(self.det)
# TODO In the future get this stuff from the headers once data model finalized
spectrograph = util.load_spectrograph(tslits_dict['spectrograph'])
stack_dict = dict(specobjs_list=specobjs_list, tslits_dict_list=tslits_dict_list,
slitmask_stack=slitmask_stack,
sciimg_stack=sciimg_stack, sciivar_stack=sciivar_stack,
skymodel_stack=skymodel_stack, mask_stack=mask_stack,
tilts_stack=tilts_stack, waveimg_stack=waveimg_stack,
head1d_list=head1d_list, head2d_list=head2d_list,
redux_path=redux_path,
master_key_dict=master_key_dict,
spectrograph=spectrograph.spectrograph,
pypeline=spectrograph.pypeline)
return stack_dict
# Multislit can coadd with:
# 1) input offsets or if offsets is None, it will find the brightest trace and compute them
# 2) specified weights, or if weights is None and auto_weights=True, it will compute weights using the brightest object
# Echelle can either stack with:
# 1) input offsets or if offsets is None, it will find the objid of brightest trace and stack all orders relative to the trace of this object.
# 2) specified weights, or if weights is None and auto_weights=True,
# it will use wavelength dependent weights determined from the spectrum of the brightest objects objid on each order
class MultiSlitCoadd2d(Coadd2d):
"""
Child of Coadd2d for Multislit and Longslit reductions
# Multislit can coadd with:
# 1) input offsets or if offsets is None, it will find the brightest trace and compute them
# 2) specified weights, or if weights is None and auto_weights=True, it will compute weights using the brightest object
"""
def __init__(self, spec2d_files, spectrograph, det=1, offsets=None, weights='auto', sn_smooth_npix=None,
ir_redux=False, par=None, show=False, show_peaks=False, debug_offsets=False, debug=False, **kwargs_wave):
super(MultiSlitCoadd2d, self).__init__(spec2d_files, spectrograph, det=det, offsets=offsets, weights=weights,
sn_smooth_npix=sn_smooth_npix, ir_redux=ir_redux, par=par,
show=show, show_peaks=show_peaks, debug_offsets=debug_offsets,
debug=debug, **kwargs_wave)
## Use Cases:
# 1) offsets is None -- auto compute offsets from brightest object, so then default to auto_weights=True
# 2) offsets not None, weights = None (uniform weighting) or weights is not None (input weights)
# 3) offsets not None, auto_weights=True (Do not support)
# Default wave_method for Multislit is linear
kwargs_wave['wave_method'] = 'linear' if 'wave_method' not in kwargs_wave else kwargs_wave['wave_method']
self.wave_grid, self.wave_grid_mid, self.dsamp = self.get_wave_grid(**kwargs_wave)
if offsets is None:
self.objid_bri, self.slitid_bri, self.snr_bar_bri, self.offsets = self.compute_offsets()
self.use_weights = self.parse_weights(weights)
def parse_weights(self, weights):
if 'auto' in weights:
rms_sn, use_weights = self.optimal_weights(self.slitid_bri, self.objid_bri, const_weights=True)
return use_weights
elif 'uniform' in weights:
return 'uniform'
elif isinstance(weights, (list, np.ndarray)):
if len(weights) != self.nexp:
                msgs.error('If weights are input, they must be a list/array with the same number of elements as the exposures')
return weights
else:
msgs.error('Unrecognized format for weights')
# TODO When we run multislit, we actually compute the rebinned images twice. Once here to compute the offsets
# and another time to weighted_combine the images in compute2d. This could be sped up
def compute_offsets(self):
objid_bri, slitid_bri, snr_bar_bri = self.get_brightest_obj(self.stack_dict['specobjs_list'], self.nslits)
msgs.info('Determining offsets using brightest object on slit: {:d} with avg SNR={:5.2f}'.format(slitid_bri,np.mean(snr_bar_bri)))
thismask_stack = self.stack_dict['slitmask_stack'] == slitid_bri
trace_stack_bri = np.zeros((self.nspec, self.nexp))
        # TODO Need to think about whether we have multiple tslits_dict for each exposure or a single one
for iexp in range(self.nexp):
trace_stack_bri[:,iexp] = (self.stack_dict['tslits_dict_list'][iexp]['slit_left'][:,slitid_bri] +
self.stack_dict['tslits_dict_list'][iexp]['slit_righ'][:,slitid_bri])/2.0
# Determine the wavelength grid that we will use for the current slit/order
wave_bins = get_wave_bins(thismask_stack, self.stack_dict['waveimg_stack'], self.wave_grid)
dspat_bins, dspat_stack = get_spat_bins(thismask_stack, trace_stack_bri)
sci_list = [self.stack_dict['sciimg_stack'] - self.stack_dict['skymodel_stack']]
var_list = []
msgs.info('Rebinning Images')
sci_list_rebin, var_list_rebin, norm_rebin_stack, nsmp_rebin_stack = rebin2d(
wave_bins, dspat_bins, self.stack_dict['waveimg_stack'], dspat_stack, thismask_stack,
(self.stack_dict['mask_stack'] == 0), sci_list, var_list)
thismask = np.ones_like(sci_list_rebin[0][0,:,:],dtype=bool)
nspec_psuedo, nspat_psuedo = thismask.shape
slit_left = np.full(nspec_psuedo, 0.0)
slit_righ = np.full(nspec_psuedo, nspat_psuedo)
inmask = norm_rebin_stack > 0
traces_rect = np.zeros((nspec_psuedo, self.nexp))
sobjs = specobjs.SpecObjs()
#specobj_dict = {'setup': 'unknown', 'slitid': 999, 'orderindx': 999, 'det': self.det, 'objtype': 'unknown',
# 'pypeline': 'MultiSLit' + '_coadd_2d'}
for iexp in range(self.nexp):
sobjs_exp, _ = extract.objfind(sci_list_rebin[0][iexp,:,:], thismask, slit_left, slit_righ,
inmask=inmask[iexp,:,:], ir_redux=self.ir_redux,
fwhm=self.par['scienceimage']['findobj']['find_fwhm'],
trim_edg=self.par['scienceimage']['findobj']['find_trim_edge'],
npoly_cont=self.par['scienceimage']['findobj']['find_npoly_cont'],
maxdev=self.par['scienceimage']['findobj']['find_maxdev'],
ncoeff=3, sig_thresh=self.par['scienceimage']['findobj']['sig_thresh'], nperslit=1,
show_trace=self.debug_offsets, show_peaks=self.debug_offsets)
sobjs.add_sobj(sobjs_exp)
traces_rect[:, iexp] = sobjs_exp.TRACE_SPAT
        # Now determine the offsets. Arbitrarily set the zeroth trace to the reference
med_traces_rect = np.median(traces_rect,axis=0)
offsets = med_traces_rect[0] - med_traces_rect
# Print out a report on the offsets
msg_string = msgs.newline() + '---------------------------------------------'
msg_string += msgs.newline() + ' Summary of offsets for highest S/N object '
msg_string += msgs.newline() + ' found on slitid = {:d} '.format(slitid_bri)
msg_string += msgs.newline() + '---------------------------------------------'
msg_string += msgs.newline() + ' exp# offset '
for iexp, off in enumerate(offsets):
msg_string += msgs.newline() + ' {:d} {:5.2f}'.format(iexp, off)
msg_string += msgs.newline() + '-----------------------------------------------'
msgs.info(msg_string)
if self.debug_offsets:
for iexp in range(self.nexp):
plt.plot(traces_rect[:, iexp], linestyle='--', label='original trace')
plt.plot(traces_rect[:, iexp] + offsets[iexp], label='shifted traces')
plt.legend()
plt.show()
return objid_bri, slitid_bri, snr_bar_bri, offsets
def get_brightest_obj(self, specobjs_list, nslits):
"""
Utility routine to find the brightest object in each exposure given a specobjs_list for MultiSlit reductions.
Args:
specobjs_list: list
List of SpecObjs objects.
            nslits: int
                Number of slits
Returns:
tuple: Returns the following:
- objid: ndarray, int, shape (len(specobjs_list),):
Array of object ids representing the brightest object
in each exposure
- slitid (int): Slit that highest S/N ratio object is on
(only for pypeline=MultiSlit)
- snr_bar: ndarray, float, shape (len(list),): Average
S/N over all the orders for this object
"""
nexp = len(specobjs_list)
nspec = specobjs_list[0][0].TRACE_SPAT.shape[0]
slit_snr_max = np.full((nslits, nexp), -np.inf)
objid_max = np.zeros((nslits, nexp), dtype=int)
        # Loop over each exposure and slit, finding the brightest object on that slit in every exposure
for iexp, sobjs in enumerate(specobjs_list):
for islit in range(nslits):
ithis = sobjs.SLITID == islit
nobj_slit = np.sum(ithis)
if np.any(ithis):
objid_this = sobjs[ithis].OBJID
flux = np.zeros((nspec, nobj_slit))
ivar = np.zeros((nspec, nobj_slit))
wave = np.zeros((nspec, nobj_slit))
mask = np.zeros((nspec, nobj_slit), dtype=bool)
for iobj, spec in enumerate(sobjs[ithis]):
flux[:, iobj] = spec.OPT_COUNTS
ivar[:, iobj] = spec.OPT_COUNTS_IVAR
wave[:, iobj] = spec.OPT_WAVE
mask[:, iobj] = spec.OPT_MASK
rms_sn, weights = coadd1d.sn_weights(wave, flux, ivar, mask, None, const_weights=True)
imax = np.argmax(rms_sn)
slit_snr_max[islit, iexp] = rms_sn[imax]
objid_max[islit, iexp] = objid_this[imax]
# Find the highest snr object among all the slits
slit_snr = np.mean(slit_snr_max, axis=1)
slitid = slit_snr.argmax()
snr_bar_mean = slit_snr[slitid]
snr_bar = slit_snr_max[slitid, :]
objid = objid_max[slitid, :]
if (snr_bar_mean == -np.inf):
msgs.error('You do not appear to have a unique reference object that was traced as the highest S/N '
'ratio on the same slit of every exposure')
self.snr_report(snr_bar, slitid=slitid)
return objid, slitid, snr_bar
# TODO add an option here to actually use the reference trace for cases where they are on the same slit and it is
# single slit???
def reference_trace_stack(self, slitid, offsets=None, objid=None):
return self.offset_slit_cen(slitid, offsets)
class EchelleCoadd2d(Coadd2d):
"""
    Child of Coadd2d for Echelle reductions
# Echelle can either stack with:
# 1) input offsets or if offsets is None, it will find the objid of brightest trace and stack all orders relative to the trace of this object.
# 2) specified weights, or if weights is None and auto_weights=True,
# it will use wavelength dependent weights determined from the spectrum of the brightest objects objid on each order
"""
def __init__(self, spec2d_files, spectrograph, det=1, offsets=None, weights='auto', sn_smooth_npix=None,
ir_redux=False, par=None, show=False, show_peaks=False, debug_offsets=False, debug=False, **kwargs_wave):
super(EchelleCoadd2d, self).__init__(spec2d_files, spectrograph, det=det, offsets=offsets, weights=weights,
sn_smooth_npix=sn_smooth_npix, ir_redux=ir_redux, par=par,
show=show, show_peaks=show_peaks, debug_offsets=debug_offsets, debug=debug,
**kwargs_wave)
# Default wave_method for Echelle is log10
kwargs_wave['wave_method'] = 'log10' if 'wave_method' not in kwargs_wave else kwargs_wave['wave_method']
self.wave_grid, self.wave_grid_mid, self.dsamp = self.get_wave_grid(**kwargs_wave)
self.objid_bri = None
self.slitid_bri = None
self.snr_bar_bri = None
if offsets is None:
self.objid_bri, self.slitid_bri, self.snr_bar_bri = self.get_brightest_obj(self.stack_dict['specobjs_list'], self.nslits)
self.use_weights = self.parse_weights(weights)
def parse_weights(self, weights):
if 'auto' in weights:
return 'auto_echelle'
elif 'uniform' in weights:
return 'uniform'
elif isinstance(weights, (list, np.ndarray)):
if len(weights) != self.nexp:
                msgs.error('If weights are input, they must be a list/array with the same number of elements as the exposures')
return weights
else:
msgs.error('Unrecognized format for weights')
def get_brightest_obj(self, specobjs_list, nslits):
"""
Utility routine to find the brightest object in each exposure given a specobjs_list for Echelle reductions.
Args:
specobjs_list: list
List of SpecObjs objects.
            nslits: int
                Number of slits
Returns:
tuple: Returns the following:
- objid: ndarray, int, shape (len(specobjs_list),):
Array of object ids representing the brightest object
in each exposure
- snr_bar: ndarray, float, shape (len(list),): Average
S/N over all the orders for this object
"""
nexp = len(specobjs_list)
objid = np.zeros(nexp, dtype=int)
snr_bar = np.zeros(nexp)
# norders = specobjs_list[0].ech_orderindx.max() + 1
for iexp, sobjs in enumerate(specobjs_list):
uni_objid = np.unique(sobjs.ECH_OBJID)
nobjs = len(uni_objid)
order_snr = np.zeros((nslits, nobjs))
for iord in range(nslits):
for iobj in range(nobjs):
ind = (sobjs.ECH_ORDERINDX == iord) & (sobjs.ECH_OBJID == uni_objid[iobj])
flux = sobjs[ind][0].OPT_COUNTS
ivar = sobjs[ind][0].OPT_COUNTS_IVAR
wave = sobjs[ind][0].OPT_WAVE
mask = sobjs[ind][0].OPT_MASK
rms_sn, weights = coadd1d.sn_weights(wave, flux, ivar, mask, self.sn_smooth_npix, const_weights=True)
order_snr[iord, iobj] = rms_sn
# Compute the average SNR and find the brightest object
snr_bar_vec = np.mean(order_snr, axis=0)
objid[iexp] = uni_objid[snr_bar_vec.argmax()]
snr_bar[iexp] = snr_bar_vec[snr_bar_vec.argmax()]
self.snr_report(snr_bar)
return objid, None, snr_bar
def reference_trace_stack(self, slitid, offsets=None, objid=None):
"""
Utility function for determining the reference trace about which 2d coadds are performed.
There are two modes of operation to determine the reference trace for the 2d coadd of a given slit/order:
1) offsets: we stack about the center of the slit for the slit in question with the input offsets added
         2) objid: we stack about the trace of a reference object for this slit, given for each exposure by the input objid
Either offsets or objid must be provided, but the code will raise an exception if both are provided.
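        As an illustrative example (not from the original source): with three exposures one would
        pass either offsets=[0.0, 1.2, -0.8] to stack about the offset slit/order center, or
        objid=[1, 1, 2] to stack about the traced object with that id in each exposure.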
Args:
slitid (int):
The slit or order that we are currently considering
            offsets (list or np.ndarray):
                An array of offsets with length nexp, the number of images being coadded.
            objid (list or np.ndarray):
                An array of objids with length nexp, the number of images being coadded.
        Returns:
            ref_trace_stack (np.ndarray):
                An array with shape (nspec, nexp) containing the reference trace for each of the nexp exposures.
"""
if offsets is not None and objid is not None:
            msgs.error('You can only input offsets or an objid, but not both')
nexp = len(offsets) if offsets is not None else len(objid)
if offsets is not None:
return self.offset_slit_cen(slitid, offsets)
elif objid is not None:
specobjs_list = self.stack_dict['specobjs_list']
nspec = specobjs_list[0][0].TRACE_SPAT.shape[0]
# Grab the traces, flux, wavelength and noise for this slit and objid.
ref_trace_stack = np.zeros((nspec, nexp), dtype=float)
for iexp, sobjs in enumerate(specobjs_list):
ithis = (sobjs.ECH_ORDERINDX == slitid) & (sobjs.ECH_OBJID == objid[iexp])
ref_trace_stack[:, iexp] = sobjs[ithis].TRACE_SPAT
return ref_trace_stack
else:
msgs.error('You must input either offsets or an objid to determine the stack of reference traces')
return None
def instantiate_me(spec2d_files, spectrograph, **kwargs):
"""
Instantiate the CoAdd2d subclass appropriate for the provided
spectrograph.
The class must be subclassed from Reduce. See :class:`Reduce` for
the description of the valid keyword arguments.
Args:
spectrograph
(:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The instrument used to collect the data to be reduced.
tslits_dict: dict
dictionary containing slit/order boundary information
tilts (np.ndarray):
Returns:
:class:`PypeIt`: One of the classes with :class:`PypeIt` as its
base.
"""
indx = [ c.__name__ == (spectrograph.pypeline + 'Coadd2d') for c in Coadd2d.__subclasses__() ]
if not np.any(indx):
msgs.error('Pipeline {0} is not defined!'.format(spectrograph.pypeline))
return Coadd2d.__subclasses__()[np.where(indx)[0][0]](spec2d_files, spectrograph, **kwargs)
# TODO: Can we get rid of all the commented lines below?
# Determine brightest object either if offsets were not input, or if automatic weight determiniation is desired
# if offsets is None or auto_weights is True:
# self.objid_bri, self.slitid_bri, self.snr_bar_bri = get_brightest_obj(self.stack_dict['specobjs_list'], self.nslits)
# else:
# self.objid_bri, self.slitid_bri, self.snr_bar_bri = None, None, None
# Echelle can either stack with:
# 1) input offsets or if offsets is None, it will find the objid of brightest trace and stack all orders relative to the trace of this object.
# 2) specified weights, or if weights is None and auto_weights=True,
# it will use wavelength dependent weights determined from the spectrum of the brightest objects objid on each order
# if offsets is None:
# If echelle and offsets is None get the brightest object and stack about that
#
# if 'MultiSlit' in pypeline:
# msgs.info('Determining offsets using brightest object on slit: {:d} with avg SNR={:5.2f}'.format(
# slitid_bri, np.mean(snr_bar_bri)))
# thismask_stack = self.stack_dict['slitmask_stack'] == slitid_bri
# trace_stack_bri = np.zeros((nspec, nexp))
# # TODO Need to think abbout whether we have multiple tslits_dict for each exposure or a single one
# for iexp in range(nexp):
# trace_stack_bri[:, iexp] = (stack_dict['tslits_dict']['slit_left'][:, slitid_bri] +
# stack_dict['tslits_dict']['slit_righ'][:, slitid_bri]) / 2.0
# # Determine the wavelength grid that we will use for the current slit/order
# wave_bins = get_wave_bins(thismask_stack, stack_dict['waveimg_stack'], wave_grid)
# dspat_bins, dspat_stack = get_spat_bins(thismask_stack, trace_stack_bri)
#
# sci_list = [stack_dict['sciimg_stack'] - stack_dict['skymodel_stack'], stack_dict['waveimg_stack'], dspat_stack]
# var_list = [utils.calc_ivar(stack_dict['sciivar_stack'])]
#
# sci_list_rebin, var_list_rebin, norm_rebin_stack, nsmp_rebin_stack = rebin2d(
# wave_bins, dspat_bins, stack_dict['waveimg_stack'], dspat_stack, thismask_stack,
# (stack_dict['mask_stack'] == 0), sci_list, var_list)
# thismask = np.ones_like(sci_list_rebin[0][0, :, :], dtype=bool)
# nspec_psuedo, nspat_psuedo = thismask.shape
# slit_left = np.full(nspec_psuedo, 0.0)
# slit_righ = np.full(nspec_psuedo, nspat_psuedo)
# inmask = norm_rebin_stack > 0
# traces_rect = np.zeros((nspec_psuedo, nexp))
# sobjs = specobjs.SpecObjs()
# specobj_dict = {'setup': 'unknown', 'slitid': 999, 'orderindx': 999, 'det': det, 'objtype': 'unknown',
# 'pypeline': pypeline + '_coadd_2d'}
# for iexp in range(nexp):
# sobjs_exp, _ = extract.objfind(sci_list_rebin[0][iexp, :, :], thismask, slit_left, slit_righ,
# inmask=inmask[iexp, :, :], fwhm=3.0, maxdev=2.0, ncoeff=3, sig_thresh=10.0,
# nperslit=1,
# debug_all=debug, specobj_dict=specobj_dict)
# sobjs.add_sobj(sobjs_exp)
# traces_rect[:, iexp] = sobjs_exp.trace_spat
# # Now deterimine the offsets. Arbitrarily set the zeroth trace to the reference
# med_traces_rect = np.median(traces_rect, axis=0)
# offsets = med_traces_rect[0] - med_traces_rect
# if debug:
# for iexp in range(nexp):
# plt.plot(traces_rect[:, iexp], linestyle='--', label='original trace')
# plt.plot(traces_rect[:, iexp] + offsets[iexp], label='shifted traces')
# plt.legend()
# plt.show()
# rms_sn, weights = optimal_weights(stack_dict['specobjs_list'], slitid_bri, objid_bri,
# sn_smooth_npix, const_weights=True)
# # TODO compute the variance in the registration of the traces and write that out?
#
# coadd_list = []
# for islit in range(self.nslits):
# msgs.info('Performing 2d coadd for slit: {:d}/{:d}'.format(islit, self.nslits - 1))
# ref_trace_stack = reference_trace_stack(islit, self.stack_dict, offsets=offsets, objid=None)
# # Determine the wavelength dependent optimal weights and grab the reference trace
# if 'Echelle' in self.pypeline:
# rms_sn, weights = optimal_weights(self.stack_dict['specobjs_list'], islit, objid_bri, sn_smooth_npix)
#
# thismask_stack = self.stack_dict['slitmask_stack'] == islit
# # Perform the 2d coadd
# coadd_dict = conmpute_coadd2d(ref_trace_stack, self.stack_dict['sciimg_stack'], self.stack_dict['sciivar_stack'],
# self.stack_dict['skymodel_stack'], self.stack_dict['mask_stack'] == 0,
# self.stack_dict['tilts_stack'], thismask_stack, self.stack_dict['waveimg_stack'],
# self.wave_grid, weights=weights)
# coadd_list.append(coadd_dict)
#
# nspec_vec = np.zeros(self.nslits, dtype=int)
# nspat_vec = np.zeros(self.nslits, dtype=int)
# for islit, cdict in enumerate(coadd_list):
# nspec_vec[islit] = cdict['nspec']
# nspat_vec[islit] = cdict['nspat']
#
# # Determine the size of the psuedo image
# nspat_pad = 10
# nspec_psuedo = nspec_vec.max()
# nspat_psuedo = np.sum(nspat_vec) + (nslits + 1) * nspat_pad
# spec_vec_psuedo = np.arange(nspec_psuedo)
# shape_psuedo = (nspec_psuedo, nspat_psuedo)
# imgminsky_psuedo = np.zeros(shape_psuedo)
# sciivar_psuedo = np.zeros(shape_psuedo)
# waveimg_psuedo = np.zeros(shape_psuedo)
# tilts_psuedo = np.zeros(shape_psuedo)
# spat_img_psuedo = np.zeros(shape_psuedo)
# nused_psuedo = np.zeros(shape_psuedo, dtype=int)
# inmask_psuedo = np.zeros(shape_psuedo, dtype=bool)
# wave_mid = np.zeros((nspec_psuedo, nslits))
# wave_mask = np.zeros((nspec_psuedo, nslits), dtype=bool)
# wave_min = np.zeros((nspec_psuedo, nslits))
# wave_max = np.zeros((nspec_psuedo, nslits))
# dspat_mid = np.zeros((nspat_psuedo, nslits))
#
# spat_left = nspat_pad
# slit_left = np.zeros((nspec_psuedo, nslits))
# slit_righ = np.zeros((nspec_psuedo, nslits))
# spec_min1 = np.zeros(nslits)
# spec_max1 = np.zeros(nslits)
#
# for islit, coadd_dict in enumerate(coadd_list):
# spat_righ = spat_left + nspat_vec[islit]
# ispec = slice(0, nspec_vec[islit])
# ispat = slice(spat_left, spat_righ)
# imgminsky_psuedo[ispec, ispat] = coadd_dict['imgminsky']
# sciivar_psuedo[ispec, ispat] = coadd_dict['sciivar']
# waveimg_psuedo[ispec, ispat] = coadd_dict['waveimg']
# tilts_psuedo[ispec, ispat] = coadd_dict['tilts']
# # spat_psuedo is the sub-pixel image position on the rebinned psuedo image
# inmask_psuedo[ispec, ispat] = coadd_dict['outmask']
# image_temp = (coadd_dict['dspat'] - coadd_dict['dspat_mid'][0] + spat_left) * coadd_dict['outmask']
# spat_img_psuedo[ispec, ispat] = image_temp
# nused_psuedo[ispec, ispat] = coadd_dict['nused']
# wave_min[ispec, islit] = coadd_dict['wave_min']
# wave_max[ispec, islit] = coadd_dict['wave_max']
# wave_mid[ispec, islit] = coadd_dict['wave_mid']
# wave_mask[ispec, islit] = True
# # Fill in the rest of the wave_mid with the corresponding points in the wave_grid
# wave_this = wave_mid[wave_mask[:, islit], islit]
# ind_upper = np.argmin(np.abs(wave_grid_mid - np.max(wave_this.max()))) + 1
# if nspec_vec[islit] != nspec_psuedo:
# wave_mid[nspec_vec[islit]:, islit] = wave_grid_mid[ind_upper:ind_upper + (nspec_psuedo - nspec_vec[islit])]
#
# dspat_mid[ispat, islit] = coadd_dict['dspat_mid']
# slit_left[:, islit] = np.full(nspec_psuedo, spat_left)
# slit_righ[:, islit] = np.full(nspec_psuedo, spat_righ)
# spec_max1[islit] = nspec_vec[islit] - 1
# spat_left = spat_righ + nspat_pad
#
# slitcen = (slit_left + slit_righ) / 2.0
# tslits_dict_psuedo = dict(slit_left=slit_left, slit_righ=slit_righ, slitcen=slitcen,
# nspec=nspec_psuedo, nspat=nspat_psuedo, pad=0,
# nslits=self.nslits, binspectral=1, binspatial=1, spectrograph=spectrograph.spectrograph,
# spec_min=spec_min1, spec_max=spec_max1,
# maskslits=np.zeros(slit_left.shape[1], dtype=np.bool))
#
# slitmask_psuedo = pixels.tslits2mask(tslits_dict_psuedo)
# # This is a kludge to deal with cases where bad wavelengths result in large regions where the slit is poorly sampled,
# # which wreaks havoc on the local sky-subtraction
# min_slit_frac = 0.70
# spec_min = np.zeros(self.nslits)
# spec_max = np.zeros(self.nslits)
# for islit in range(self.nslits):
# slit_width = np.sum(inmask_psuedo * (slitmask_psuedo == islit), axis=1)
# slit_width_img = np.outer(slit_width, np.ones(nspat_psuedo))
# med_slit_width = np.median(slit_width_img[slitmask_psuedo == islit])
# nspec_eff = np.sum(slit_width > min_slit_frac * med_slit_width)
# nsmooth = int(np.fmax(np.ceil(nspec_eff * 0.02), 10))
# slit_width_sm = scipy.ndimage.filters.median_filter(slit_width, size=nsmooth, mode='reflect')
# igood = (slit_width_sm > min_slit_frac * med_slit_width)
# spec_min[islit] = spec_vec_psuedo[igood].min()
# spec_max[islit] = spec_vec_psuedo[igood].max()
# bad_pix = (slit_width_img < min_slit_frac * med_slit_width) & (slitmask_psuedo == islit)
# inmask_psuedo[bad_pix] = False
#
# # Update with tslits_dict_psuedo
# tslits_dict_psuedo['spec_min'] = spec_min
# tslits_dict_psuedo['spec_max'] = spec_max
# slitmask_psuedo = pixels.tslits2mask(tslits_dict_psuedo)
#
# # Make a fake bitmask from the outmask. We are kludging the crmask to be the outmask_psuedo here, and setting the bpm to
# # be good everywhere
# # mask = processimages.ProcessImages.build_mask(imgminsky_psuedo, sciivar_psuedo, np.invert(inmask_psuedo),
# # np.zeros_like(inmask_psuedo), slitmask=slitmask_psuedo)
#
# # Generate a ScienceImage
# sciImage = scienceimage.ScienceImage.from_images(self.spectrograph, det,
# self.par['scienceframe']['process'],
# np.zeros_like(inmask_psuedo), # Dummy bpm
# imgminsky_psuedo, sciivar_psuedo,
# np.zeros_like(inmask_psuedo), # Dummy rn2img
# crmask=np.invert(inmask_psuedo))
# sciImage.build_mask(slitmask=slitmask_psuedo)
#
# redux = reduce.instantiate_me(sciImage, self.spectrograph, tslits_dict_psuedo, par, tilts_psuedo, ir_redux=ir_redux,
# objtype='science', binning=self.binning)
#
# if show:
# redux.show('image', image=imgminsky_psuedo * (sciImage.mask == 0), chname='imgminsky', slits=True, clear=True)
# # Object finding
# sobjs_obj, nobj, skymask_init = redux.find_objects(sciImage.image, ir_redux=ir_redux, show_peaks=show_peaks, show=show)
# # Local sky-subtraction
# global_sky_psuedo = np.zeros_like(imgminsky_psuedo) # No global sky for co-adds since we go straight to local
# skymodel_psuedo, objmodel_psuedo, ivarmodel_psuedo, outmask_psuedo, sobjs = \
# redux.local_skysub_extract(waveimg_psuedo, global_sky_psuedo, sobjs_obj, spat_pix=spat_psuedo,
# model_noise=False, show_profile=show, show=show)
#
# if ir_redux:
# sobjs.purge_neg()
#
# # Add the information about the fixed wavelength grid to the sobjs
# for spec in sobjs:
# spec.boxcar['WAVE_GRID_MASK'] = wave_mask[:, spec.slitid]
# spec.boxcar['WAVE_GRID'] = wave_mid[:, spec.slitid]
# spec.boxcar['WAVE_GRID_MIN'] = wave_min[:, spec.slitid]
# spec.boxcar['WAVE_GRID_MAX'] = wave_max[:, spec.slitid]
#
# spec.optimal['WAVE_GRID_MASK'] = wave_mask[:, spec.slitid]
# spec.optimal['WAVE_GRID'] = wave_mid[:, spec.slitid]
# spec.optimal['WAVE_GRID_MIN'] = wave_min[:, spec.slitid]
# spec.optimal['WAVE_GRID_MAX'] = wave_max[:, spec.slitid]
#
# # TODO Implement flexure and heliocentric corrections on the single exposure 1d reductions and apply them to the
# # waveimage. Change the data model to accomodate a wavelength model for each image.
# # Using the same implementation as in core/pypeit
#
# # Write out the psuedo master files to disk
# master_key_dict = self.stack_dict['master_key_dict']
#
# # TODO: These saving operations are a temporary kludge
# waveImage = WaveImage(None, None, None, None, None, None, master_key=master_key_dict['arc'],
# master_dir=master_dir)
# waveImage.save(image=waveimg_psuedo)
#
# traceSlits = TraceSlits(None, None, master_key=master_key_dict['trace'], master_dir=master_dir)
# traceSlits.save(tslits_dict=tslits_dict_psuedo)
# return imgminsky_psuedo, sciivar_psuedo, skymodel_psuedo, objmodel_psuedo, ivarmodel_psuedo, outmask_psuedo, sobjs
#
#
#
# def get_brightest_obj(specobjs_list, nslits, pypeline):
# """
# Utility routine to find the brightest object in each exposure given a specobjs_list. This currently only works
# for echelle.
#
# Parameters:
# specobjs_list: list
# List of SpecObjs objects.
# Optional Parameters:
# echelle: bool, default=True
#
# Returns:
# (objid, slitid, snr_bar), tuple
#
# objid: ndarray, int, shape (len(specobjs_list),)
# Array of object ids representing the brightest object in each exposure
# slitid (int):
# Slit that highest S/N ratio object is on (only for pypeline=MultiSlit)
# snr_bar: ndarray, float, shape (len(list),)
# Average S/N over all the orders for this object
#
# """
# nexp = len(specobjs_list)
# nspec = specobjs_list[0][0].shape[0]
# if 'Echelle' in pypeline:
# objid = np.zeros(nexp, dtype=int)
# snr_bar = np.zeros(nexp)
# #norders = specobjs_list[0].ech_orderindx.max() + 1
# for iexp, sobjs in enumerate(specobjs_list):
# uni_objid = np.unique(sobjs.ech_objid)
# nobjs = len(uni_objid)
# order_snr = np.zeros((nslits, nobjs))
# for iord in range(nslits):
# for iobj in range(nobjs):
# ind = (sobjs.ech_orderindx == iord) & (sobjs.ech_objid == uni_objid[iobj])
# flux = sobjs[ind][0].optimal['COUNTS']
# ivar = sobjs[ind][0].optimal['COUNTS_IVAR']
# wave = sobjs[ind][0].optimal['WAVE']
# mask = sobjs[ind][0].optimal['MASK']
# rms_sn, weights = coadd1d.sn_weights(wave, flux, ivar, mask, const_weights=True)
# order_snr[iord, iobj] = rms_sn
#
# # Compute the average SNR and find the brightest object
# snr_bar_vec = np.mean(order_snr, axis=0)
# objid[iexp] = uni_objid[snr_bar_vec.argmax()]
# snr_bar[iexp] = snr_bar_vec[snr_bar_vec.argmax()]
# slitid = None
# else:
# slit_snr_max = np.full((nslits, nexp), -np.inf)
# objid_max = np.zeros((nslits, nexp),dtype=int)
# # Loop over each exposure, slit, find the brighest object on that slit for every exposure
# for iexp, sobjs in enumerate(specobjs_list):
# for islit in range(nslits):
# ithis = sobjs.slitid == islit
# nobj_slit = np.sum(ithis)
# if np.any(ithis):
# objid_this = sobjs[ithis].objid
# flux = np.zeros((nspec, nobj_slit))
# ivar = np.zeros((nspec, nobj_slit))
# wave = np.zeros((nspec, nobj_slit))
# mask = np.zeros((nspec, nobj_slit), dtype=bool)
# for iobj, spec in enumerate(sobjs[ithis]):
# flux[:, iobj] = spec.optimal['COUNTS']
# ivar[:,iobj] = spec.optimal['COUNTS_IVAR']
# wave[:,iobj] = spec.optimal['WAVE']
# mask[:,iobj] = spec.optimal['MASK']
# rms_sn, weights = coadd1d.sn_weights(wave, flux, ivar, mask, None, const_weights=True)
# imax = np.argmax(rms_sn)
# slit_snr_max[islit, iexp] = rms_sn[imax]
# objid_max[islit, iexp] = objid_this[imax]
# # Find the highest snr object among all the slits
# slit_snr = np.mean(slit_snr_max, axis=1)
# slitid = slit_snr.argmax()
# snr_bar_mean = slit_snr[slitid]
# snr_bar = slit_snr_max[slitid, :]
# objid = objid_max[slitid, :]
# if (snr_bar_mean == -np.inf):
# msgs.error('You do not appear to have a unique reference object that was traced as the highest S/N '
# 'ratio on the same slit of every exposure')
#
# # Print out a report on the SNR
# msg_string = msgs.newline() + '-------------------------------------'
# msg_string += msgs.newline() + ' Summary for highest S/N object'
# if 'MultiSlit' in pypeline:
# msg_string += msgs.newline() + ' found on slitid = {:d} '.format(slitid)
#
# msg_string += msgs.newline() + '-------------------------------------'
# msg_string += msgs.newline() + ' exp# S/N'
# for iexp, snr in enumerate(snr_bar):
# msg_string += msgs.newline() + ' {:d} {:5.2f}'.format(iexp, snr)
#
# msg_string += msgs.newline() + '-------------------------------------'
# msgs.info(msg_string)
#
# return objid, slitid, snr_bar
```
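A minimal driver for the classes above might look like the following sketch. It is illustrative only: the module path `pypeit.core.coadd2d`, the `load_spectrograph` helper location, the spectrograph name, and the spec2d file names are all assumptions rather than details verified against the original package.
```python
# Illustrative sketch -- paths, names and keyword values are assumptions.
from pypeit.core import coadd2d
from pypeit.spectrographs.util import load_spectrograph
spec2d_files = ['Science/spec2d_exp1.fits', 'Science/spec2d_exp2.fits']  # placeholder files
spectrograph = load_spectrograph('keck_nires')  # any supported spectrograph name
# instantiate_me() selects MultiSlitCoadd2d or EchelleCoadd2d from spectrograph.pypeline.
coadd = coadd2d.instantiate_me(spec2d_files, spectrograph, det=1, offsets=None,
                               weights='auto', ir_redux=True)
# With offsets=None, MultiSlitCoadd2d measures offsets from the brightest object, while
# EchelleCoadd2d stacks each order about that object's trace (see get_brightest_obj above).
```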
#### File: pypeit/core/framematch.py
```python
import os
import re
from collections import OrderedDict
import numpy as np
from pypeit import msgs
from pypeit.bitmask import BitMask
class FrameTypeBitMask(BitMask):
"""
Define a bitmask to set the frame types.
Frame types can be arc, bias, dark, pinhole, pixelflat, science,
standard, or trace.
"""
def __init__(self):
# TODO: This needs to be an OrderedDict for now to ensure that
# the bits assigned to each key is always the same. As of python
# 3.7, normal dict types are guaranteed to preserve insertion
# order as part of its data model. When/if we require python
# 3.7, we can remove this (and other) OrderedDict usage in favor
# of just a normal dict.
frame_types = OrderedDict([
('arc', 'Arc lamp observation used for wavelength calibration'),
('bias', 'Bias readout for detector bias subtraction'),
('dark', 'Shuttered exposure to measure dark current'),
('pinhole', 'Pinhole observation used for tracing slit centers'),
('pixelflat', 'Flat-field exposure used for pixel-to-pixel response'),
('science', 'On-sky observation of a primary target'),
('standard', 'On-sky observation of a flux calibrator'),
('trace', 'High-count exposure used to trace slit positions'),
('tilt', 'Exposure used to trace the tilt in the wavelength solution')
])
super(FrameTypeBitMask, self).__init__(list(frame_types.keys()),
descr=list(frame_types.values()))
def type_names(self, type_bits, join=True):
"""
Use the type bits to get the type names for each frame.
.. todo::
- This should probably be a general function in
:class:`pypeit.bitmask.BitMask`
Args:
type_bits (int, list, numpy.ndarray):
The bit mask for each frame.
join (:obj:`bool`, optional):
Instead of providing a list of type names for items with
                multiple bits tripped, join the list into a single,
comma-separated string.
Returns:
list: List of the frame types for each frame. Each frame can
have multiple types, meaning the 2nd axis is not necessarily the
same length for all frames.
"""
_type_bits = np.atleast_1d(type_bits)
out = []
for b in _type_bits:
n = self.flagged_bits(b)
if len(n) == 0:
n = ['None']
out += [','.join(n)] if join else [n]
return out[0] if isinstance(type_bits, np.integer) else out
def check_frame_exptime(exptime, exprng):
"""
Check that the exposure time is within the provided range.
Args:
exptime (numpy.ndarray):
Exposure times to check; allowed to be None.
exprng (array-like):
An array with the minimum and maximum exposure. The limits
are *exclusive* and a limit of None means there is no limit.
Returns:
numpy.ndarray: A boolean array that is True for all times within
the provided range. The value is False for any exposure time
that is None or outside the provided range.
Raises:
ValueError:
Raised if the length of `exprng` is not 2.
"""
    # True wherever the exposure time is defined (i.e., not None)
    indx = exptime != None
if exprng is None:
# No range specified
return indx
if len(exprng) != 2:
# Range not correctly input
raise ValueError('exprng must have two elements.')
if exprng[0] is not None:
indx[indx] &= (exptime[indx] > exprng[0])
if exprng[1] is not None:
indx[indx] &= (exptime[indx] < exprng[1])
return indx
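# Illustrative behavior (example values, not from the original source): for
# exptime = np.array([1.0, 30.0, 600.0]) and exprng = [5, 500], check_frame_exptime
# returns array([False, True, False]) because both limits are exclusive.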
# TODO: May want to keep this in case we ever try to bring it back....
#def group_AB_frames(file_list, targets, coords, max_nod_sep=2):
# """
# Group files into a ABBA or AB sequences.
#
# Args:
# file_list (:obj:`list`):
# A list of file names.
# targets (:obj:`dict`):
# A dictionary that matches each file to a unique target name.
# The target name can be one of the files in the file list.
# coords (:class:`astropy.coordinates.SkyCoord`):
# The coordinates of all the exposures. Number of coordinates
# should match the number of files.
# max_nod_sep (:obj:`int`, optional):
# The maximum separation (arcsec) between the 1st and 4th
# frame sky coordinates in the ABBA sequence that is allowed
# when identifying the sequence. Note that the default (2
# arcsec) is arbitrary.
#
# Returns:
# list:
# A list that matches the length of the input list of files.
# Each file in an AB or ABBA sequence is identified with it's
# pair in the sequence.
# """
#
# AB_frame = [''] * len(file_list)
#
# for key, value in targets.items():
# files = file_list[value]
#
# # Check here that there are more than 1 files and that the
# # number of files is even
# if len(files) == 1:
# msgs.warn('Cannot perform ABBA reduction on targets with 1 file')
# elif len(files) % 2 != 0:
# msgs.warn('Expected an even number of files associated with target ' + key)
#
# # TODO: Check for increasing time? Files are read in numerical
# # sequential order -- should be in order of increasing time
# # anyway..
#
# # Assume that the files are initially in ABBA order and proceed
# ABBA_coords = coords[value]
#
# # Break files into ABBA groups (includes remainder if there are only 2 files)
# file_groups = [files[i:i+4] for i in range(0,len(files),4)]
# ABBA_groups = [ABBA_coords[i:i + 4] for i in range(0, len(ABBA_coords), 4)]
# value_groups = [value[i:i + 4] for i in range(0, len(ABBA_coords), 4)]
#
# for group in range(len(ABBA_groups)):
# if len(ABBA_groups[group]) == 2:
# # Warn user that if there are any groups of only 2
# # files, assuming they are in order of A and B
# msgs.info('Assuming these two frames are A and B frame:'
# + msgs.newline() + file_groups[group][0]
# + msgs.newline() + file_groups[group][1])
# elif len(ABBA_groups[group]) == 4:
# # Check that frames 1, 4 of an ABBA sequence are at the
# # same nod position (A) based on their RA, DEC
# AA_sep = ABBA_coords[0].separation(ABBA_coords[-1]).arcsec
# BB_sep = ABBA_coords[1].separation(ABBA_coords[2]).arcsec
# if AA_sep > max_nod_sep or BB_sep > max_nod_sep:
# if AA_sep > max_nod_sep:
# msgs.warn('Separation between 1st and 4th frame in presumed ABBA sequence '
# 'have a large separation ({0}).'.format(AA_sep))
# if BB_sep > max_nod_sep:
# msgs.warn('Separation between 2nd and 3rd frame in presumed ABBA sequence '
# 'have a large separation ({0}).'.format(BB_sep))
# msgs.warn('Check ABBA identification for target {0} group {1}:'.format(
# target, group) + msgs.newline() + 'A:' + file_groups[group][0]
# + msgs.newline() + 'B:' + file_groups[group][1]
# + msgs.newline() + 'B:' + file_groups[group][2]
# + msgs.newline() + 'A:' + file_groups[group][3])
# else:
# msgs.error('BUG: This should never be reached.')
#
# # Flip group from ABBA to BABA, or AB to BA
# AB_idx_flip = np.copy(value_groups[group])
# AB_idx_flip[::2], AB_idx_flip[1::2] \
# = value_groups[group][1::2], value_groups[group][::2]
#
# # Associate each file in the group with its AB pair
# for i,j in enumerate(value_groups[group]):
# AB_frame[j] = file_list[AB_idx_flip[i]]
#
# return AB_frame
```
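As a hedged usage sketch of the module above (the `keys()` helper on the BitMask base class is assumed from its conventional use; `type_names` and `check_frame_exptime` are defined above):
```python
import numpy as np
from pypeit.core.framematch import FrameTypeBitMask, check_frame_exptime
bm = FrameTypeBitMask()
print(bm.keys())          # frame-type flag names (BitMask.keys() assumed to exist)
print(bm.type_names(0))   # no bits set -> ['None']
exptime = np.array([1.0, 30.0, 600.0])
print(check_frame_exptime(exptime, [5, 500]))  # exclusive limits -> [False  True False]
```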
#### File: pypeit/images/combineimage.py
```python
import inspect
import os
import numpy as np
from pypeit import msgs
from pypeit.core import combine
from pypeit.par import pypeitpar
from pypeit import utils
from pypeit.images import pypeitimage
from pypeit.images import processrawimage
from pypeit.images import rawimage
from pypeit.images import maskimage
from IPython import embed
class CombineImage(object):
"""
Class to generate an image from one or more files (and other pieces).
The core processing steps are handled by ProcessRawImage
This object is mainly for combining multiple images
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
Spectrograph used to take the data.
det (:obj:`int`, optional):
The 1-indexed detector number to process.
par (:class:`pypeit.par.pypeitpar.ProcessImagesPar`):
Parameters that dictate the processing of the images. See
:class:`pypeit.par.pypeitpar.ProcessImagesPar` for the
defaults.
"""
def __init__(self, spectrograph, det, par, files):
# Required parameters
self.spectrograph = spectrograph
self.det = det
if not isinstance(par, pypeitpar.ProcessImagesPar):
            msgs.error('Provided ParSet must be of type ProcessImagesPar.')
        self.par = par  # This must be named this way as it is frequently a child
self.files = files
if self.nfiles == 0:
msgs.error('Combineimage requires a list of files to instantiate')
def process_one(self, filename, process_steps, bias, pixel_flat=None, illum_flat=None, bpm=None):
"""
Process a single image
Args:
filename (str):
File to process
process_steps (list):
List of processing steps
bias (np.ndarray or None):
Bias image
pixel_flat (np.ndarray, optional):
Flat image
illum_flat (np.ndarray, optional):
Illumination image
bpm (np.ndarray, optional):
Bad pixel mask
Returns:
:class:`pypeit.images.pypeitimage.PypeItImage`:
"""
# Load raw image
rawImage = rawimage.RawImage(filename, self.spectrograph, self.det)
# Process
processrawImage = processrawimage.ProcessRawImage(rawImage, self.par, bpm=bpm)
processedImage = processrawImage.process(process_steps, bias=bias, pixel_flat=pixel_flat,
illum_flat=illum_flat)
# Return
return processedImage
def run(self, process_steps, bias, pixel_flat=None, illum_flat=None,
ignore_saturation=False, sigma_clip=True, bpm=None, sigrej=None, maxiters=5):
"""
Generate a PypeItImage from a list of images
        Mainly a wrapper to combine.weighted_combine()
This may also generate the ivar, crmask, rn2img and mask
Args:
process_steps (list):
bias (np.ndarray or None):
Bias image or instruction
pixel_flat (np.ndarray, optional):
Flat image
illum_flat (np.ndarray, optional):
Illumination image
sigma_clip (bool, optional):
Perform sigma clipping
sigrej (int or float, optional): Rejection threshold for sigma clipping.
Code defaults to determining this automatically based on the number of images provided.
maxiters (int, optional):
Number of iterations for the clipping
bpm (np.ndarray, optional):
Bad pixel mask. Held in ImageMask
ignore_saturation (bool, optional):
If True, turn off the saturation flag in the individual images before stacking
This avoids having such values set to 0 which for certain images (e.g. flat calibrations)
can have unintended consequences.
Returns:
:class:`pypeit.images.pypeitimage.PypeItImage`:
"""
# Loop on the files
nimages = len(self.files)
for kk, ifile in enumerate(self.files):
# Process a single image
pypeitImage = self.process_one(ifile, process_steps, bias, pixel_flat=pixel_flat,
illum_flat=illum_flat, bpm=bpm)
# Are we all done?
if len(self.files) == 1:
return pypeitImage
elif kk == 0:
# Get ready
shape = (nimages, pypeitImage.bpm.shape[0], pypeitImage.bpm.shape[1])
img_stack = np.zeros(shape)
ivar_stack= np.zeros(shape)
rn2img_stack = np.zeros(shape)
crmask_stack = np.zeros(shape, dtype=bool)
# Mask
bitmask = maskimage.ImageBitMask()
mask_stack = np.zeros(shape, bitmask.minimum_dtype(asuint=True))
# Process
img_stack[kk,:,:] = pypeitImage.image
# Construct raw variance image and turn into inverse variance
if pypeitImage.ivar is not None:
ivar_stack[kk, :, :] = pypeitImage.ivar
else:
ivar_stack[kk, :, :] = 1.
# Mask cosmic rays
if pypeitImage.crmask is not None:
crmask_stack[kk, :, :] = pypeitImage.crmask
# Read noise squared image
if pypeitImage.rn2img is not None:
rn2img_stack[kk, :, :] = pypeitImage.rn2img
# Final mask for this image
# TODO This seems kludgy to me. Why not just pass ignore_saturation to process_one and ignore the saturation
# when the mask is actually built, rather than untoggling the bit here
if ignore_saturation: # Important for calibrations as we don't want replacement by 0
indx = pypeitImage.bitmask.flagged(pypeitImage.mask, flag=['SATURATION'])
pypeitImage.mask[indx] = pypeitImage.bitmask.turn_off(pypeitImage.mask[indx], 'SATURATION')
mask_stack[kk, :, :] = pypeitImage.mask
# Coadd them
weights = np.ones(nimages)/float(nimages)
img_list = [img_stack]
var_stack = utils.inverse(ivar_stack)
var_list = [var_stack, rn2img_stack]
img_list_out, var_list_out, outmask, nused = combine.weighted_combine(
weights, img_list, var_list, (mask_stack == 0),
sigma_clip=sigma_clip, sigma_clip_stack=img_stack, sigrej=sigrej, maxiters=maxiters)
# Build the last one
final_pypeitImage = pypeitimage.PypeItImage(img_list_out[0],
ivar=utils.inverse(var_list_out[0]),
bpm=pypeitImage.bpm,
rn2img=var_list_out[1],
crmask=np.invert(outmask),
binning=pypeitImage.binning)
nonlinear_counts = self.spectrograph.nonlinear_counts(self.det,
apply_gain='apply_gain' in process_steps)
final_pypeitImage.build_mask(final_pypeitImage.image, final_pypeitImage.ivar,
saturation=nonlinear_counts, #self.spectrograph.detector[self.det-1]['saturation'],
mincounts=self.spectrograph.detector[self.det-1]['mincounts'])
# Return
return final_pypeitImage
@property
def nfiles(self):
"""
Number of files in the files attribute
Returns:
int
"""
return len(self.files) if isinstance(self.files, (np.ndarray, list)) else 0
```
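A short, illustrative sketch of how `CombineImage` might be driven. The file names are placeholders, and `spectrograph`, `par` (a `ProcessImagesPar`) and `bpm` are assumed to already exist; `ScienceImage.build_from_file_list` in the next file is a concrete driver of the same pattern.
```python
# Sketch only: spectrograph, par and bpm are assumed to be set up elsewhere.
from pypeit.core import procimg
from pypeit.images import combineimage
files = ['raw/flat_01.fits', 'raw/flat_02.fits']  # placeholder raw frames
process_steps = procimg.init_process_steps(None, par) + ['trim', 'apply_gain', 'orient']
combineImage = combineimage.CombineImage(spectrograph, 1, par, files)
# run() processes each frame with ProcessRawImage and sigma-clip combines the stack.
stacked = combineImage.run(process_steps, None, bpm=bpm, sigma_clip=True, ignore_saturation=True)
```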
#### File: pypeit/images/scienceimage.py
```python
import inspect
import os
import numpy as np
from pypeit import msgs
from pypeit.core import procimg
from pypeit.par import pypeitpar
from pypeit import utils
from pypeit.images import pypeitimage
from pypeit.images import combineimage
from IPython import embed
class ScienceImage(pypeitimage.PypeItImage):
"""
Class to generate and hold a science image
Child of PypeItImage
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
Spectrograph used to take the data.
det (:obj:`int`):
The 1-indexed detector number to process.
par (:class:`pypeit.par.pypeitpar.ProcessImagesPar`):
Parameters that dictate the processing of the images. See
:class:`pypeit.par.pypeitpar.ProcessImagesPar` for the
defaults.
image (np.ndarray):
ivar (np.ndarray):
bpm (np.ndarray):
Bad pixel mask. Held in ImageMask
rn2img (np.ndarray, optional):
crmask (np.ndarray, optional):
mask (np.ndarray, optional):
files (list, optional):
List of filenames that went into the loaded image
"""
frametype = 'science'
def __init__(self, spectrograph, det, par, image, ivar, bpm, rn2img=None,
crmask=None, mask=None, files=[]):
# Init me
pypeitimage.PypeItImage.__init__(self, image, ivar=ivar, rn2img=rn2img,
bpm=bpm, crmask=crmask, mask=mask)
# Required attribs
self.spectrograph = spectrograph
if not isinstance(par, pypeitpar.ProcessImagesPar):
            msgs.error('Provided ParSet must be of type ProcessImagesPar.')
self.par = par
self.det = det
# Not required
self.files = files
def build_crmask(self, subtract_img=None):
"""
Call to ImageMask.build_crmask which will
generate the cosmic ray mask
Args:
subtract_img (np.ndarray, optional):
Image to be subtracted off of self.image prior to CR evaluation
Returns:
np.ndarray: Boolean array of self.crmask
"""
return super(ScienceImage, self).build_crmask(self.spectrograph, self.det,
self.par, self.image,
utils.inverse(self.ivar),
subtract_img=subtract_img).copy()
def build_mask(self, saturation=1e10, mincounts=-1e10, slitmask=None):
"""
Call to ImageMask.build_mask()
This generates the full Image mask
Args:
saturation (float, optional):
mincounts (float, optional):
slitmask (np.ndarray, optional):
Returns:
np.ndarray: The full mask, held in self.mask
"""
super(ScienceImage, self).build_mask(self.image, self.ivar,
saturation=saturation,
mincounts=mincounts,
slitmask=slitmask)
return self.mask.copy()
def update_mask_cr(self, subtract_img=None):
"""
Updates the CR mask values in self.mask
through a call to ImageMask.build_crmask which
generates a new CR mask and then a call to
ImageMask.update_mask_cr() which updates self.mask
Args:
subtract_img (np.ndarray, optional):
If provided, this is subtracted from self.image prior to
CR masking
"""
# Generate the CR mask (and save in self.crmask)
super(ScienceImage, self).build_crmask(self.spectrograph, self.det,
self.par, self.image,
utils.inverse(self.ivar),
subtract_img=subtract_img).copy()
# Now update the mask
super(ScienceImage, self).update_mask_cr(self.crmask)
def __sub__(self, other):
"""
Subtract a ScienceImage object from another
Extras (e.g. ivar, masks) are included if they are present
Args:
other (ScienceImage):
Returns:
ScienceImage:
"""
if not isinstance(other, ScienceImage):
msgs.error("Misuse of the subtract method")
# Images
newimg = self.image - other.image
# Mask time
outmask_comb = (self.mask == 0) & (other.mask == 0)
# Variance
if self.ivar is not None:
new_ivar = utils.inverse(utils.inverse(self.ivar) + utils.inverse(other.ivar))
new_ivar[np.invert(outmask_comb)] = 0
else:
new_ivar = None
# RN2
if self.rn2img is not None:
new_rn2 = self.rn2img + other.rn2img
else:
new_rn2 = None
# Files
new_files = self.files + other.files
# Instantiate
new_sciImg = ScienceImage(self.spectrograph, self.det, self.par,
newimg, new_ivar, self.bpm, rn2img=new_rn2, files=new_files)
#TODO: KW properly handle adding the bits
crmask_diff = new_sciImg.build_crmask()
        # crmask_eff assumes everything masked in the outmask_comb is a CR in the individual images
new_sciImg.crmask = crmask_diff | np.invert(outmask_comb)
# Note that the following uses the saturation and mincounts held in
# self.spectrograph.detector[self.det-1]
new_sciImg.build_mask()
return new_sciImg
def __repr__(self):
repr = '<{:s}: files={}'.format(self.__class__.__name__, self.files)
# Image
rdict = {}
for attr in ['image', 'ivar', 'rn2img', 'crmask', 'mask']:
if getattr(self, attr) is not None:
rdict[attr] = True
else:
rdict[attr] = False
repr += ' images={}'.format(rdict)
repr = repr + '>'
return repr
def build_from_file_list(spectrograph, det, par, bpm,
file_list, bias, pixel_flat, illum_flat=None,
sigma_clip=False, sigrej=None, maxiters=5):
"""
Build a ScienceImage from a file list
using a default set of process steps
This will also generate the ivar, crmask, rn2img and mask
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
Spectrograph used to take the data.
det (:obj:`int`):
The 1-indexed detector number to process.
par (:class:`pypeit.par.pypeitpar.ProcessImagesPar`):
Parameters that dictate the processing of the images. See
:class:`pypeit.par.pypeitpar.ProcessImagesPar` for the
defaults.
bpm (np.ndarray):
Bad pixel mask. Held in ImageMask
file_list (list):
List of files
bias (np.ndarray or None):
Bias image
pixel_flat (np.ndarray):
Flat image
illum_flat (np.ndarray, optional):
Illumination flat image
sigma_clip (bool, optional):
If True, sigma-clip when combining the images.
sigrej (int or float, optional): Rejection threshold for sigma clipping.
Code defaults to determining this automatically based on the number of images provided.
maxiters (int, optional):
Maximum number of sigma-clipping iterations.
Returns:
ScienceImage:
"""
# Process steps
process_steps = procimg.init_process_steps(bias, par)
process_steps += ['trim', 'apply_gain', 'orient']
if (pixel_flat is not None) or (illum_flat is not None):
process_steps += ['flatten']
process_steps += ['extras']
if par['cr_reject']:
process_steps += ['crmask']
combineImage = combineimage.CombineImage(spectrograph, det, par, file_list)
pypeitImage = combineImage.run(process_steps, bias, bpm=bpm, pixel_flat=pixel_flat,
illum_flat=illum_flat, sigma_clip=sigma_clip,
sigrej=sigrej, maxiters=maxiters)
# Instantiate
slf = ScienceImage(spectrograph, det, par, pypeitImage.image, pypeitImage.ivar,
pypeitImage.bpm, rn2img=pypeitImage.rn2img,
crmask=pypeitImage.crmask, mask=pypeitImage.mask,
files=file_list)
# Return
return slf
```
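For orientation, the sketch below shows how the pieces above are typically wired together. It is only a minimal sketch: the file names are placeholders, and the `spectrograph`, `det`, `par`, `bpm`, `bias`, and `pixel_flat` objects are assumed to have been built elsewhere in the pipeline.

```python
# Hypothetical usage sketch; all inputs are placeholders built elsewhere in PypeIt.
sciA = build_from_file_list(spectrograph, det=1, par=par, bpm=bpm,
                            file_list=['frame_A.fits'], bias=bias, pixel_flat=pixel_flat)
sciB = build_from_file_list(spectrograph, det=1, par=par, bpm=bpm,
                            file_list=['frame_B.fits'], bias=bias, pixel_flat=pixel_flat)

# A-B difference: ivar is propagated, the CR mask is rebuilt on the difference image,
# and pixels masked in either input are flagged as CRs in the result.
diff = sciA - sciB
```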
#### File: pypeit/scripts/object_finding.py
```python
import os
import argparse
import numpy as np
from astropy.table import Table
from astropy.io import fits
from pypeit.core.gui import object_find as gui_object_find
from pypeit import msgs
from pypeit.core.parse import get_dnum
from pypeit.traceslits import TraceSlits
from pypeit import edgetrace
from pypeit.masterframe import MasterFrame
from pypeit.core import trace_slits
def parser(options=None):
parser = argparse.ArgumentParser(description='Display sky subtracted, spec2d image in the '
'interactive object finding GUI. Run above '
'the Science/ folder',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file', type = str, default=None, help='PYPEIT spec2d file')
parser.add_argument("--list", default=False, help="List the extensions only?",
action="store_true")
parser.add_argument('--det', default=1, type=int, help="Detector")
parser.add_argument("--old", default=False, action="store_true", help="Used old slit tracing")
return parser.parse_args() if options is None else parser.parse_args(options)
def parse_traces(hdulist_1d, det_nm):
"""Extract the relevant trace information
"""
traces = dict(traces=[], fwhm=[])
pkflux = []
for hdu in hdulist_1d:
if det_nm in hdu.name:
tbl = Table(hdu.data)
trace = tbl['TRACE']
fwhm = tbl['FWHM']
obj_id = hdu.name.split('-')[0]
traces['traces'].append(trace.copy())
traces['fwhm'].append(np.median(fwhm))
pkflux.append(np.median(tbl['BOX_COUNTS']))
traces['pkflux'] = np.array(pkflux)
return traces
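# parse_traces() returns a dict of the form
#   {'traces': [ndarray, ...], 'fwhm': [float, ...], 'pkflux': ndarray}
# with one entry per object extracted on the requested detector.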
def main(args):
# List only?
hdu = fits.open(args.file)
head0 = hdu[0].header
if args.list:
hdu.info()
return
# Init
sdet = get_dnum(args.det, prefix=False)
# One detector, sky sub for now
names = [hdu[i].name for i in range(len(hdu))]
try:
exten = names.index('DET{:s}-PROCESSED'.format(sdet))
except ValueError: # Backwards compatibility
msgs.error('Requested detector {:s} was not processed.\n'
'Maybe you chose the wrong one to view?\n'
'Set with --det= or check file contents with --list'.format(sdet))
sciimg = hdu[exten].data
try:
exten = names.index('DET{:s}-SKY'.format(sdet))
except ValueError: # Backwards compatibility
msgs.error('Requested detector {:s} has no sky model.\n'
'Maybe you chose the wrong one to view?\n'
'Set with --det= or check file contents with --list'.format(sdet))
skymodel = hdu[exten].data
try:
exten = names.index('DET{:s}-MASK'.format(sdet))
except ValueError: # Backwards compatibility
msgs.error('Requested detector {:s} has no bit mask.\n'
'Maybe you chose the wrong one to view?\n'
'Set with --det= or check file contents with --list'.format(sdet))
mask = hdu[exten].data
frame = (sciimg - skymodel) * (mask == 0)
mdir = head0['PYPMFDIR']
if not os.path.exists(mdir):
mdir_base = os.path.join(os.getcwd(), os.path.basename(mdir))
msgs.warn('Master file dir: {0} does not exist. Using {1}'.format(mdir, mdir_base))
mdir = mdir_base
trace_key = '{0}_{1:02d}'.format(head0['TRACMKEY'], args.det)
trc_file = os.path.join(mdir, MasterFrame.construct_file_name('Trace', trace_key))
# TODO -- Remove this once the move to Edges is complete
if args.old:
tslits_dict = TraceSlits.load_from_file(trc_file)[0]
else:
trc_file = trc_file.replace('Trace', 'Edges')+'.gz'
tslits_dict = edgetrace.EdgeTraceSet.from_file(trc_file).convert_to_tslits_dict()
shape = (tslits_dict['nspec'], tslits_dict['nspat'])
slit_ids = [trace_slits.get_slitid(shape, tslits_dict['slit_left'], tslits_dict['slit_righ'], ii)[0]
for ii in range(tslits_dict['slit_left'].shape[1])]
# Object traces
spec1d_file = args.file.replace('spec2d', 'spec1d')
det_nm = 'DET{:s}'.format(sdet)
if os.path.isfile(spec1d_file):
hdulist_1d = fits.open(spec1d_file)
else:
hdulist_1d = []
msgs.warn('Could not find spec1d file: {:s}'.format(spec1d_file) + msgs.newline() +
' No objects were extracted.')
tslits_dict['objtrc'] = parse_traces(hdulist_1d, det_nm)
# TODO :: Need to include standard star trace in the spec2d files
std_trace = None
# Extract some trace models
fwhm = 2 # Start with some default value
# Brightest object on slit
trace_model_obj = None
trace_model_dict = dict()
if len(tslits_dict['objtrc']['pkflux']) > 0:
smash_peakflux = tslits_dict['objtrc']['pkflux']
ibri = smash_peakflux.argmax()
trace_model_obj = tslits_dict['objtrc']['traces'][ibri]
fwhm = tslits_dict['objtrc']['fwhm'][ibri]
trace_model_dict['object'] = dict(trace_model=trace_model_obj, fwhm=fwhm)
# Standard star trace
trace_model_dict['std'] = dict(trace_model=std_trace, fwhm=fwhm)
# Trace of the slit edge
trace_model_dict['slit'] = dict(trace_model=tslits_dict['slit_left'].copy(), fwhm=fwhm)
tslits_dict['trace_model'] = trace_model_dict
# Finally, initialise the GUI
gui_object_find.initialise(args.det, frame, tslits_dict, None, printout=True, slit_ids=slit_ids)
```
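As a rough sketch of how this script can be driven programmatically (the spec2d file name below is a placeholder):

```python
# Hypothetical invocation of the object-finding GUI; the spec2d path is a placeholder.
from pypeit.scripts import object_finding

args = object_finding.parser(['Science/spec2d_example.fits', '--det', '2'])
object_finding.main(args)
```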
#### File: pypeit/scripts/ql_keck_nires.py
```python
import argparse
from pypeit import msgs
import warnings
def parser(options=None):
parser = argparse.ArgumentParser(description='Script to run PypeIt on a pair of NIRES files (A-B)')
parser.add_argument('full_rawpath', type=str, help='Full path to the raw files')
parser.add_argument('fileA', type=str, help='A frame')
parser.add_argument('fileB', type=str, help='B frame')
parser.add_argument('-b', '--box_radius', type=float, help='Set the radius for the boxcar extraction')
if options is None:
pargs = parser.parse_args()
else:
pargs = parser.parse_args(options)
return pargs
def main(pargs):
import os
import sys
import numpy as np
from IPython import embed
from pypeit import pypeit
from pypeit import pypeitsetup
from pypeit.core import framematch
# Setup
data_files = [os.path.join(pargs.full_rawpath, pargs.fileA), os.path.join(pargs.full_rawpath,pargs.fileB)]
ps = pypeitsetup.PypeItSetup(data_files, path='./', spectrograph_name='keck_nires')
ps.build_fitstbl()
# TODO -- Get the type_bits from 'science'
bm = framematch.FrameTypeBitMask()
file_bits = np.zeros(2, dtype=bm.minimum_dtype())
file_bits[0] = bm.turn_on(file_bits[0], ['arc', 'science', 'tilt'])
file_bits[1] = bm.turn_on(file_bits[1], ['arc', 'science', 'tilt'])
ps.fitstbl.set_frame_types(file_bits)
ps.fitstbl.set_combination_groups()
# Extras
ps.fitstbl['setup'] = 'A'
# A-B
ps.fitstbl['bkg_id'] = [2,1]
# Calibrations
master_dir = os.getenv('NIRES_MASTERS')
if master_dir is None:
msgs.error("You need to set an Environmental variable NIRES_MASTERS that points at the Master Calibs")
# Config the run
cfg_lines = ['[rdx]']
cfg_lines += [' spectrograph = {0}'.format('keck_nires')]
cfg_lines += [' redux_path = {0}'.format(os.path.join(os.getcwd(),'keck_nires_A'))]
cfg_lines += ['[calibrations]']
cfg_lines += [' caldir = {0}'.format(master_dir)]
cfg_lines += [' [[scienceframe]]']
cfg_lines += [' [[process]]']
cfg_lines += [' cr_reject = False']
cfg_lines += ['[scienceimage]']
cfg_lines += [' [[extraction]]']
cfg_lines += [' skip_optimal = True']
if pargs.box_radius is not None: # Boxcar radius
cfg_lines += [' boxcar_radius = {0}'.format(pargs.box_radius)]
cfg_lines += [' [[findobj]]']
cfg_lines += [' skip_second_find = True']
# Write
ofiles = ps.fitstbl.write_pypeit('', configs=['A'], write_bkg_pairs=True, cfg_lines=cfg_lines)
if len(ofiles) > 1:
msgs.error("Bad things happened..")
# Instantiate the main pipeline reduction object
pypeIt = pypeit.PypeIt(ofiles[0], verbosity=2,
reuse_masters=True, overwrite=True,
logname='nires_proc_AB.log', show=False)
# Run
pypeIt.reduce_all()
msgs.info('Data reduction complete')
# QA HTML
msgs.info('Generating QA HTML')
pypeIt.build_qa()
return 0
```
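A minimal quick-look call might look like the following; the raw-data path and frame names are placeholders, and `NIRES_MASTERS` must point at a directory of master calibrations, as checked in `main()` above.

```python
# Hypothetical A-B quick-look reduction; paths and file names are placeholders.
import os
from pypeit.scripts import ql_keck_nires

os.environ['NIRES_MASTERS'] = '/path/to/NIRES/Masters'   # required by main()
pargs = ql_keck_nires.parser(['/path/to/raw', 'frame_A.fits', 'frame_B.fits',
                              '--box_radius', '1.5'])
ql_keck_nires.main(pargs)
```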
#### File: pypeit/spectrographs/keck_deimos.py
```python
import glob
import re
import os
import numpy as np
import warnings
from scipy import interpolate
from astropy.io import fits
from pypeit import msgs
from pypeit import telescopes
from pypeit.core import parse
from pypeit.core import framematch
from pypeit.par import pypeitpar
from pypeit.spectrographs import spectrograph
from pypeit.utils import index_of_x_eq_y
from pypeit.spectrographs.slitmask import SlitMask
from pypeit.spectrographs.opticalmodel import ReflectionGrating, OpticalModel, DetectorMap
from IPython import embed
class KeckDEIMOSSpectrograph(spectrograph.Spectrograph):
"""
Child to handle Keck/DEIMOS specific code
"""
def __init__(self):
# Get it started
super(KeckDEIMOSSpectrograph, self).__init__()
self.spectrograph = 'keck_deimos'
self.telescope = telescopes.KeckTelescopePar()
self.camera = 'DEIMOS'
self.detector = [
# Detector 1
pypeitpar.DetectorPar(
dataext = 1,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 4.19,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.226,
ronoise = 2.570,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_01'
),
# Detector 2
pypeitpar.DetectorPar(
dataext = 2,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 3.46,
saturation = 65535.,
nonlinear = 0.95,
numamplifiers = 1,
gain = 1.188,
ronoise = 2.491,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_02'
),
# Detector 3
pypeitpar.DetectorPar(
dataext = 3,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 4.03,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.248,
ronoise = 2.618,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_03'
),
# Detector 4
pypeitpar.DetectorPar(
dataext = 4,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 3.80,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.220,
ronoise = 2.557,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_04'
),
# Detector 5
pypeitpar.DetectorPar(
dataext = 5,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 4.71,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.184,
ronoise = 2.482,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_05'
),
# Detector 6
pypeitpar.DetectorPar(
dataext = 6,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 4.28,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.177,
ronoise = 2.469,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_06'
),
# Detector 7
pypeitpar.DetectorPar(
dataext = 7,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 3.33,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.201,
ronoise = 2.518,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_07'),
# Detector 8
pypeitpar.DetectorPar(
dataext = 8,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 3.69,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.230,
ronoise = 2.580,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_08'
)]
self.numhead = 9
# Uses default timeunit
# Uses default primary_hdrext
# self.sky_file ?
# Don't instantiate these until they're needed
self.grating = None
self.optical_model = None
self.detector_map = None
# TODO: I think all of the default_pypeit_par methods should be
# static. nonlinear_counts shouldn't need to be a parameter because
# it's held by the spectrograph class, right?
def default_pypeit_par(self):
"""
Set default parameters for Keck DEIMOS reductions.
"""
par = pypeitpar.PypeItPar()
par['rdx']['spectrograph'] = 'keck_deimos'
par['flexure']['method'] = 'boxcar'
# Set wave tilts order
par['calibrations']['slitedges']['edge_thresh'] = 50.
par['calibrations']['slitedges']['fit_order'] = 3
par['calibrations']['slitedges']['minimum_slit_gap'] = 0.25
par['calibrations']['slitedges']['minimum_slit_length'] = 4.
par['calibrations']['slitedges']['sync_clip'] = False
# 1D wavelength solution
par['calibrations']['wavelengths']['lamps'] = ['ArI','NeI','KrI','XeI']
par['calibrations']['wavelengths']['nonlinear_counts'] \
= self.detector[0]['nonlinear'] * self.detector[0]['saturation']
par['calibrations']['wavelengths']['n_first'] = 3
par['calibrations']['wavelengths']['match_toler'] = 2.5
# Alter the method used to combine pixel flats
par['calibrations']['pixelflatframe']['process']['combine'] = 'median'
par['calibrations']['pixelflatframe']['process']['sig_lohi'] = [10.,10.]
# Set the default exposure time ranges for the frame typing
par['calibrations']['biasframe']['exprng'] = [None, 2]
par['calibrations']['darkframe']['exprng'] = [999999, None] # No dark frames
par['calibrations']['pinholeframe']['exprng'] = [999999, None] # No pinhole frames
par['calibrations']['pixelflatframe']['exprng'] = [None, 30]
par['calibrations']['traceframe']['exprng'] = [None, 30]
par['scienceframe']['exprng'] = [30, None]
# LACosmics parameters
par['scienceframe']['process']['sigclip'] = 4.0
par['scienceframe']['process']['objlim'] = 1.5
return par
def config_specific_par(self, scifile, inp_par=None):
"""
Modify the PypeIt parameters to hard-wired values used for
specific instrument configurations.
.. todo::
Document the changes made!
Args:
scifile (str):
File to use when determining the configuration and how
to adjust the input parameters.
inp_par (:class:`pypeit.par.parset.ParSet`, optional):
Parameter set used for the full run of PypeIt. If None,
use :func:`default_pypeit_par`.
Returns:
:class:`pypeit.par.parset.ParSet`: The PypeIt paramter set
adjusted for configuration specific parameter values.
"""
par = self.default_pypeit_par() if inp_par is None else inp_par
headarr = self.get_headarr(scifile)
# Turn PCA off for long slits
# TODO: I'm a bit worried that this won't catch all
# long-slits...
if ('Long' in self.get_meta_value(headarr, 'decker')) or (
'LVMslit' in self.get_meta_value(headarr, 'decker')):
par['calibrations']['slitedges']['sync_predict'] = 'nearest'
# Templates
if self.get_meta_value(headarr, 'dispname') == '600ZD':
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_deimos_600.fits'
par['calibrations']['wavelengths']['lamps'] += ['CdI', 'ZnI', 'HgI']
elif self.get_meta_value(headarr, 'dispname') == '830G':
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_deimos_830G.fits'
elif self.get_meta_value(headarr, 'dispname') == '1200G':
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_deimos_1200G.fits'
# FWHM
binning = parse.parse_binning(self.get_meta_value(headarr, 'binning'))
par['calibrations']['wavelengths']['fwhm'] = 6.0 / binning[1]
# Return
return par
def init_meta(self):
"""
Generate the meta data dict
Note that the children can add to this
Returns:
self.meta: dict (generated in place)
"""
meta = {}
# Required (core)
meta['ra'] = dict(ext=0, card='RA')
meta['dec'] = dict(ext=0, card='DEC')
meta['target'] = dict(ext=0, card='TARGNAME')
meta['decker'] = dict(ext=0, card='SLMSKNAM')
meta['binning'] = dict(card=None, compound=True)
meta['mjd'] = dict(ext=0, card='MJD-OBS')
meta['exptime'] = dict(ext=0, card='ELAPTIME')
meta['airmass'] = dict(ext=0, card='AIRMASS')
meta['dispname'] = dict(ext=0, card='GRATENAM')
# Extras for config and frametyping
meta['hatch'] = dict(ext=0, card='HATCHPOS')
meta['dispangle'] = dict(card=None, compound=True, rtol=1e-5)
# Image type
meta['idname'] = dict(ext=0, card='OBSTYPE')
# Lamps
meta['lampstat01'] = dict(ext=0, card='LAMPS')
# Ingest
self.meta = meta
def compound_meta(self, headarr, meta_key):
"""
Args:
headarr: list
meta_key: str
Returns:
value
"""
if meta_key == 'binning':
binspatial, binspec = parse.parse_binning(headarr[0]['BINNING'])
binning = parse.binning2string(binspec, binspatial)
return binning
elif meta_key == 'dispangle':
if headarr[0]['GRATEPOS'] == 3:
return headarr[0]['G3TLTWAV']
elif headarr[0]['GRATEPOS'] == 4:
return headarr[0]['G4TLTWAV']
else:
msgs.warn('This is probably a problem. Non-standard DEIMOS GRATEPOS={0}.'.format(headarr[0]['GRATEPOS']))
else:
msgs.error("Not ready for this compound meta")
def configuration_keys(self):
"""
Return the metadata keys that defines a unique instrument
configuration.
This list is used by :class:`pypeit.metadata.PypeItMetaData` to
identify the unique configurations among the list of frames read
for a given reduction.
Returns:
list: List of keywords of data pulled from meta
"""
return ['dispname', 'decker', 'binning', 'dispangle']
def check_frame_type(self, ftype, fitstbl, exprng=None):
"""
Check for frames of the provided type.
"""
good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
if ftype == 'science':
#return good_exp & (fitstbl['lampstat01'] == 'Off') & (fitstbl['hatch'] == 'open')
return good_exp & (fitstbl['lampstat01'] == 'Off') & (fitstbl['hatch'] == 'open')
if ftype == 'bias':
return good_exp & (fitstbl['lampstat01'] == 'Off') & (fitstbl['hatch'] == 'closed')
if ftype in ['pixelflat', 'trace']:
# Flats and trace frames are typed together
return good_exp & (fitstbl['idname'] == 'IntFlat') & (fitstbl['hatch'] == 'closed')
if ftype in ['pinhole', 'dark']:
# Don't type pinhole or dark frames
return np.zeros(len(fitstbl), dtype=bool)
if ftype in ['arc', 'tilt']:
return good_exp & (fitstbl['idname'] == 'Line') & (fitstbl['hatch'] == 'closed')
msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
return np.zeros(len(fitstbl), dtype=bool)
# TODO: We should aim to get rid of this...
def idname(self, ftype):
"""
Return the `idname` for the selected frame type for this instrument.
Args:
ftype (str):
File type, which should be one of the keys in
:class:`pypeit.core.framematch.FrameTypeBitMask`.
Returns:
str: The value of `idname` that should be available in the
`PypeItMetaData` instance that identifies frames of this
type.
"""
# TODO: Fill in the rest of these.
name = { 'arc': 'Line',
'tilt': None,
'bias': None,
'dark': None,
'pinhole': None,
'pixelflat': 'IntFlat',
'science': 'Object',
'standard': None,
'trace': 'IntFlat' }
return name[ftype]
def get_rawimage(self, raw_file, det):
"""
Read a raw DEIMOS data frame (one or more detectors).
Data are unpacked from the multi-extension HDU. Function is
based on :func:`pypeit.spectrographs.keck_lris.read_lris`, which
was based on the IDL procedure ``readmhdufits.pro``.
Parameters
----------
raw_file : str
Filename
Returns
-------
array : ndarray
Combined image
hdu: HDUList
sections : tuple
List of datasec, oscansec sections
"""
# Check for file; allow for extra .gz, etc. suffix
fil = glob.glob(raw_file + '*')
if len(fil) != 1:
msgs.error('Found {0} files matching {1}'.format(len(fil), raw_file + '*'))
# Read
try:
msgs.info("Reading DEIMOS file: {:s}".format(fil[0]))
except AttributeError:
print("Reading DEIMOS file: {:s}".format(fil[0]))
hdu = fits.open(fil[0])
head0 = hdu[0].header
# Get post, pre-pix values
postpix = head0['POSTPIX']
detlsize = head0['DETLSIZE']
x0, x_npix, y0, y_npix = np.array(parse.load_sections(detlsize)).flatten()
# Create final image
if det is None:
image = np.zeros((x_npix, y_npix + 4 * postpix))
rawdatasec_img = np.zeros_like(image, dtype=int)
oscansec_img = np.zeros_like(image, dtype=int)
# get the x and y binning factors...
binning = head0['BINNING']
if binning != '1,1':
msgs.error("This binning for DEIMOS might not work. But it might..")
# DEIMOS detectors
nchip = 8
if det is None:
chips = range(nchip)
else:
chips = [det - 1] # Indexing starts at 0 here
# Loop
for tt in chips:
data, oscan = deimos_read_1chip(hdu, tt + 1)
# One detector??
if det is not None:
image = np.zeros((data.shape[0], data.shape[1] + oscan.shape[1]))
rawdatasec_img = np.zeros_like(image, dtype=int)
oscansec_img = np.zeros_like(image, dtype=int)
# Indexing
x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2 = indexing(tt, postpix, det=det)
# Fill
image[y1:y2, x1:x2] = data
rawdatasec_img[y1:y2, x1:x2] = 1 # Amp
image[o_y1:o_y2, o_x1:o_x2] = oscan
oscansec_img[o_y1:o_y2, o_x1:o_x2] = 1 # Amp
# Return
exptime = hdu[self.meta['exptime']['ext']].header[self.meta['exptime']['card']]
return image, hdu, exptime, rawdatasec_img, oscansec_img
#return image, hdu, (dsec, osec)
'''
def load_raw_frame(self, raw_file, det=None):
"""
Wrapper to the raw image reader for DEIMOS
Args:
raw_file: str, filename
det: int, REQUIRED
Desired detector
**null_kwargs:
Captured and never used
Returns:
raw_img: ndarray
Raw image; likely unsigned int
head0: Header
"""
raw_img, hdu, _ = read_deimos(raw_file, det=det)
return raw_img, hdu
'''
'''
def get_image_section(self, inp=None, det=1, section='datasec'):
"""
Return a string representation of a slice defining a section of
the detector image.
Overwrites base class function
Args:
inp (:obj:`str`, `astropy.io.fits.Header`_, optional):
String providing the file name to read, or the relevant
header object. Default is None, meaning that the
detector attribute must provide the image section
itself, not the header keyword.
det (:obj:`int`, optional):
1-indexed detector number.
section (:obj:`str`, optional):
The section to return. Should be either 'datasec' or
'oscansec', according to the
:class:`pypeitpar.DetectorPar` keywords.
Returns:
tuple: Returns three objects: (1) A list of string
representations for the image sections, one string per
amplifier. The sections are *always* returned in PypeIt
order: spectral then spatial. (2) Boolean indicating if the
slices are one indexed. (3) Boolean indicating if the
slices should include the last pixel. The latter two are
always returned as True following the FITS convention.
"""
# Read the file
if inp is None:
msgs.error('Must provide Keck DEIMOS file or hdulist to get image section.')
# Read em
shape, datasec, oscansec, _ = deimos_image_sections(inp, det)
if section == 'datasec':
return datasec, False, False
elif section == 'oscansec':
return oscansec, False, False
else:
raise ValueError('Unrecognized keyword: {0}'.format(section))
def get_raw_image_shape(self, hdulist, det=None, **null_kwargs):
"""
Overrides :class:`Spectrograph.get_image_shape` for LRIS images.
Must always provide a file.
"""
# Do it
self._check_detector()
shape, datasec, oscansec, _ = deimos_image_sections(hdulist, det)
self.naxis = shape
return self.naxis
'''
def bpm(self, filename, det, shape=None):
"""
Override parent bpm function with BPM specific to DEIMOS.
.. todo::
Allow for binning changes.
Parameters
----------
det : int, REQUIRED
**null_kwargs:
Captured and never used
Returns
-------
bpix : ndarray
0 = ok; 1 = Mask
"""
bpm_img = self.empty_bpm(filename, det, shape=shape)
if det == 1:
bpm_img[:,1052:1054] = 1
elif det == 2:
bpm_img[:,0:4] = 1
bpm_img[:,376:381] = 1
bpm_img[:,489] = 1
bpm_img[:,1333:1335] = 1
bpm_img[:,2047] = 1
elif det == 3:
bpm_img[:,0:4] = 1
bpm_img[:,221] = 1
bpm_img[:,260] = 1
bpm_img[:,366] = 1
bpm_img[:,816:819] = 1
bpm_img[:,851] = 1
bpm_img[:,940] = 1
bpm_img[:,1167] = 1
bpm_img[:,1280] = 1
bpm_img[:,1301:1303] = 1
bpm_img[:,1744:1747] = 1
bpm_img[:,-4:] = 1
elif det == 4:
bpm_img[:,0:4] = 1
bpm_img[:,47] = 1
bpm_img[:,744] = 1
bpm_img[:,790:792] = 1
bpm_img[:,997:999] = 1
elif det == 5:
bpm_img[:,25:27] = 1
bpm_img[:,128:130] = 1
bpm_img[:,1535:1539] = 1
elif det == 7:
bpm_img[:,426:428] = 1
bpm_img[:,676] = 1
bpm_img[:,1176:1178] = 1
elif det == 8:
bpm_img[:,440] = 1
bpm_img[:,509:513] = 1
bpm_img[:,806] = 1
bpm_img[:,931:934] = 1
return bpm_img
def get_slitmask(self, filename):
"""
Parse the slitmask data from a DEIMOS file into a
:class:`pypeit.spectrographs.slitmask.SlitMask` object.
Args:
filename (:obj:`str`):
Name of the file to read.
"""
# Open the file
hdu = fits.open(filename)
# Build the object data
# - Find the index of the object IDs in the slit-object
# mapping that match the object catalog
mapid = hdu['SlitObjMap'].data['ObjectID']
catid = hdu['ObjectCat'].data['ObjectID']
indx = index_of_x_eq_y(mapid, catid)
# - Pull out the slit ID, object ID, and object coordinates
objects = np.array([hdu['SlitObjMap'].data['dSlitId'][indx].astype(float),
catid.astype(float), hdu['ObjectCat'].data['RA_OBJ'],
hdu['ObjectCat'].data['DEC_OBJ']]).T
# - Only keep the objects that are in the slit-object mapping
objects = objects[mapid[indx] == catid]
# Match the slit IDs in DesiSlits to those in BluSlits
indx = index_of_x_eq_y(hdu['DesiSlits'].data['dSlitId'], hdu['BluSlits'].data['dSlitId'],
strict=True)
# Instantiate the slit mask object and return it
self.slitmask = SlitMask(np.array([hdu['BluSlits'].data['slitX1'],
hdu['BluSlits'].data['slitY1'],
hdu['BluSlits'].data['slitX2'],
hdu['BluSlits'].data['slitY2'],
hdu['BluSlits'].data['slitX3'],
hdu['BluSlits'].data['slitY3'],
hdu['BluSlits'].data['slitX4'],
hdu['BluSlits'].data['slitY4']]).T.reshape(-1,4,2),
slitid=hdu['BluSlits'].data['dSlitId'],
align=hdu['DesiSlits'].data['slitTyp'][indx] == 'A',
science=hdu['DesiSlits'].data['slitTyp'][indx] == 'P',
onsky=np.array([hdu['DesiSlits'].data['slitRA'][indx],
hdu['DesiSlits'].data['slitDec'][indx],
hdu['DesiSlits'].data['slitLen'][indx],
hdu['DesiSlits'].data['slitWid'][indx],
hdu['DesiSlits'].data['slitLPA'][indx]]).T,
objects=objects)
return self.slitmask
def get_grating(self, filename):
"""
Taken from xidl/DEEP2/spec2d/pro/deimos_omodel.pro and
xidl/DEEP2/spec2d/pro/deimos_grating.pro
"""
hdu = fits.open(filename)
# Grating slider
slider = hdu[0].header['GRATEPOS']
# TODO: Add test for slider
# Central wavelength, grating angle, and tilt position
if slider == 3:
central_wave = hdu[0].header['G3TLTWAV']
# Not used
#angle = (hdu[0].header['G3TLTRAW'] + 29094)/2500
tilt = hdu[0].header['G3TLTVAL']
elif slider in [2,4]:
# Slider is 2 or 4
central_wave = hdu[0].header['G4TLTWAV']
# Not used
#angle = (hdu[0].header['G4TLTRAW'] + 40934)/2500
tilt = hdu[0].header['G4TLTVAL']
else:
raise ValueError('Slider has unknown value: {0}'.format(slider))
# Ruling
name = hdu[0].header['GRATENAM']
if 'Mirror' in name:
ruling = 0
else:
# Remove all non-numeric characters from the name and
# convert to a floating point number
ruling = float(re.sub('[^0-9]', '', name))
# Adjust
if abs(ruling-1200) < 0.5:
ruling = 1200.06
elif abs(ruling-831) < 2:
ruling = 831.90
# Get the orientation of the grating
roll, yaw, tilt = KeckDEIMOSSpectrograph._grating_orientation(slider, ruling, tilt)
self.grating = None if ruling == 0 else ReflectionGrating(ruling, tilt, roll, yaw,
central_wave=central_wave)
return self.grating
def get_detector_map(self):
if self.detector_map is None:
self.detector_map = DEIMOSDetectorMap()
return self.detector_map
@staticmethod
def _grating_orientation(slider, ruling, tilt):
"""
Return the roll, yaw, and tilt of the grating.
Numbers are hardwired.
From xidl/DEEP2/spec2d/pro/omodel_params.pro
"""
if slider == 2 and int(ruling) == 0:
# Mirror in place of the grating
return 0., 0., -19.423
if slider == 2:
raise ValueError('Ruling should be 0 if slider in position 2.')
# Use the calibrated coefficients
_ruling = int(ruling) if int(ruling) in [600, 831, 900, 1200] else 'other'
orientation_coeffs = {3: { 600: [ 0.145, -0.008, 5.6e-4, -0.182],
831: [ 0.143, 0.000, 5.6e-4, -0.182],
900: [ 0.141, 0.000, 5.6e-4, -0.134],
1200: [ 0.145, 0.055, 5.6e-4, -0.181],
'other': [ 0.145, 0.000, 5.6e-4, -0.182] },
4: { 600: [-0.065, 0.063, 6.9e-4, -0.298],
831: [-0.034, 0.060, 6.9e-4, -0.196],
900: [-0.064, 0.083, 6.9e-4, -0.277],
1200: [-0.052, 0.122, 6.9e-4, -0.294],
'other': [-0.050, 0.080, 6.9e-4, -0.250] } }
# Return calibrated roll, yaw, and tilt
return orientation_coeffs[slider][_ruling][0], \
orientation_coeffs[slider][_ruling][1], \
tilt*(1-orientation_coeffs[slider][_ruling][2]) \
+ orientation_coeffs[slider][_ruling][3]
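# Worked example: slider=3 with the 1200 line/mm grating uses coefficients
# [0.145, 0.055, 5.6e-4, -0.181], so roll=0.145, yaw=0.055 and the returned
# tilt is tilt*(1 - 5.6e-4) - 0.181 (units as in omodel_params.pro).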
def mask_to_pixel_coordinates(self, x=None, y=None, wave=None, order=1, filename=None,
corners=False):
r"""
Convert the mask coordinates in mm to pixel coordinates on the
DEIMOS detector.
If not already instantiated, the :attr:`slitmask`,
:attr:`grating`, :attr:`optical_model`, and :attr:`detector_map`
attributes are instantiated. If these are not instantiated, a
file must be provided. If no arguments are provided, the
function expects these attributes to be set and will output the
pixel coordinates for the centers of the slits in the
:attr:`slitmask` at the central wavelength of the
:attr:`grating`.
Method generally expected to be executed in one of two modes:
- Use the `filename` to read the slit mask and determine the
detector positions at the central wavelength.
- Specifically map the provided x, y, and wave values to the
detector.
If arrays are provided for both `x`, `y`, and `wave`, the
returned objects have the shape :math:`N_\lambda\times S_x`,
where :math:`S_x` is the shape of the x and y arrays.
Args:
x (array-like, optional):
The x coordinates in the slit mask in mm. Default is to
use the center of the slits in the :attr:`slitmask`.
y (array-like, optional):
The y coordinates in the slit mask in mm. Default is to
use the center of the slits in the :attr:`slitmask`.
wave (array-like, optional):
The wavelengths in angstroms for the propagated
coordinates. Default is to use the central wavelength
of the :attr:`grating`.
order (:obj:`int`, optional):
The grating order. Default is 1.
filename (:obj:`str`, optional):
The filename to use to (re)instantiate the
:attr:`slitmask` and :attr:`grating`. Default is to use
previously instantiated attributes.
corners (:obj:`bool`, optional):
Instead of using the centers of the slits in the
:attr:`slitmask`, return the detector pixel coordinates
for the corners of all slits.
Returns:
numpy.ndarray: Returns 5 arrays: (1-2) the x and y
coordinates in the image plane in mm, (3) the detector
(1-indexed) where the slit should land at the provided
wavelength(s), and (4-5) the pixel coordinates (1-indexed)
in the relevant detector.
Raises:
ValueError:
Raised if the user provides one but not both of the x
and y coordinates, if no coordinates are provided or
available within the :attr:`slitmask`, or if the
:attr:`grating` hasn't been defined and no file is
provided.
"""
# Cannot provide just one of x or y
if (x is None and y is not None) or (x is not None and y is None):
raise ValueError('Must provide both x and y or neither to use slit mask.')
# Use the file to update the slitmask (if no x coordinates are
# provided) and the grating
if filename is not None:
if x is None and y is None:
# Reset the slit mask
self.get_slitmask(filename)
# Reset the grating
self.get_grating(filename)
# Check that any coordinates are available
if x is None and y is None and self.slitmask is None:
raise ValueError('No coordinates; Provide them directly or instantiate slit mask.')
# Make sure the coordinates are numpy arrays
_x = None if x is None else np.atleast_1d(x)
_y = None if y is None else np.atleast_1d(y)
if _x is None:
# Use all the slit centers or corners
_x = self.slitmask.corners[...,0].ravel() if corners else self.slitmask.center[:,0]
_y = self.slitmask.corners[...,1].ravel() if corners else self.slitmask.center[:,1]
# Check that the grating is defined
if self.grating is None:
raise ValueError('Must define a grating first; provide a file or use get_grating()')
# Instantiate the optical model or reset it grating
if self.optical_model is None:
self.optical_model = DEIMOSOpticalModel(self.grating)
else:
self.optical_model.reset_grating(self.grating)
# Instantiate the detector map, if necessary
self.get_detector_map()
# Compute the detector image plane coordinates (mm)
x_img, y_img = self.optical_model.mask_to_imaging_coordinates(_x, _y, wave=wave,
order=order)
# Reshape if computing the corner positions
if corners:
x_img = x_img.reshape(self.slitmask.corners.shape[:2])
y_img = y_img.reshape(self.slitmask.corners.shape[:2])
# Use the detector map to convert to the detector coordinates
return (x_img, y_img) + self.detector_map.ccd_coordinates(x_img, y_img)
class DEIMOSOpticalModel(OpticalModel):
# TODO: Are focal_r_surface (!R_IMSURF) and focal_r_curvature
# (!R_CURV) supposed to be the same? If so, consolidate these into
# a single number.
def __init__(self, grating):
super(DEIMOSOpticalModel, self).__init__(
20018.4, # Pupil distance in mm (!PPLDIST, !D_1)
2133.6, # Radius of the image surface in mm (!R_IMSURF)
2124.71, # Focal-plane radius of curvature in mm (!R_CURV)
2120.9, # Mask radius of curvature in mm (!M_RCURV)
np.radians(6.), # Mask tilt angle in radians (!M_ANGLE)
128.803, # Mask y zero point in mm (!ZPT_YM)
3.378, # Mask z zero-point in mm (!MASK_HT0)
2197.1, # Collimator distance in mm (sys.COL_DST)
4394.2, # Collimator radius of curvature in mm (!R_COLL)
-0.75, # Collimator curvature constant (!K_COLL)
np.radians(0.002), # Collimator tilt error in radians (sys.COL_ERR)
0.0, # Collimator tilt phi angle in radians (sys.COL_PHI)
grating, # DEIMOS grating object
np.radians(2.752), # Camera angle in radians (sys.CAM_ANG)
np.pi/2, # Camera tilt phi angle in radians (sys.CAM_PHI)
382.0, # Camera focal length in mm (sys.CAM_FOC)
DEIMOSCameraDistortion(), # Object used to apply/remove camera distortions
np.radians(0.021), # ICS rotation in radians (sys.MOS_ROT)
[-0.234, -3.822]) # Camera optical axis center in mm (sys.X_OPT,sys.Y_OPT)
# Include tent mirror
self.tent_theta = np.radians(71.5-0.5) # Tent mirror theta angle (sys.TNT_ANG)
self.tent_phi = np.radians(90.+0.081) # Tent mirror phi angle (sys.TNT_PHI)
#TENT MIRROR: this mirror is OK to leave in del-theta,phi
self.tent_reflection \
= OpticalModel.get_reflection_transform(self.tent_theta, self.tent_phi)
def reset_grating(self, grating):
self.grating = grating
def mask_coo_to_grating_input_vectors(self, x, y):
"""
Propagate rays from the mask plane to the grating.
Taken from xidl/DEEP2/spec2d/pro/model/pre_grating.pro
Need to override parent class to add tent mirror reflection.
"""
r = super(DEIMOSOpticalModel, self).mask_coo_to_grating_input_vectors(x, y)
# Reflect off the tent mirror and return
return OpticalModel.reflect(r, self.tent_reflection)
class DEIMOSCameraDistortion:
"""Class to remove or apply DEIMOS camera distortion."""
def __init__(self):
self.c0 = 1.
self.c2 = 0.0457563
self.c4 = -0.3088123
self.c6 = -14.917
x = np.linspace(-0.6, 0.6, 1000)
y = self.remove_distortion(x)
self.interpolator = interpolate.interp1d(y, x)
def remove_distortion(self, x):
x2 = np.square(x)
return x / (self.c0 + x2 * (self.c2 + x2 * (self.c4 + x2 * self.c6)))
def apply_distortion(self, y):
indx = (y > self.interpolator.x[0]) & (y < self.interpolator.x[-1])
if not np.all(indx):
warnings.warn('Some input angles outside of valid distortion interval!')
x = np.zeros_like(y)
x[indx] = self.interpolator(y[indx])
return x
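# remove_distortion() evaluates the forward polynomial x / (c0 + c2*x^2 + c4*x^4 + c6*x^6);
# apply_distortion() inverts it numerically via the interpolator built on [-0.6, 0.6],
# so apply_distortion(remove_distortion(x)) is designed to recover x inside that range.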
class DEIMOSDetectorMap(DetectorMap):
"""
A map of the center coordinates and rotation of each CCD in DEIMOS.
!! PIXEL COORDINATES ARE 1-INDEXED !!
"""
def __init__(self):
# Number of chips
self.nccd = 8
# Number of pixels for each chip in each dimension
self.npix = np.array([2048, 4096])
# The size of the CCD pixels in mm
self.pixel_size = 0.015
# Nominal gap between each CCD in each dimension in mm
self.ccd_gap = np.array([1, 0.1])
# Width of the CCD edge in each dimension in mm
self.ccd_edge = np.array([0.154, 0.070])
# Effective size of each chip in each dimension in pixels
self.ccd_size = self.npix + (2*self.ccd_edge + self.ccd_gap)/self.pixel_size
# Center coordinates
origin = np.array([[-1.5,-0.5], [-0.5,-0.5], [ 0.5,-0.5], [ 1.5,-0.5],
[-1.5, 0.5], [-0.5, 0.5], [ 0.5, 0.5], [ 1.5, 0.5]])
offset = np.array([[-20.05, 14.12], [-12.64, 7.25], [0.00, 0.00], [-1.34, -19.92],
[-19.02, 16.46], [ -9.65, 8.95], [1.88, 1.02], [ 4.81, -24.01]])
self.ccd_center = origin * self.ccd_size[None,:] + offset
# Construct the rotation matrix
self.rotation = np.radians([-0.082, 0.030, 0.0, -0.1206, 0.136, -0.06, -0.019, -0.082])
cosa = np.cos(self.rotation)
sina = np.sin(self.rotation)
self.rot_matrix = np.array([cosa, -sina, sina, cosa]).T.reshape(self.nccd,2,2)
# ccd_geom.pro has offsets by sys.CN_XERR, but these are all 0.
'''
def deimos_image_sections(inp, det):
"""
Parse the image for the raw image shape and data sections
Args:
inp (str or `astropy.io.fits.HDUList`_ object):
det (int):
Returns:
tuple:
shape, dsec, osec, ext_items
ext_items is a large tuple of bits and pieces for other methods
ext_items = hdu, chips, postpix, image
"""
# Check for file; allow for extra .gz, etc. suffix
if isinstance(inp, str):
fil = glob.glob(inp + '*')
if len(fil) != 1:
msgs.error('Found {0} files matching {1}'.format(len(fil), inp + '*'))
# Read
try:
msgs.info("Reading DEIMOS file: {:s}".format(fil[0]))
except AttributeError:
print("Reading DEIMOS file: {:s}".format(fil[0]))
# Open
hdu = fits.open(fil[0])
else:
hdu = inp
head0 = hdu[0].header
# Get post, pre-pix values
precol = head0['PRECOL']
postpix = head0['POSTPIX']
preline = head0['PRELINE']
postline = head0['POSTLINE']
detlsize = head0['DETLSIZE']
x0, x_npix, y0, y_npix = np.array(parse.load_sections(detlsize)).flatten()
# Setup for datasec, oscansec
dsec = []
osec = []
# get the x and y binning factors...
binning = head0['BINNING']
if binning != '1,1':
msgs.error("This binning for DEIMOS might not work. But it might..")
xbin, ybin = [int(ibin) for ibin in binning.split(',')]
# DEIMOS detectors
nchip = 8
if det is None:
chips = range(nchip)
else:
chips = [det-1] # Indexing starts at 0 here
for tt in chips:
x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2 = indexing(tt, postpix, det=det)
# Sections
idsec = '[{:d}:{:d},{:d}:{:d}]'.format(y1, y2, x1, x2)
iosec = '[{:d}:{:d},{:d}:{:d}]'.format(o_y1, o_y2, o_x1, o_x2)
dsec.append(idsec)
osec.append(iosec)
# Create final image (if the full image is requested)
if det is None:
image = np.zeros((x_npix,y_npix+4*postpix))
shape = image.shape
else:
image = None
head = hdu[chips[0]+1].header
shape = (head['NAXIS2'], head['NAXIS1']-precol) # We don't load up the precol
# Pack up a few items for use elsewhere
ext_items = hdu, chips, postpix, image
# Return
return shape, dsec, osec, ext_items
def read_deimos(raw_file, det=None):
"""
Read a raw DEIMOS data frame (one or more detectors)
Packed in a multi-extension HDU
Based on pypeit.arlris.read_lris...
Based on readmhdufits.pro
Parameters
----------
raw_file : str
Filename
Returns
-------
array : ndarray
Combined image
hdu: HDUList
sections : tuple
List of datasec, oscansec sections
"""
# Parse the header
shape, dsec, osec, ext_items = deimos_image_sections(raw_file, det)
# Unpack
hdu, chips, postpix, image = ext_items
# Loop
for tt in chips:
data, oscan = deimos_read_1chip(hdu, tt+1)
#if n_elements(nobias) eq 0 then nobias = 0
# One detector??
if det is not None:
image = np.zeros((data.shape[0],data.shape[1]+oscan.shape[1]))
# Indexing
x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2 = indexing(tt, postpix, det=det)
# Fill
image[y1:y2, x1:x2] = data
image[o_y1:o_y2, o_x1:o_x2] = oscan
# Return
return image, hdu, (dsec,osec)
'''
def indexing(itt, postpix, det=None):
"""
Some annoying book-keeping for instrument placement.
Parameters
----------
itt : int
postpix : int
det : int, optional
Returns
-------
"""
# Deal with single chip
if det is not None:
tt = 0
else:
tt = itt
ii = 2048
jj = 4096
# y indices
if tt < 4:
y1, y2 = 0, jj
else:
y1, y2 = jj, 2*jj
o_y1, o_y2 = y1, y2
# x
x1, x2 = (tt%4)*ii, (tt%4 + 1)*ii
if det is None:
o_x1 = 4*ii + (tt%4)*postpix
else:
o_x1 = ii + (tt%4)*postpix
o_x2 = o_x1 + postpix
# Return
return x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2
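# Worked example (full 8-chip mosaic, det=None, unbinned): chip index tt=5 gives
#   y1, y2 = 4096, 8192 (upper row), x1, x2 = 2048, 4096 (second column),
#   o_x1 = 4*2048 + 1*postpix, o_x2 = o_x1 + postpix,
# i.e. the overscan strips of each chip are appended after the 4*2048 data columns.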
def deimos_read_1chip(hdu,chipno):
""" Read one of the DEIMOS detectors
Args:
hdu (astropy.io.fits.HDUList):
chipno (int):
Returns:
np.ndarray, np.ndarray:
data, oscan
"""
# Extract datasec from header
datsec = hdu[chipno].header['DATASEC']
detsec = hdu[chipno].header['DETSEC']
postpix = hdu[0].header['POSTPIX']
precol = hdu[0].header['PRECOL']
x1_dat, x2_dat, y1_dat, y2_dat = np.array(parse.load_sections(datsec)).flatten()
x1_det, x2_det, y1_det, y2_det = np.array(parse.load_sections(detsec)).flatten()
# This rotates the image to be increasing wavelength to the top
#data = np.rot90((hdu[chipno].data).T, k=2)
#nx=data.shape[0]
#ny=data.shape[1]
# Science data
fullimage = hdu[chipno].data
data = fullimage[x1_dat:x2_dat,y1_dat:y2_dat]
# Overscan
oscan = fullimage[:,y2_dat:]
# Flip as needed
if x1_det > x2_det:
data = np.flipud(data)
oscan = np.flipud(oscan)
if y1_det > y2_det:
data = np.fliplr(data)
oscan = np.fliplr(oscan)
# Return
return data, oscan
```
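A short sketch of how the class above is typically exercised; the DEIMOS raw file name is a placeholder.

```python
# Hypothetical usage; the raw file name is a placeholder.
from pypeit.spectrographs.keck_deimos import KeckDEIMOSSpectrograph

spec = KeckDEIMOSSpectrograph()
par = spec.default_pypeit_par()
img, hdu, exptime, rawdatasec_img, oscansec_img = spec.get_rawimage('DE.2017.raw.fits', det=3)
slitmask = spec.get_slitmask('DE.2017.raw.fits')   # parses the BluSlits/DesiSlits tables
```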
#### File: pypeit/spectrographs/keck_hires.py
```python
import glob
import re
import os
import numpy as np
from scipy import interpolate
from astropy.io import fits
from pypeit import msgs
from pypeit import telescopes
from pypeit.core import parse
from pypeit.core import framematch
from pypeit.par import pypeitpar
from pypeit.spectrographs import spectrograph
from pypeit.spectrographs.slitmask import SlitMask
from pypeit.spectrographs.opticalmodel import ReflectionGrating, OpticalModel, DetectorMap
from pypeit import debugger
class KECKHIRESSpectrograph(spectrograph.Spectrograph):
"""
Child to handle KECK/HIRES specific code
"""
def __init__(self):
# Get it started
super(KECKHIRESSpectrograph, self).__init__()
self.spectrograph = 'keck_hires_base'
self.telescope = telescopes.KeckTelescopePar()
@property
def pypeline(self):
return 'Echelle'
@staticmethod
def default_pypeit_par():
"""
Set default parameters for KECK HIRES reductions.
"""
par = pypeitpar.PypeItPar()
# Correct for flexure using the default approach
par['flexure'] = pypeitpar.FlexurePar()
return par
def init_meta(self):
"""
Generate the meta data dict
Note that the children can add to this
"""
self.meta = {}
# Required (core)
self.meta['ra'] = dict(ext=0, card='RA')
self.meta['dec'] = dict(ext=0, card='DEC')
self.meta['target'] = dict(ext=0, card='OBJECT')
self.meta['decker'] = dict(ext=0, card='DECKNAME')
self.meta['binning'] = dict(ext=0, card='BINNING')
self.meta['mjd'] = dict(ext=0, card='MJD')
self.meta['exptime'] = dict(ext=0, card='EXPTIME')
self.meta['airmass'] = dict(ext=0, card='AIRMASS')
self.meta['dispname'] = dict(ext=0, card='ECHNAME')
# Extras for config and frametyping
# self.meta['echangl'] = dict(ext=0, card='ECHANGL')
# self.meta['xdangl'] = dict(ext=0, card='XDANGL')
def check_frame_type(self, ftype, fitstbl, exprng=None):
"""
Check for frames of the provided type.
"""
good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
# TODO: Allow for 'sky' frame type, for now include sky in
# 'science' category
if ftype == 'science':
return good_exp & (fitstbl['idname'] == 'Object')
if ftype == 'standard':
return good_exp & ((fitstbl['idname'] == 'Std') | (fitstbl['idname'] == 'Object'))
if ftype == 'bias':
return good_exp & (fitstbl['idname'] == 'Bias')
if ftype == 'dark':
return good_exp & (fitstbl['idname'] == 'Dark')
if ftype in ['pixelflat', 'trace']:
# Flats and trace frames are typed together
return good_exp & ((fitstbl['idname'] == 'Flat') | (fitstbl['idname'] == 'IntFlat'))
if ftype in ['arc', 'tilt']:
return good_exp & (fitstbl['idname'] == 'Line')
msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
return np.zeros(len(fitstbl), dtype=bool)
def load_raw_img_head(self, raw_file, det=None, **null_kwargs):
"""
Wrapper to the raw image reader for HIRES
Args:
raw_file (:obj:`str`):
filename
det (:obj:`int`, optional):
Desired detector. Despite default value, cannot be
``None`` (todo: set a sensible default).
**null_kwargs:
Captured and never used
Returns:
tuple: Raw image and header
"""
raw_img, head0, _ = read_hires(raw_file, det=det)
return raw_img, head0
def get_image_section(self, inp=None, det=1, section='datasec'):
"""
Return a string representation of a slice defining a section of
the detector image.
Overwrites base class function to use :func:`read_hires` to get
the image sections.
.. todo ::
- It is really inefficient. Can we parse
:func:`read_hires` into something that can give you the
image section directly?
This is done separately for the data section and the overscan
section in case one is defined as a header keyword and the other
is defined directly.
Args:
inp (:obj:`str`, `astropy.io.fits.Header`_, optional):
String providing the file name to read, or the relevant
header object. Default is None, meaning that the
detector attribute must provide the image section
itself, not the header keyword.
det (:obj:`int`, optional):
1-indexed detector number.
section (:obj:`str`, optional):
The section to return. Should be either 'datasec' or
'oscansec', according to the
:class:`pypeitpar.DetectorPar` keywords.
Returns:
tuple: Returns three objects: (1) A list of string
representations for the image sections, one string per
amplifier. The sections are *always* returned in PypeIt
order: spectral then spatial. (2) Boolean indicating if the
slices are one indexed. (3) Boolean indicating if the
slices should include the last pixel. The latter two are
always returned as True following the FITS convention.
"""
# Read the file
if inp is None:
msgs.error('Must provide Keck HIRES file to get image section.')
elif not os.path.isfile(inp):
msgs.error('File {0} does not exist!'.format(inp))
temp, head0, secs = read_hires(inp, det)
if section == 'datasec':
return secs[0], False, False
elif section == 'oscansec':
return secs[1], False, False
else:
raise ValueError('Unrecognized keyword: {0}'.format(section))
#
# def get_datasec_img(self, filename, det=1, force=True):
# """
# Create an image identifying the amplifier used to read each pixel.
#
# Args:
# filename (str):
# Name of the file from which to read the image size.
# det (:obj:`int`, optional):
# Detector number (1-indexed)
# force (:obj:`bool`, optional):
# Force the image to be remade
#
# Returns:
# `numpy.ndarray`: Integer array identifying the amplifier
# used to read each pixel.
# """
# if self.datasec_img is None or force:
# # Check the detector is defined
# self._check_detector()
# # Get the image shape
# raw_naxis = self.get_raw_image_shape(filename, det=det)
#
# # Binning is not required because read_hires accounts for it
# # binning = self.get_meta_value(filename, 'binning')
#
# data_sections, one_indexed, include_end, transpose \
# = self.get_image_section(filename, det, section='datasec')
#
# # Initialize the image (0 means no amplifier)
# self.datasec_img = np.zeros(raw_naxis, dtype=int)
# for i in range(self.detector[det-1]['numamplifiers']):
# # Convert the data section from a string to a slice
# datasec = parse.sec2slice(data_sections[i], one_indexed=one_indexed,
# include_end=include_end, require_dim=2,
# transpose=transpose) #, binning=binning)
# # Assign the amplifier
# self.datasec_img[datasec] = i+1
# return self.datasec_img
class KECKHIRESRSpectrograph(KECKHIRESSpectrograph):
"""
Child to handle KECK/HIRES-R specific code
"""
def __init__(self):
# Get it started
super(KECKHIRESRSpectrograph, self).__init__()
self.spectrograph = 'keck_hires_red'
self.camera = 'HIRES_R'
self.detector = [
# Detector 1 B
pypeitpar.DetectorPar(dataext = 1,
specaxis = 0, # Device is fussed with by the image reader
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.191,
darkcurr = 0.0,
saturation = 65535.,
nonlinear = 0.86,
numamplifiers = 1,
gain = 0.78, # high gain, low gain 1.9
ronoise = 2.8,
suffix = '_01'
),
# Detector 2
pypeitpar.DetectorPar(dataext = 2,
specaxis = 0,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.191,
darkcurr = 0.0,
saturation = 65535.,
nonlinear = 0.86,
numamplifiers = 1,
gain = 0.86, # high gain, low gain 2.2
ronoise = 3.1,
suffix = '_02'
),
# Detector 3
pypeitpar.DetectorPar(dataext = 3,
specaxis = 0,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.191,
darkcurr = 0.0,
saturation = 65535.,
nonlinear = 0.86,
numamplifiers = 1,
gain = 0.84, # high gain, low gain 2.2
ronoise = 3.1,
suffix = '_03'
),
]
self.numhead = 4
def default_pypeit_par(self):
"""
Set default parameters for HIRES RED reductions.
"""
par = KECKHIRESSpectrograph.default_pypeit_par()
par['rdx']['spectrograph'] = 'keck_hires_red'
# Adjustments to slit and tilts for NIR
par['calibrations']['slitedges']['edge_thresh'] = 600.
par['calibrations']['slitedges']['fit_order'] = 5
par['calibrations']['slitedges']['max_shift_adj'] = 0.5
par['calibrations']['slitedges']['left_right_pca'] = True
par['calibrations']['tilts']['tracethresh'] = 20
# Bias
par['calibrations']['biasframe']['useframe'] = 'bias'
# 1D wavelength solution
par['calibrations']['wavelengths']['lamps'] = ['ThAr']
par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']
par['calibrations']['wavelengths']['rms_threshold'] = 0.25
par['calibrations']['wavelengths']['sigdetect'] = 5.0
# Reidentification parameters
#par['calibrations']['wavelengths']['method'] = 'reidentify'
#par['calibrations']['wavelengths']['reid_arxiv'] = 'vlt_xshooter_nir.json'
par['calibrations']['wavelengths']['ech_fix_format'] = True
# Echelle parameters
par['calibrations']['wavelengths']['echelle'] = True
par['calibrations']['wavelengths']['ech_nspec_coeff'] = 4
par['calibrations']['wavelengths']['ech_norder_coeff'] = 4
par['calibrations']['wavelengths']['ech_sigrej'] = 3.0
# Always correct for flexure, starting with default parameters
par['flexure'] = pypeitpar.FlexurePar()
par['scienceframe']['process']['sigclip'] = 20.0
par['scienceframe']['process']['satpix'] ='nothing'
par['calibrations']['standardframe']['exprng'] = [None, 600]
par['scienceframe']['exprng'] = [600, None]
return par
# def check_headers(self, headers):
# """
# Check headers match expectations for an KECK/HIRES-R exposure.
#
# See also
# :func:`pypeit.spectrographs.spectrograph.Spectrograph.check_headers`.
#
# Args:
# headers (list):
# A list of headers read from a fits file
# """
# expected_values = { '0.INSTRUME': 'HIRES: High Resolution Echelle Spectrometer',
# '0.XDISPERS': 'RED'}
# super(KECKHIRESRSpectrograph, self).check_headers(headers,
# expected_values=expected_values)
#
# def header_keys(self):
# hdr_keys = super(KECKHIRESRSpectrograph, self).header_keys()
# hdr_keys[0]['decker'] = 'DECKNAME'
# return hdr_keys
def init_meta(self):
super(KECKHIRESRSpectrograph, self).init_meta()
self.meta['decker'] = dict(ext=0, card='DECKNAME')
def bpm(self, shape=None, filename=None, det=None, **null_kwargs):
"""
Override parent bpm function with a BPM specific to HIRES.
.. todo::
Allow for binning changes.
Parameters
----------
det : int, REQUIRED
**null_kwargs:
Captured and never used
Returns
-------
bpix : ndarray
0 = ok; 1 = Mask
"""
self.empty_bpm(shape=shape, filename=filename, det=det)
return self.bpm_img
def indexing(itt, postpix, det=None,xbin=None,ybin=None):
"""
Some annoying book-keeping for instrument placement.
Parameters
----------
itt : int
postpix : int
det : int, optional
Returns
-------
"""
# Deal with single chip
if det is not None:
tt = 0
else:
tt = itt
ii = int(np.round(2048/xbin))
jj = int(np.round(4096/ybin))
# y indices
y1, y2 = 0, jj
o_y1, o_y2 = y1, y2
# x
x1, x2 = (tt%4)*ii, (tt%4 + 1)*ii
if det is None:
o_x1 = 4*ii + (tt%4)*postpix
else:
o_x1 = ii + (tt%4)*postpix
o_x2 = o_x1 + postpix
# Return
return x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2
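# Worked example (HIRES '3,1' binning, det=None): xbin=3, ybin=1 give ii = 683, jj = 4096,
# so chip tt=1 lands in columns x1:x2 = 683:1366 (rows 0:4096) and its overscan strip
# starts at o_x1 = 4*683 + 1*postpix (overscan columns are appended after 4*ii data columns).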
def hires_read_1chip(hdu,chipno):
""" Read one of the HIRES detectors
Parameters
----------
hdu : HDUList
chipno : int
Returns
-------
data : ndarray
oscan : ndarray
"""
# Extract datasec from header
datsec = hdu[chipno].header['DATASEC']
detsec = hdu[chipno].header['DETSEC']
postpix = hdu[0].header['POSTPIX']
precol = hdu[0].header['PRECOL']
x1_dat, x2_dat, y1_dat, y2_dat = np.array(parse.load_sections(datsec)).flatten()
x1_det, x2_det, y1_det, y2_det = np.array(parse.load_sections(detsec)).flatten()
# This rotates the image to be increasing wavelength to the top
#data = np.rot90((hdu[chipno].data).T, k=2)
#nx=data.shape[0]
#ny=data.shape[1]
# Science data
fullimage = hdu[chipno].data
data = fullimage[x1_dat:x2_dat,y1_dat:y2_dat]
# Overscan
oscan = fullimage[:,y2_dat:]
# Flip as needed
if x1_det > x2_det:
data = np.flipud(data)
oscan = np.flipud(oscan)
if y1_det > y2_det:
data = np.fliplr(data)
oscan = np.fliplr(oscan)
# Return
return data, oscan
def read_hires(raw_file, det=None):
"""
Read a raw HIRES data frame (one or more detectors).
Data are unpacked from the multi-extension HDU. Function is
based on :func:`pypeit.spectrographs.keck_lris.read_lris`, which
was based on the IDL procedure ``readmhdufits.pro``.
Parameters
----------
raw_file : str
Filename
Returns
-------
array : ndarray
Combined image
header : FITS header
sections : tuple
List of datasec, oscansec sections
"""
# Check for file; allow for extra .gz, etc. suffix
fil = glob.glob(raw_file + '*')
if len(fil) != 1:
msgs.error('Found {0} files matching {1}'.format(len(fil), raw_file + '*'))
# Read
try:
msgs.info("Reading HIRES file: {:s}".format(fil[0]))
except AttributeError:
print("Reading HIRES file: {:s}".format(fil[0]))
hdu = fits.open(fil[0])
head0 = hdu[0].header
# Get post, pre-pix values
precol = head0['PRECOL']
postpix = head0['POSTPIX']
preline = head0['PRELINE']
postline = head0['POSTLINE']
detlsize = head0['DETLSIZE']
x0, x_npix, y0, y_npix = np.array(parse.load_sections(detlsize)).flatten()
# Create final image
if det is None:
image = np.zeros((x_npix,y_npix+4*postpix))
# Setup for datasec, oscansec
dsec = []
osec = []
# get the x and y binning factors...
binning = head0['BINNING']
if binning != '3,1':
msgs.warn("This binning for HIRES might not work. But it might..")
xbin, ybin = [int(ibin) for ibin in binning.split(',')]
# HIRES detectors
nchip = 3
if det is None:
chips = range(nchip)
else:
chips = [det-1] # Indexing starts at 0 here
# Loop
for tt in chips:
data, oscan = hires_read_1chip(hdu, tt+1)
# One detector??
if det is not None:
image = np.zeros((data.shape[0],data.shape[1]+oscan.shape[1]))
# Indexing
x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2 = indexing(tt, postpix, det=det,xbin=xbin,ybin=ybin)
# Fill
image[y1:y2, x1:x2] = data
image[o_y1:o_y2, o_x1:o_x2] = oscan
# Sections
idsec = '[{:d}:{:d},{:d}:{:d}]'.format(y1, y2, x1, x2)
iosec = '[{:d}:{:d},{:d}:{:d}]'.format(o_y1, o_y2, o_x1, o_x2)
dsec.append(idsec)
osec.append(iosec)
# Return
return image, head0, (dsec,osec)
```
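A minimal sketch of reading a raw HIRES frame with the helper above; the file name is a placeholder.

```python
# Hypothetical usage; the raw HIRES file name is a placeholder.
from pypeit.spectrographs.keck_hires import read_hires

mosaic, header, (dsec, osec) = read_hires('hires0042.fits')   # det=None stitches all 3 chips
chip2, header2, _ = read_hires('hires0042.fits', det=2)       # single-chip read
```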
#### File: pypeit/spectrographs/wht_isis.py
```python
import numpy as np
from astropy.io import fits
from pkg_resources import resource_filename
from pypeit import msgs
from pypeit import telescopes
from pypeit.core import framematch
from pypeit.par import pypeitpar
from pypeit.spectrographs import spectrograph
from pypeit.core import parse
from pypeit import debugger
#class WhtIsisSpectrograph(spectrograph.Spectrograph):
# """
# Child to handle Shane/Kast specific code
# """
#
# def __init__(self):
# super(WhtIsisSpectrograph, self).__init__()
# self.spectrograph = 'wht_isis_base'
# self.telescope = telescopes.WHTTelescopePar()
#
# def metadata_keys(self):
# return super(KeckLRISSpectrograph, self).metadata_keys() \
# + ['binning', 'dichroic', 'dispangle']
class WHTISISBlueSpectrograph(spectrograph.Spectrograph):
"""
Child to handle WHT/ISIS blue specific code
"""
def __init__(self):
# Get it started
super(WHTISISBlueSpectrograph, self).__init__()
self.spectrograph = 'wht_isis_blue'
self.telescope = telescopes.WHTTelescopePar()
self.camera = 'ISISb'
self.detector = [
# Detector 1
pypeitpar.DetectorPar(
dataext = 1,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.225,
darkcurr = 0.0,
saturation = 65535.,
nonlinear = 0.76,
numamplifiers = 1,
gain = 1.2,
ronoise = 5.0,
datasec = '[:,2:4030]',
oscansec = None,
suffix = '_blue'
)]
self.numhead = 2
# Uses default timeunit
# Uses default primary_hdrext
# self.sky_file = ?
def default_pypeit_par(self):
"""
Set default parameters for WHT/ISIS blue reductions.
"""
par = pypeitpar.PypeItPar()
par['rdx']['spectrograph'] = 'wht_isis_blue'
# Ignore PCA
par['calibrations']['slitedges']['sync_predict'] = 'nearest'
# Turn off the overscan
for ftype in par['calibrations'].keys():
try:
par['calibrations'][ftype]['process']['overscan'] = 'none'
except (TypeError, KeyError):
pass
par['scienceframe']['process']['overscan'] = 'none'
# Set pixel flat combination method
par['calibrations']['pixelflatframe']['process']['combine'] = 'median'
par['calibrations']['pixelflatframe']['process']['sig_lohi'] = [10.,10.]
# Change the wavelength calibration method
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['lamps'] = ['NeI', 'ArI', 'ArII', 'CuI']
par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']
par['calibrations']['wavelengths']['n_first'] = 3
par['calibrations']['wavelengths']['n_final'] = 5
par['calibrations']['wavelengths']['sigdetect'] = 10.0
par['calibrations']['wavelengths']['wv_cen'] = 4859.0
par['calibrations']['wavelengths']['disp'] = 0.2
# Do not flux calibrate
par['fluxcalib'] = None
# Always correct for flexure, starting with default parameters
par['flexure'] = pypeitpar.FlexurePar()
# Set the default exposure time ranges for the frame typing
par['calibrations']['biasframe']['exprng'] = [None, 1]
par['calibrations']['darkframe']['exprng'] = [999999, None] # No dark frames
par['calibrations']['pinholeframe']['exprng'] = [999999, None] # No pinhole frames
par['calibrations']['arcframe']['exprng'] = [None, 120]
par['calibrations']['standardframe']['exprng'] = [None, 120]
par['scienceframe']['exprng'] = [90, None]
return par
def config_specific_par(self, scifile, inp_par=None):
"""
Modify the PypeIt parameters to hard-wired values used for
specific instrument configurations.
.. todo::
Document the changes made!
Args:
scifile (str):
File to use when determining the configuration and how
to adjust the input parameters.
inp_par (:class:`pypeit.par.parset.ParSet`, optional):
Parameter set used for the full run of PypeIt. If None,
use :func:`default_pypeit_par`.
Returns:
:class:`pypeit.par.parset.ParSet`: The PypeIt parameter set
adjusted for configuration specific parameter values.
"""
par = self.default_pypeit_par() if inp_par is None else inp_par
# Wavelength calibrations
if self.get_meta_value(scifile, 'dispname') == 'R1200B':
par['calibrations']['wavelengths']['reid_arxiv'] = 'wht_isis_blue_1200_4800.fits'
# Return
return par
def init_meta(self):
"""
Generate the meta data dict
Note that the children can add to this
Returns:
self.meta: dict (generated in place)
"""
meta = {}
# Required (core)
meta['ra'] = dict(ext=0, card='RA')
meta['dec'] = dict(ext=0, card='DEC')
meta['target'] = dict(ext=0, card='OBJECT')
meta['decker'] = dict(card=None, compound=True)
meta['binning'] = dict(card=None, compound=True)
meta['mjd'] = dict(ext=0, card='MJD-OBS')
meta['exptime'] = dict(ext=0, card='EXPTIME')
meta['airmass'] = dict(ext=0, card='AIRMASS')
meta['decker'] = dict(ext=0, card='ISISLITU')
# Extras for config and frametyping
meta['dispname'] = dict(ext=0, card='ISIGRAT')
meta['dichroic'] = dict(ext=0, card='ISIDICHR')
meta['dispangle'] = dict(ext=0, card='CENWAVE', rtol=1e-3)
meta['slitwid'] = dict(ext=0, card='ISISLITW')
meta['idname'] = dict(ext=0, card='IMAGETYP')
# Lamps
meta['lampstat01'] = dict(ext=0, card='CAGLAMPS')
# Ingest
self.meta = meta
def compound_meta(self, headarr, meta_key):
if meta_key == 'binning':
binspatial = headarr[0]['CCDXBIN']
binspec = headarr[0]['CCDYBIN']
return parse.binning2string(binspec, binspatial)
else:
msgs.error("Not ready for this compound meta")
def configuration_keys(self):
"""
Return the metadata keys that define a unique instrument
configuration.
This list is used by :class:`pypeit.metadata.PypeItMetaData` to
identify the unique configurations among the list of frames read
for a given reduction.
Returns:
list: List of keywords of data pulled from meta
"""
return ['dispname', 'decker', 'binning', 'dispangle', 'dichroic']
def pypeit_file_keys(self):
pypeit_keys = super(WHTISISBlueSpectrograph, self).pypeit_file_keys()
pypeit_keys += ['slitwid']
return pypeit_keys
def check_frame_type(self, ftype, fitstbl, exprng=None):
"""
Check for frames of the provided type.
"""
good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
if ftype in ['science', 'standard']:
return good_exp & (fitstbl['lampstat01'] == 'Off') & (fitstbl['idname'] == 'object')
if ftype == 'bias':
return good_exp & (fitstbl['idname'] == 'zero')
if ftype in ['pixelflat', 'trace']:
return good_exp & (fitstbl['lampstat01'] == 'W') & (fitstbl['idname'] == 'flat')
if ftype in ['pinhole', 'dark']:
# Don't type pinhole or dark frames
return np.zeros(len(fitstbl), dtype=bool)
if ftype in ['arc', 'tilt']:
return good_exp & (fitstbl['lampstat01'] == 'CuNe+CuAr') & (fitstbl['idname'] == 'arc')
msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
return np.zeros(len(fitstbl), dtype=bool)
def bpm(self, filename=None, det=None, shape=None, msbias=None, **null_kwargs):
""" Generate a BPM
Parameters
----------
shape : tuple, REQUIRED
filename : str, REQUIRED for binning
det : int, REQUIRED
**null_kwargs:
Captured and never used
Returns
-------
badpix : ndarray
"""
# Get the empty bpm: force is always True
#import pdb
#pdb.set_trace()
self.bpm_img = self.empty_bpm(filename, det=det, shape=shape)
# Only defined for det=2
if msbias is not None:
msgs.info("Generating a BPM for det={0:d} on ISISb".format(det))
medval = np.median(msbias.image)
madval = 1.4826 * np.median(np.abs(medval - msbias.image))
ww = np.where(np.abs(msbias.image-medval) > 10.0*madval)
self.bpm_img[ww] = 1
return self.bpm_img
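# --- Illustration added by the editor (not part of pypeit) -------------------
# The bad-pixel logic in ``bpm()`` above reduces to a median/MAD threshold on
# the master bias.  The helper name and ``nsigma`` default are choices made
# for this sketch only.
def _mad_badpix_mask(bias_image, nsigma=10.0):
    """Boolean mask of pixels more than nsigma * MAD away from the median."""
    medval = np.median(bias_image)
    madval = 1.4826 * np.median(np.abs(bias_image - medval))
    return np.abs(bias_image - medval) > nsigma * madval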
```
#### File: pypeit/tests/test_flux.py
```python
import os
import sys
import numpy as np
import pytest
#try:
# tsterror = FileExistsError
#except NameError:
# FileExistsError = OSError
from astropy import units
from pypeit.core import flux_calib
from pypeit.core import load
from pypeit.spectrographs.util import load_spectrograph
from pypeit import specobjs
from pypeit.tests.tstutils import dummy_fitstbl
#from xastropy.xutils import afits as xafits
#from xastropy.xutils import xdebug as xdb
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
# JFH This test is defunct
#def test_bspline_fit():
# # Testing the bspline works ok (really testing bkspace)
# fit_dict = linetools.utils.loadjson(data_path('flux_data.json'))
# wave = np.array(fit_dict['wave'])
# magfunc = np.array(fit_dict['magf'])
# logivar = np.array(fit_dict['logiv'])
# bspline_par = dict(bkspace=fit_dict['bkspec'])
# mask, tck = utils.robust_polyfit(wave, magfunc, 3, function='bspline',
# weights=np.sqrt(logivar), bspline_par=bspline_par)
def test_gen_sensfunc():
kastr = load_spectrograph('shane_kast_red')
# Load a random spectrum for the sensitivity function
sfile = data_path('spec1d_r153-J0025-0312_KASTr_2015Jan23T025323.850.fits')
sobjs = specobjs.SpecObjs.from_fitsfile(sfile)
# telescope = telescopes.ShaneTelescopePar()
fitstbl = dummy_fitstbl()
RA = '05:06:36.6'
DEC = '52:52:01.0'
# Get the sensitivity function
sens_dict = flux_calib.generate_sensfunc(sobjs[0].BOX_WAVE,
sobjs[0].BOX_COUNTS,
sobjs[0].BOX_COUNTS_IVAR,
fitstbl['airmass'][4], fitstbl['exptime'][4],
kastr.telescope['longitude'],
kastr.telescope['latitude'],
ra=RA, dec=DEC)
# Test
assert isinstance(sens_dict, dict)
assert isinstance(sens_dict['wave_min'], units.Quantity)
def test_find_standard():
# G191b2b
std_ra = '05:06:30.6'
std_dec = '52:49:51.0'
# Grab
std_dict = flux_calib.find_standard_file(std_ra, std_dec)
# Test
assert std_dict['name'] == 'G191B2B'
# assert std_dict['cal_file'] == 'data/standards/calspec/g191b2b_mod_005.fits'
assert std_dict['cal_file'] == 'data/standards/calspec/g191b2b_stisnic_002.fits'
assert std_dict['std_source'] == 'calspec'
# Fail to find
# near G191b2b
std_ra = '05:06:36.6'
std_dec = '52:22:01.0'
std_dict = flux_calib.find_standard_file(std_ra, std_dec)
assert std_dict is None
def test_load_extinction():
# Load
extinct = flux_calib.load_extinction_data(121.6428, 37.3413889)
np.testing.assert_allclose(extinct['wave'][0], 3200.)
assert extinct['wave'].unit == units.AA
np.testing.assert_allclose(extinct['mag_ext'][0], 1.084)
# Fail
extinct = flux_calib.load_extinction_data(0., 37.3413889)
assert extinct is None
def test_extinction_correction():
# Load
extinct = flux_calib.load_extinction_data(121.6428, 37.3413889)
# Correction
wave = np.arange(3000.,10000.)*units.AA
AM=1.5
flux_corr = flux_calib.extinction_correction(wave, AM, extinct)
# Test
np.testing.assert_allclose(flux_corr[0], 4.47095192)
```
#### File: pypeit/tests/test_pca.py
```python
import os
import numpy as np
import pytest
from astropy.io import fits
from pypeit.tracepca import TracePCA
@pytest.fixture
def vec_coo():
nvec = 50
return np.linspace(0,1,nvec)
@pytest.fixture
def bogus_vectors(vec_coo):
# Generate some bogus vectors
nspec = 1000
spec_coo = np.linspace(0,1,nspec)
coeff0 = 0.1*np.square(vec_coo) + vec_coo + 10
coeff1 = 2*vec_coo - 3
base_vector = 3*spec_coo + np.power(spec_coo,3)
return coeff0[None,:] + coeff1[None,:]*base_vector[:,None]
def test_build(vec_coo, bogus_vectors):
pca = TracePCA(trace_cen=bogus_vectors, npca=2, coo=vec_coo)
pca.build_interpolator([1,2])
assert pca.npca == 2, 'Incorrect number of components'
assert pca.nspec == bogus_vectors.shape[0], 'Incorrect number of pixels'
assert np.array_equal(vec_coo, pca.trace_coo), 'Coordinates do not match'
# TODO: More checks?
def test_prediction(vec_coo, bogus_vectors):
pca = TracePCA(trace_cen=bogus_vectors, npca=2, coo=vec_coo)
pca.build_interpolator([1,2])
pred = pca.predict(0.5)
assert pred.size == bogus_vectors.shape[0], 'Bad prediction'
# TODO: More checks?
def test_write(vec_coo, bogus_vectors):
pca = TracePCA(trace_cen=bogus_vectors, npca=2, coo=vec_coo)
pca.build_interpolator([1,2])
ofile = 'junkpca.fits'
fits.HDUList([fits.PrimaryHDU(), pca.to_hdu()]).writeto(ofile)
os.remove(ofile)
def test_read(vec_coo, bogus_vectors):
pca = TracePCA(trace_cen=bogus_vectors, npca=2, coo=vec_coo)
pca.build_interpolator([1,2])
ofile = 'junkpca.fits'
fits.HDUList([fits.PrimaryHDU(), pca.to_hdu()]).writeto(ofile)
readpca = TracePCA.from_file(ofile)
assert np.array_equal(pca.trace_coo, readpca.trace_coo), 'Bad read'
assert np.array_equal(pca.pca_mean, readpca.pca_mean), 'Bad read'
assert np.array_equal(pca.pca_coeffs, readpca.pca_coeffs), 'Bad read'
assert np.array_equal(pca.pca_components, readpca.pca_components), 'Bad read'
assert np.array_equal(pca.pca_bpm, readpca.pca_bpm), 'Bad read'
assert pca.npca == readpca.npca, 'Bad read'
assert pca.nspec == readpca.nspec, 'Bad read'
for i in range(pca.npca):
assert np.array_equal(pca.fit_coeff[i], readpca.fit_coeff[i]), 'Bad read'
os.remove(ofile)
```
#### File: pypeit/tests/test_pydl.py
```python
import numpy as np
from pypeit.core.pydl import bspline
import pytest
try:
tsterror = FileExistsError
except NameError:
FileExistsError = OSError
def test_bsplinetodict():
""" Test for writing a bspline onto a dict
(and also reading it out).
"""
x = np.random.rand(500)
# Create bspline
init_bspline = bspline(x, bkspace=0.01*(np.max(x)-np.min(x)))
# Write bspline to bspline_dict
bspline_dict = init_bspline.to_dict()
# Create bspline from bspline_dict
bspline_fromdict = bspline(None, from_dict=bspline_dict)
assert np.max(np.array(bspline_dict['breakpoints'])-bspline_fromdict.breakpoints) == 0.
```
#### File: pypeit/tests/test_traceimage.py
```python
import os
import pytest
import glob
import numpy as np
from pypeit import traceimage
from pypeit.tests.tstutils import dev_suite_required
from pypeit.spectrographs.util import load_spectrograph
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
@pytest.fixture
@dev_suite_required
def deimos_flat_files():
return [os.path.join(os.getenv('PYPEIT_DEV'), 'RAW_DATA', 'Keck_DEIMOS', '830G_L_8400', ifile)
for ifile in ['d0914_0014.fits.gz', 'd0914_0015.fits.gz']]
@dev_suite_required
def test_instantiate(deimos_flat_files):
# Empty
traceImage = traceimage.TraceImage('keck_deimos',[])
@dev_suite_required
def test_process(deimos_flat_files):
keck_deimos = load_spectrograph('keck_deimos')
# Instantiate
traceImage = traceimage.TraceImage(keck_deimos, deimos_flat_files)
# Run
assert traceImage.nfiles == 2
traceImage.build_image()
assert isinstance(traceImage.pypeitImage.image, np.ndarray)
for key in ['subtract_overscan', 'apply_gain']:
assert key in traceImage.process_steps
```
#### File: pypeit/tests/test_waveimage.py
```python
import os
import pytest
import glob
import numpy as np
from pypeit.tests.tstutils import load_kast_blue_masters, cooked_required
from pypeit import waveimage
from pypeit.spectrographs.util import load_spectrograph
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
@cooked_required
def test_build_me():
# Masters
spectrograph = load_spectrograph('shane_kast_blue')
edges, tilts_dict, wv_calib = load_kast_blue_masters(edges=True, tilts=True, wvcalib=True)
tslits_dict = edges.convert_to_tslits_dict()
# Instantiate
master_key = 'A_01_aa'
master_dir = os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked', 'Shane_Kast_blue')
nslits = tslits_dict['nslits']
maskslits = np.zeros(nslits, dtype=bool)
det = 1
wvImg = waveimage.WaveImage(tslits_dict, tilts_dict['tilts'], wv_calib,
spectrograph, det, maskslits,
master_key=master_key, master_dir=master_dir,
reuse_masters=True)
# Build
wave = wvImg.build_wave()
assert int(np.max(wave)) > 5510
``` |
{
"source": "joshwalawender/RemoteObserving",
"score": 2
} |
#### File: joshwalawender/RemoteObserving/test_tigervnc.py
```python
import pytest
from pathlib import Path
import logging
import subprocess
import re
from keck_vnc_launcher import create_logger, KeckVncLauncher, create_parser
# create kvl object
create_logger()
kvl = KeckVncLauncher()
kvl.log = logging.getLogger('KRO')
kvl.log_system_info()
kvl.args = create_parser()
kvl.get_config()
kvl.check_config()
def we_using_tigervnc():
vncviewercmd = kvl.config.get('vncviewer', 'vncviewer')
cmd = [vncviewercmd, '--help']
kvl.log.info(f'Checking VNC viewer: {" ".join(cmd)}')
result = subprocess.run(cmd, capture_output=True)
output = result.stdout.decode() + '\n' + result.stderr.decode()
if re.search(r'TigerVNC', output):
kvl.log.info(f'We are using TigerVNC')
return True
else:
kvl.log.info(f'We are NOT using TigerVNC')
return False
def test_tigervnc_config_file_exists():
if we_using_tigervnc() is True:
tigervnc_config_file = Path('~/.vnc/default.tigervnc').expanduser()
if tigervnc_config_file.exists() is False:
kvl.log.error(f'Could not find {tigervnc_config_file}')
assert tigervnc_config_file.exists()
def test_tigervnc_config_RemoteResize():
if we_using_tigervnc() is True:
tigervnc_config_file = Path('~/.vnc/default.tigervnc').expanduser()
with open(tigervnc_config_file) as FO:
tiger_config = FO.read()
RRsearch = re.search(r'RemoteResize=(\d)', tiger_config)
if RRsearch is None:
kvl.log.error('Could not find RemoteResize setting')
assert RRsearch is not None
else:
remote_resize_value = int(RRsearch.group(1))
kvl.log.info(f'Found RemoteResize set to {remote_resize_value}')
if remote_resize_value !=0:
kvl.log.error('RemoteResize must be set to 0')
assert remote_resize_value == 0
``` |
{
"source": "joshwalawender/SIDRE",
"score": 3
} |
#### File: SIDRE/SIDRE/sort.py
```python
import os
import re
import ccdproc as ccd
import astropy.units as u
from astropy import table
from .config import get_config
def get_ImageFileCollection(filepath):
'''
Given a directory path with FITS files in it, use the header keywords (hard
coded in this function) to categorize each file as one of:
Science: A science exposure
Bias: A bias frame
Dark: A dark frame
Flat: A flat field frame (twilight or dome)
Rejected: A file that has been rejection for any reason.
Uncategorized: A file which was not categorized as one of the above.
A column called "CATEGORY" is added to the `ImageFileCollection.summary`
table and populated with a string of the above category.
This method can be replaced to customize the code to any particular header
or metadata convention.
'''
assert os.path.exists(os.path.abspath(filepath))
temperature_deadband = get_config().get('TemperatureDeadband', 1.0)
keywords = ['EXPTIME', 'SET-TEMP', 'CCD-TEMP', 'XBINNING', 'YBINNING',
'IMAGETYP', 'OBJECT', 'DATE-OBS']
ifc = ccd.ImageFileCollection(filepath, keywords=keywords)
ifc.summary.add_column(table.Column(data=['']*len(ifc.summary),
name='CATEGORY', dtype='a12'))
for i,entry in enumerate(ifc.summary):
tempdiff = float(entry['SET-TEMP']) - float(entry['CCD-TEMP'])
if abs(tempdiff) > temperature_deadband:
ifc.summary[i]['CATEGORY'] = b'Rejected'
elif re.search('Light Frame', entry['IMAGETYP'], flags=re.IGNORECASE):
ifc.summary[i]['CATEGORY'] = b'Science'
elif re.search('Bias Frame', entry['IMAGETYP'], flags=re.IGNORECASE):
ifc.summary[i]['CATEGORY'] = b'Bias'
elif re.search('Dark Frame', entry['IMAGETYP'], flags=re.IGNORECASE):
ifc.summary[i]['CATEGORY'] = b'Dark'
elif re.search('Flat', entry['IMAGETYP'], flags=re.IGNORECASE):
ifc.summary[i]['CATEGORY'] = b'Flat'
else:
ifc.summary[i]['CATEGORY'] = b'Uncategorized'
return ifc
def get_image_table(filepath, type):
ifc = get_ImageFileCollection(filepath)
bytype = ifc.summary.group_by('CATEGORY')
typelist = bytype.groups[bytype.groups.keys['CATEGORY'] == type]
return typelist
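# --- Illustration added by the editor (not part of SIDRE) --------------------
# Hedged usage sketch: pull the frames categorised as science exposures out of
# a directory of FITS files.  The CATEGORY values are stored as byte strings
# above, so a byte string is passed here; the directory path comes from the
# caller.
def _example_science_frames(filepath):
    science = get_image_table(filepath, b'Science')
    return science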
``` |
{
"source": "joshwalawender/SuPrimeCam",
"score": 2
} |
#### File: SuPrimeCam/SuPrimeCam/process.py
```python
from pathlib import Path
import logging
import numpy as np
from astropy import units as u
from astropy.io import fits
from astropy.nddata import CCDData
from astropy.table import Table
import ccdproc
from ccdproc.utils.slices import slice_from_string
##-------------------------------------------------------------------------
## Create logger object
##-------------------------------------------------------------------------
log = logging.getLogger('MyLogger')
log.setLevel(logging.DEBUG)
## Set up console output
LogConsoleHandler = logging.StreamHandler()
LogConsoleHandler.setLevel(logging.DEBUG)
LogFormat = logging.Formatter('%(asctime)s %(levelname)8s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
LogConsoleHandler.setFormatter(LogFormat)
log.addHandler(LogConsoleHandler)
## Set up file output
# LogFileName = None
# LogFileHandler = logging.FileHandler(LogFileName)
# LogFileHandler.setLevel(logging.DEBUG)
# LogFileHandler.setFormatter(LogFormat)
# log.addHandler(LogFileHandler)
##-------------------------------------------------------------------------
## Exceptions
##-------------------------------------------------------------------------
class MEFDataError(Exception):
"""Base class for exceptions in this module."""
pass
class IncompatiblePixelData(MEFDataError):
"""Raise when trying to operate on multiple MEFData
objects which have incompatible pixeldata.
"""
def __init__(self, message):
super().__init__(f"MEFData objects have incompatible pixeldata. {message}")
class IncorrectNumberOfExtensions(MEFDataError):
"""Raise when verify method fails for a specific instrument.
"""
def __init__(self, datatype, expected, kd):
msg = f"Incorrect number of {datatype} entries. Expected {expected} for {type(kd)}"
print(msg)
super().__init__(msg)
##-------------------------------------------------------------------------
## MEFData Classes
##-------------------------------------------------------------------------
class MEFData(object):
"""Our data model.
Attributes:
pixeldata -- a list of CCDData objects containing pixel values.
tabledata -- a list of astropy.table.Table objects
headers -- a list of astropy.io.fits.Header objects
"""
def __init__(self, *args, **kwargs):
self.pixeldata = []
self.tabledata = []
self.headers = []
self.MEFhdul = None
def verify(self):
"""Method to check the data against expectations. For the
MEFData class this simply passes and does nothing, but
subclasses for specific instruments can populate this
with appropriate tests.
"""
pass
def add(self, kd2):
"""Method to add another MEFData object to this one and return
the result. This uses the CCDData object's add method and
simply loops over all elements of the pixeldata list.
"""
if len(self.pixeldata) != len(kd2.pixeldata):
raise IncompatiblePixelData
for i,pd in enumerate(self.pixeldata):
self.pixeldata[i] = pd.add(kd2.pixeldata[i])
def subtract(self, kd2):
"""Method to subtract another MEFData object to this one
and return the result. This uses the CCDData object's
subtract method and simply loops over all elements of
the pixeldata list.
"""
if len(self.pixeldata) != len(kd2.pixeldata):
raise IncompatiblePixelData
for i,pd in enumerate(self.pixeldata):
self.pixeldata[i] = pd.subtract(kd2.pixeldata[i])
def multiply(self, kd2):
"""Method to multiply another MEFData object by this one
and return the result. This uses the CCDData object's
multiply method and simply loops over all elements of
the pixeldata list.
"""
if len(self.pixeldata) != len(kd2.pixeldata):
raise IncompatiblePixelData
for i,pd in enumerate(self.pixeldata):
self.pixeldata[i] = pd.multiply(kd2.pixeldata[i])
def get(self, kw):
"""Method to loop over all headers and get the specified keyword value.
Returns the first result it finds and does not check for duplicate
instances of the keyword in subsequent headers.
"""
for hdr in self.headers:
val = hdr.get(kw, None)
if val is not None:
return val
def create_deviation(self, readnoise=10):
for i,pd in enumerate(self.pixeldata):
gain = pd.header.get('GAIN')
self.pixeldata[i] = ccdproc.create_deviation(
pd, gain=gain * u.electron/u.adu,
readnoise=readnoise * u.electron)
def gain_correct(self):
for i,pd in enumerate(self.pixeldata):
gain = pd.header.get('GAIN')
self.pixeldata[i] = ccdproc.gain_correct(pd, gain*u.electron/u.adu)
def la_cosmic(self, sigclip=5):
for i,pd in enumerate(self.pixeldata):
self.pixeldata[i] = ccdproc.cosmicray_lacosmic(pd, sigclip=sigclip)
def bias_subtract(self, master_bias):
for i,pd in enumerate(self.pixeldata):
self.pixeldata[i] = ccdproc.subtract_bias(pd, master_bias.pixeldata[i])
def flat_correct(self, master_flat):
for i,pd in enumerate(self.pixeldata):
self.pixeldata[i] = ccdproc.flat_correct(pd, master_flat.pixeldata[i])
def assemble(self):
MEFhdul = [fits.PrimaryHDU(data=None, header=self.headers[0])]
for chip in range(0,10):
ext0 = chip*4
x0s = []
x1s = []
y0s = []
y1s = []
for ext in range(chip*4, (chip+1)*4):
hdr = self.headers[ext+1]
assert hdr.get('DET-ID') == chip
detsec = slice_from_string(hdr.get('DETSEC'), fits_convention=True)
datasec = slice_from_string(hdr.get('DATASEC'), fits_convention=True)
ccdsec = slice_from_string(hdr.get('CCDSEC'), fits_convention=True)
# print(hdr.get('CCDNAME'))
# print(ext, detsec)
# print(ext, datasec)
# print(ext, ccdsec)
x0s.append(detsec[1].start)
x1s.append(detsec[1].stop)
y0s.append(detsec[0].start)
y1s.append(detsec[0].stop)
chip_xrange = [min(x0s), max(x1s)]
chip_yrange = [min(y0s), max(y1s)]
chip_size = [max(x1s)-min(x0s), max(y1s)-min(y0s)]
chip_x0s = [x-min(x0s) for x in x0s]
chip_x1s = [x-min(x0s) for x in x1s]
# print(f"chip xrange: {chip_xrange}")
chip_data = np.zeros((chip_size[1]+1, chip_size[0]+1))
for i,ext in enumerate(range(chip*4, (chip+1)*4)):
chip_data[:,chip_x0s[i]:chip_x1s[i]+1] = self.pixeldata[ext].data
chip_hdu = fits.ImageHDU(chip_data, self.headers[ext0+1])
chip_hdu.header.set('DETSEC', f'[{chip_xrange[0]+1}:{chip_xrange[1]+1},'\
f'{chip_yrange[0]+1}:{chip_yrange[1]+1}]')
chip_hdu.header.set('DATASEC', f'[1:{chip_size[0]+1},1:{chip_size[1]+1}]')
chip_hdu.header.set('CCDSEC', f'[1:{chip_size[0]+1},1:{chip_size[1]+1}]')
chip_hdu.header.set('BUNIT', str(self.pixeldata[0].unit))
MEFhdul.append(chip_hdu)
self.MEFhdul = fits.HDUList(MEFhdul)
return MEFhdul
def to_hdul(self):
assert len(self.pixeldata) == 10
MEFhdul = [fits.PrimaryHDU(data=None, header=self.headers[0])]
for i,pd in enumerate(self.pixeldata):
hdu = pd.to_hdu()[0]
hdu = fits.ImageHDU(data=hdu.data, header=hdu.header)
MEFhdul.append(hdu)
self.MEFhdul = fits.HDUList(MEFhdul)
def write(self, file):
'''Assemble into chips and write as a 10-extension MEF.
'''
if self.MEFhdul is None:
if len(self.pixeldata) == 10:
self.to_hdul()
elif len(self.pixeldata) == 40:
self.assemble()
self.MEFhdul.writeto(file)
##-------------------------------------------------------------------------
## Get HDU Type
##-------------------------------------------------------------------------
def get_hdu_type(hdu):
"""Function to examine a FITS HDU object and determine its type. Returns
one of the following strings:
'header' -- This is a PrimaryHDU or ImageHDU with no pixel data.
'pixeldata' -- This is a PrimaryHDU or ImageHDU containing pixel data.
'uncertainty' -- This is a pixeldata HDU which is associated with the
uncertainty data written by either CCDData or MEFData.
'mask' -- This is a pixeldata HDU which is associated with the mask
data written by either CCDData or MEFData.
'tabledata' -- This is a TableHDU type HDU.
"""
if type(hdu) in [fits.PrimaryHDU, fits.ImageHDU] and hdu.data is None:
# This is a header only HDU
return 'header'
elif type(hdu) in [fits.PrimaryHDU, fits.ImageHDU] and hdu.data is not None:
# This is a pixel data HDU
extname = hdu.header.get('EXTNAME', '').strip()
if extname == 'MASK':
# This is a mask HDU
return 'mask'
elif extname == 'UNCERT':
# This is an uncertainty HDU
return 'uncertainty'
else:
# This must be pixel data
return 'pixeldata'
elif type(hdu) == fits.TableHDU:
# This is table data
return 'tabledata'
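# --- Illustration added by the editor (not part of SuPrimeCam) ---------------
# Quick sketch of how get_hdu_type() classifies HDUs, using a tiny synthetic
# HDUList; nothing here depends on real SuPrimeCam data.
def _example_classify_hdus():
    prim = fits.PrimaryHDU()                                  # -> 'header'
    pix = fits.ImageHDU(data=np.zeros((2, 2)))                # -> 'pixeldata'
    mask = fits.ImageHDU(data=np.zeros((2, 2)), name='MASK')  # -> 'mask'
    return [get_hdu_type(hdu) for hdu in (prim, pix, mask)]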
##-------------------------------------------------------------------------
## MEFData Reader
##-------------------------------------------------------------------------
def read_hdul(hdul, defaultunit='adu', datatype=MEFData):
# Loop though HDUs and read them in as pixel data or table data
md = datatype()
while len(hdul) > 0:
# print('Extracting HDU')
hdu = hdul.pop(0)
md.headers.append(hdu.header)
hdu_type = get_hdu_type(hdu)
# print(f' Got HDU type = {hdu_type}')
if hdu_type == 'header':
pass
elif hdu_type == 'tabledata':
md.tabledata.append(Table(hdu.data))
elif hdu_type == 'pixeldata':
# Check the next HDU
mask = None
uncertainty = None
if len(hdul) > 0:
next_type = get_hdu_type(hdul[0])
if next_type == 'mask':
mask = hdul[0].data
elif next_type == 'uncertainty':
uncertainty = hdul[0].data
if len(hdul) > 1:
next_type2 = get_hdu_type(hdul[1])
if next_type2 == 'mask':
mask = hdul[1].data
elif next_type2 == 'uncertainty':
uncertainty = hdul[1].data
# Sanitize "ADU per coadd" BUNIT value
if hdu.header.get('BUNIT') == "ADU per coadd":
hdu.header.set('BUNIT', 'adu')
# Populate the CCDData object
c = CCDData(hdu.data, mask=mask, uncertainty=uncertainty,
meta=hdu.header,
unit=(hdu.header.get('BUNIT', defaultunit)).lower(),
)
md.pixeldata.append(c)
# print(f'Read in {len(md.headers)} headers, '
# f'{len(md.pixeldata)} sets of pixel data, '
# f'and {len(md.tabledata)} tables')
md.verify()
return md
def fits_MEFdata_reader(file, defaultunit='adu', datatype=MEFData):
"""A reader for MEFData objects.
Currently this is a separate function, but should probably be
registered as a reader similar to fits_ccddata_reader.
Arguments:
file -- The filename (or pathlib.Path) of the FITS file to open.
Keyword arguments:
defaultunit -- If the BUNIT keyword is unable to be located or
parsed, the reader will assume this unit. Defaults
to "adu".
datatype -- The output datatype. Defaults to MEFData, but could
be a subclass such as MOSFIREData. The main effect of
this is that it runs the appropriate verify method on
the data.
"""
try:
hdul = fits.open(file, 'readonly')
except FileNotFoundError as e:
print(e.msg)
raise e
except OSError as e:
print(e.msg)
raise e
md = read_hdul(hdul, defaultunit=defaultunit, datatype=datatype)
return md
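# --- Illustration added by the editor (not part of SuPrimeCam) ---------------
# Hedged usage sketch mirroring the first steps of process() below: read one
# raw frame, attach a deviation plane and gain-correct it.  The filename is a
# placeholder and the read-noise value is an assumption.
def _example_reduce_single_frame(filename):
    md = fits_MEFdata_reader(filename)
    md.create_deviation(readnoise=10)
    md.gain_correct()
    return md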
##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
def process(MEFpath):
# filters = ['i', 'Ha', 'SII']
filters = ['i', 'SII']
filternames = {'i': 'W-S-I+',
'Ha': 'N-A-L656',
'SII': 'N-A-L671'}
MEFpath = Path(MEFpath).expanduser()
# Create table of files
tablefile = MEFpath.parent.joinpath('files.txt')
if tablefile.exists() is True:
print(f"Reading {tablefile}")
t = Table.read(tablefile, format='ascii.csv')
else:
t = Table(names=('file', 'imtype', 'filter', 'object'),
dtype=('a200', 'a12', 'a12', 'a50') )
for file in MEFpath.glob('MEF*.fits'):
MEF = fits_MEFdata_reader(file)
t.add_row((file, MEF.get('DATA-TYP'), MEF.get('FILTER01'),
MEF.get('OBJECT')))
t.write(tablefile, format='ascii.csv')
##-------------------------------------------------------------------------
## Build Master Bias
##-------------------------------------------------------------------------
master_bias_file = MEFpath.parent.joinpath('MasterBias.fits')
if master_bias_file.exists():
print(f"Reading Master Bias: {master_bias_file}")
master_bias = fits_MEFdata_reader(master_bias_file)
else:
biases = t[t['imtype'] == 'BIAS']
print(f'Processing {len(biases)} BIAS files')
bias_MEFs = []
for i,file in enumerate(biases['file']):
file = Path(file).expanduser()
print(f' Reading {i+1:3d}/{len(biases):3d}: {file}')
MEF = fits_MEFdata_reader(file)
print(f' Creating deviation')
MEF.create_deviation()
print(f' Gain correcting')
MEF.gain_correct()
print(f' Assembling Chips')
MEF = read_hdul(MEF.assemble())
bias_MEFs.append(MEF)
print(f"Building master bias")
master_bias = bias_MEFs[0]
for i,pd in enumerate(master_bias.pixeldata):
pds = [bias.pixeldata[i] for bias in bias_MEFs]
master_bias.pixeldata[i] = ccdproc.combine(pds, method='average',
clip_extrema=True, nlow=1, nhigh=1)
print(f"Writing: {master_bias_file}")
master_bias.write(master_bias_file)
master_bias = fits_MEFdata_reader(master_bias_file)
##-------------------------------------------------------------------------
## Build Master DOMEFLAT (i filter)
##-------------------------------------------------------------------------
master_flats = {}
for filt in filters:
master_flat_file = MEFpath.parent.joinpath(f'DomeFlat_{filt}.fits')
if master_flat_file.exists():
print(f"Reading Master Flat: {master_flat_file}")
master_flats[filt] = fits_MEFdata_reader(master_flat_file)
else:
domeflats = t[(t['imtype'] == 'DOMEFLAT')\
& (t['filter'] == filternames[filt])]
print(f'Found {len(domeflats)} DOMEFLAT files in the {filt} filter')
domeflat_MEFs = []
for i,file in enumerate(domeflats['file']):
file = Path(file).expanduser()
print(f' Reading {i+1:3d}/{len(domeflats):3d}: {file.name}')
MEF = fits_MEFdata_reader(file)
print(f' Creating deviation')
MEF.create_deviation()
print(f' Gain correcting')
MEF.gain_correct()
print(f' Assembling Chips')
MEF = read_hdul(MEF.assemble())
print(f' Bias subtracting')
MEF.bias_subtract(master_bias)
domeflat_MEFs.append(MEF)
print(f"Generating Master Flat for {filt} Filter")
master_flat = domeflat_MEFs[0]
for i,pd in enumerate(master_flat.pixeldata):
pds = [im.pixeldata[i] for im in domeflat_MEFs]
master_flat.pixeldata[i] = ccdproc.combine(pds, method='average',
sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,
scale=np.median)
master_flats[filt] = master_flat
print(f"Writing: {master_flat_file}")
master_flat.write(master_flat_file)
##-------------------------------------------------------------------------
## Process Science Frames (i filter)
##-------------------------------------------------------------------------
outdir = MEFpath.parent.joinpath('MEF10')
for filt in filters:
images = t[(t['imtype'] == 'OBJECT')\
& (t['filter'] == filternames[filt])]
print(f'Processing {len(images)} OBJECT files in the {filt} filter')
image_MEFs = []
for i,file in enumerate(images['file']):
file = Path(file).expanduser()
print(f' Reading {i+1:3d}/{len(images):3d}: {file.name}')
MEF = fits_MEFdata_reader(file)
print(f' Creating deviation')
MEF.create_deviation()
print(f' Gain correcting')
MEF.gain_correct()
# print(f' Cosmic ray cleaning')
# MEF.la_cosmic()
print(f' Assembling Chips')
MEF = read_hdul(MEF.assemble())
print(f' Bias subtracting')
MEF.bias_subtract(master_bias)
print(f' Flat fielding')
MEF.flat_correct(master_flats[filt])
outfile = outdir.joinpath(file.name)
print(f' Writing: {outfile}')
MEF.write(outfile)
if __name__ == '__main__':
# MEFpath = Path('/Volumes/ScienceData/SuPrimeCam/SuPrimeCam_S17A-UH16A/Processed/MEF')
MEFpath = Path('~/Sync/ScienceData/SuPrimeCam/MEF').expanduser()
process(MEFpath)
``` |
{
"source": "Josh-Walker-GM/pdf-marking-collator",
"score": 3
} |
#### File: Josh-Walker-GM/pdf-marking-collator/bulk_collator.py
```python
from openpyxl import Workbook
from openpyxl.styles import Font
from openpyxl.utils import get_column_letter
from openpyxl.chart import BarChart, Reference
from openpyxl import load_workbook
from openpyxl.styles import PatternFill, Font
import os
import subprocess
import argparse
import logging
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("directories", nargs='+', help="list of directories to collate together")
parser.add_argument("--generate-individual-spreadsheet", type=bool, default=False, action=argparse.BooleanOptionalAction, help="generate marks spreadsheet for individual collations")
parser.add_argument("--use-individual-spreadsheet", type=bool, default=False, action=argparse.BooleanOptionalAction, help="use marks spreadsheet to override pdf marks for individual collations")
parser.add_argument("--generate-combined-spreadsheet", type=bool, default=False, action=argparse.BooleanOptionalAction, help="generate spreadsheet of all collated marks")
parser.add_argument("--use-combined-spreadsheet", type=bool, default=False, action=argparse.BooleanOptionalAction, help="use combined spreadsheet to override marks from collated pdfs")
return parser.parse_args()
def generate_combined_spreadsheet(args):
directories: list[str] = args.directories
combined_wb = Workbook()
combined_ws = combined_wb.active
row_offset = 2
column_offset = 2
directory_index = 0
for directory in directories:
individual_wb = load_workbook(os.path.join(directory, "extracted_marks.xlsx"))
individual_ws = individual_wb.active
marker_count = 0
while True:
if individual_ws.cell(2, marker_count + 3).value is None:
break
marker_count += 1
question_count = 0
while True:
if individual_ws.cell(question_count + 4, 2).value is None:
break
question_count += 1
combined_ws.cell(row_offset, column_offset).value = directory
for row in range(question_count + 2):
for column in range(marker_count + 1):
combined_ws.cell(row_offset + row + 1, column_offset + column).value = individual_ws.cell(row + 2, column + 2).value
combined_ws.cell(row_offset + question_count + 4, column_offset).value = "Total"
for column in range(marker_count):
combined_ws.cell(row_offset + question_count + 4, column_offset + column + 1).value = "=SUM({}{}:{}{})".format(get_column_letter(column_offset + column + 1), row_offset + 3, get_column_letter(column_offset + column + 1), row_offset + 2 + question_count)
combined_ws.cell(row_offset + question_count + 4, column_offset + marker_count + 2).value = "=SUM({}{}:{}{})".format(get_column_letter(column_offset + marker_count + 2), row_offset + 3, get_column_letter(column_offset + marker_count + 2), row_offset + 2 + question_count)
combined_ws.cell(row_offset + 2, column_offset + marker_count + 2).value = "Average"
for row in range(question_count):
combined_ws.cell(row_offset + row + 3, column_offset + marker_count + 2).value = "=AVERAGE({}{}:{}{})".format(get_column_letter(column_offset + 1), row_offset + row + 3, get_column_letter(column_offset + marker_count), row_offset + row + 3)
# create bar chart for marking data visualisation
chart = BarChart()
chart.type = "col"
chart.style = 10
chart.y_axis.title = "Mark Given"
chart.x_axis.title = "Question ID"
data = Reference(combined_ws, min_col=column_offset+1, min_row=row_offset+2, max_row=row_offset+question_count+2, max_col=column_offset+marker_count)
cats = Reference(combined_ws, min_col=column_offset, min_row=row_offset+3, max_row=row_offset+question_count+2)
chart.add_data(data, titles_from_data=True)
chart.set_categories(cats)
chart.height = 0.55 * (question_count + 5)
chart.width = 3 * (question_count)
combined_ws.add_chart(chart, "{}{}".format(get_column_letter(column_offset + marker_count + 4), row_offset))
for column in range(column_offset, column_offset + marker_count + 3):
combined_ws.cell(row_offset + question_count + 6, column).fill = PatternFill(fill_type="solid", start_color="00000000")
combined_ws.row_dimensions[row_offset + question_count + 6].height = 7.5
directory_index += 1
row_offset += question_count + 8
font_standard = Font(name="Calibri", size=11, bold=False, italic=False, vertAlign=None, underline="none", strike=False, color="FF000000")
font_bold = Font(name="Calibri", size=11, bold=True, italic=False, vertAlign=None, underline="none", strike=False, color="FF000000")
for row in range(row_offset):
combined_ws.row_dimensions[row].font = font_standard
row_offset = 2
column_offset = 2
for directory in directories:
combined_ws.cell(row_offset, column_offset).font = font_bold
row_offset += question_count + 8
save_directory = os.path.abspath(os.path.join(directories[0], os.pardir))
combined_wb.save(filename=os.path.join(save_directory, "combined_extracted_marks.xlsx"))
def use_combined_spreadsheet(args):
directories: list[str] = args.directories
save_directory = os.path.abspath(os.path.join(directories[0], os.pardir))
combined_wb = load_workbook(os.path.join(save_directory, "combined_extracted_marks.xlsx"))
combined_ws = combined_wb.active
row_offset = 2
column_offset = 2
directory_index = 0
for directory in directories:
individual_wb = load_workbook(os.path.join(directory, "extracted_marks.xlsx"))
individual_ws = individual_wb.active
marker_count = 0
while True:
if combined_ws.cell(row_offset + 2, column_offset + marker_count + 1).value is None:
break
marker_count += 1
question_count = 0
while True:
if combined_ws.cell(row_offset + question_count + 3, column_offset).value is None:
break
question_count += 1
for row in range(question_count):
for col in range(marker_count):
individual_ws.cell(4 + row, 3 + col).value = combined_ws.cell(row + 3 + row_offset, col + column_offset + 1).value
individual_wb.save(filename=os.path.join(directory, "extracted_marks.xlsx"))
directory_index += 1
row_offset += question_count + 8
def main():
# set logging format
logging.basicConfig(format='%(asctime)s: %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
# extract arguments using the argparse standard lib
args = get_arguments()
# validate against usage of override with and generate spreadsheet features together
if args.generate_combined_spreadsheet and args.use_combined_spreadsheet:
logging.error("Cannot use overriding spreadsheet and generate spreadsheet features at the same time!")
exit(-1)
# validate against usage of override with and generate individual spreadsheet flags together
if args.generate_individual_spreadsheet and args.use_individual_spreadsheet:
logging.error("Cannot use both use and generate individual spreadsheet flags together!")
exit(-1)
directories: list[str] = args.directories
# validate all collation directories are unique
if len(directories) != len(set(directories)):
logging.error("Collation directory list cannot contain duplicates!")
exit(-1)
# validate all collation directories exist
for directory in directories:
if not os.path.exists(os.path.join(os.getcwd(), directory)):
logging.error("Collation directory \"{}\" does not exist!".format(os.path.join(os.getcwd(), directory)))
exit(-1)
if args.use_combined_spreadsheet:
logging.info("Using combined spreadsheet to override pdf marks.")
save_directory = os.path.abspath(os.path.join(directories[0], os.pardir))
if not os.path.exists(os.path.join(save_directory, "combined_extracted_marks.xlsx")):
logging.error("Combined marks spreadsheet does not exist in \"{}\"!".format(save_directory))
exit(-1)
# validate individual projects have extracted marks spreadsheets
for directory in directories:
if not os.path.exists(os.path.join(directory, "extracted_marks.xlsx")):
logging.error("Extracted marks spreadsheet does not exist for \"{}\"!".format(directory))
exit(-1)
use_combined_spreadsheet(args)
# Dev note: Calling of commands formed from user input is dangerous - should check/sanitise this...
for directory in directories:
logging.info("Collating {}.".format(directory))
command_string = "python collator.py {} {}.pdf".format(directory, os.path.basename(os.path.normpath(directory)))
if args.generate_individual_spreadsheet or args.generate_combined_spreadsheet:
command_string = "{} {}".format(command_string, "--generate-spreadsheet")
if args.use_individual_spreadsheet or args.use_combined_spreadsheet:
command_string = "{} {}".format(command_string, "--use-spreadsheet")
return_code = subprocess.call(command_string, shell=True)
if return_code != 0:
logging.error("Collation of \"{}\" failed!".format(directory))
exit(-1)
if args.generate_combined_spreadsheet:
logging.info("Generating combined spreadsheet.")
# validate individual projects have extracted marks spreadsheets
for directory in directories:
if not os.path.exists(os.path.join(directory, "extracted_marks.xlsx")):
logging.error("Extracted marks spreadsheet does not exist for \"{}\"!".format(directory))
exit(-1)
generate_combined_spreadsheet(args)
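# --- Illustration added by the editor (not part of this tool) ----------------
# The chart block inside generate_combined_spreadsheet() boils down to the
# openpyxl pattern below.  The row/column arguments and the anchor cell are
# placeholders for this sketch.
def _example_marks_chart(ws, first_row, first_col, n_questions, n_markers):
    chart = BarChart()
    chart.type = "col"
    chart.y_axis.title = "Mark Given"
    chart.x_axis.title = "Question ID"
    data = Reference(ws, min_col=first_col + 1, max_col=first_col + n_markers,
                     min_row=first_row, max_row=first_row + n_questions)
    cats = Reference(ws, min_col=first_col,
                     min_row=first_row + 1, max_row=first_row + n_questions)
    chart.add_data(data, titles_from_data=True)
    chart.set_categories(cats)
    ws.add_chart(chart, "B2")
    return chart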
if __name__ == "__main__":
main()
``` |
{
"source": "joshwapohlmann/home-assistant",
"score": 2
} |
#### File: components/plex/sensor.py
```python
from datetime import timedelta
import logging
import plexapi.exceptions
import requests.exceptions
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from .const import DOMAIN as PLEX_DOMAIN, SERVERS
DEFAULT_NAME = "Plex"
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Plex sensor."""
if discovery_info is None:
return
plexserver = list(hass.data[PLEX_DOMAIN][SERVERS].values())[0]
add_entities([PlexSensor(plexserver)], True)
class PlexSensor(Entity):
"""Representation of a Plex now playing sensor."""
def __init__(self, plex_server):
"""Initialize the sensor."""
self._name = DEFAULT_NAME
self._state = None
self._now_playing = []
self._server = plex_server
self._unique_id = f"sensor-{plex_server.machine_identifier}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the id of this plex client."""
return self._unique_id
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return "Watching"
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {content[0]: content[1] for content in self._now_playing}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update method for Plex sensor."""
try:
sessions = self._server.sessions()
except plexapi.exceptions.BadRequest:
_LOGGER.error(
"Error listing current Plex sessions on %s", self._server.friendly_name
)
return
except requests.exceptions.RequestException as ex:
_LOGGER.warning(
"Temporary error connecting to %s (%s)", self._server.friendly_name, ex
)
return
now_playing = []
for sess in sessions:
user = sess.usernames[0]
device = sess.players[0].title
now_playing_user = f"{user} - {device}"
now_playing_title = ""
if sess.TYPE == "episode":
# example:
# "Supernatural (2005) - S01 · E13 - Route 666"
season_title = sess.grandparentTitle
if sess.show().year is not None:
season_title += " ({0})".format(sess.show().year)
season_episode = "S{0}".format(sess.parentIndex)
if sess.index is not None:
season_episode += f" · E{sess.index}"
episode_title = sess.title
now_playing_title = "{0} - {1} - {2}".format(
season_title, season_episode, episode_title
)
elif sess.TYPE == "track":
# example:
# "Billy Talent - Afraid of Heights - Afraid of Heights"
track_artist = sess.grandparentTitle
track_album = sess.parentTitle
track_title = sess.title
now_playing_title = "{0} - {1} - {2}".format(
track_artist, track_album, track_title
)
else:
# example:
# "picture_of_last_summer_camp (2015)"
# "The Incredible Hulk (2008)"
now_playing_title = sess.title
if sess.year is not None:
now_playing_title += f" ({sess.year})"
now_playing.append((now_playing_user, now_playing_title))
self._state = len(sessions)
self._now_playing = now_playing
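# --- Illustration added by the editor (not part of Home Assistant) -----------
# The title formatting in update() reduces to this pure function.  The
# argument names are simplifications of the plexapi session fields, not the
# plexapi API itself.
def _format_now_playing(kind, title, parent=None, grandparent=None, year=None,
                        index=None, parent_index=None):
    if kind == "episode":
        show = f"{grandparent} ({year})" if year is not None else grandparent
        episode = f"S{parent_index}" + (f" · E{index}" if index is not None else "")
        return f"{show} - {episode} - {title}"
    if kind == "track":
        return f"{grandparent} - {parent} - {title}"
    return f"{title} ({year})" if year is not None else title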
``` |
{
"source": "JoshWarby/LWM",
"score": 3
} |
#### File: JoshWarby/LWM/LWM.py
```python
import tkinter as tk
import tkinter.scrolledtext as tkst
import tkinter.ttk as ttk
import datetime
import sqlite3
import re
from passlib.hash import sha256_crypt
# Regular expressions used in validating.
dateregex="""(^(((0[1-9]|1[0-9]|2[0-8])[\/](0[1-9]|1[012]))|((29|30|31)[\/](0[13578]|1[02]))|((29|30)[\/](0[4,6,9]|11)))[\/](19|[2-9][0-9])\d\d$)|(^29[\/]02[\/](19|[2-9][0-9])(00|0408||12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96)$)"""
emailregex="""[^@]+@[^@]+\.[^@]+"""
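# --- Illustration added by the editor -----------------------------------------
# Hedged example of how the two patterns above are applied later in this file
# before anything is written to the database.
def _looks_valid(date_text, email_text):
    """True when the date (DD/MM/YYYY) and the e-mail both match the patterns."""
    return bool(re.match(dateregex, date_text)) and bool(re.match(emailregex, email_text))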
# Creating database and tables.
conn = sqlite3.connect("data\LWDB.db")
cursor = conn.cursor()
cursor.execute("""CREATE TABLE IF NOT EXISTS Events (
EventID INTEGER PRIMARY KEY ASC AUTOINCREMENT,
Name VARCHAR,
Date VARCHAR,
VenueID INTEGER REFERENCES Venues ( VenueID ),
TrainerID INTEGER REFERENCES Trainers ( TrainerID ),
TrainerPaid REAL
);
""")
cursor.execute("""CREATE TABLE IF NOT EXISTS Trainers (
TrainerID INTEGER PRIMARY KEY ASC AUTOINCREMENT,
FName VARCHAR,
LName VARCHAR,
ContactNo VARCHAR,
ContactEmail VARCHAR,
TotalPaid REAL,
TotalEvents INT
);
""")
cursor.execute("""CREATE TABLE IF NOT EXISTS Venues (
VenueID INTEGER PRIMARY KEY ASC AUTOINCREMENT,
Name VARCHAR,
Capacity INTEGER,
ContactNo VARCHAR
);
""")
cursor.execute("""CREATE TABLE IF NOT EXISTS EventTasks (
TasksID INTEGER PRIMARY KEY ASC AUTOINCREMENT
REFERENCES Events ( EventID ),
DeligateListSent BOOLEAN,
PaperWorkRecorded BOOLEAN,
CertificatesSent BOOLEAN,
ClientInvoiced BOOLEAN,
PayReceived BOOLEAN
);
""")
conn.commit()
#Tkinter Necessities
class Application(tk.Frame):
def __init__(self, master):
self.master = master
tk.Frame.__init__(self, self.master)
# Main function for displaying data in all tables.
def DisplayData():
# HOME PAGE OVERVIEW
for row in EventTable.get_children():
EventTable.delete(row)
cursor.execute(
"SELECT EventID, Name,Date,VenueID,TrainerID FROM Events")
EventPulledData = cursor.fetchall()
for row in EventPulledData:
EventTable.insert(
"", 0, text="Row", values=(row[0], row[1], row[2], row[3], row[4]))
for row in TrainerTable.get_children():
TrainerTable.delete(row)
cursor.execute(
"SELECT TrainerID, FName,ContactNo,ContactEmail FROM Trainers")
TrainerPulledData = cursor.fetchall()
for row in TrainerPulledData:
TrainerTable.insert(
"", 0, text="Row", values=(row[0], row[1], row[2], row[3]))
for row in VenueTable.get_children():
VenueTable.delete(row)
cursor.execute(
"SELECT VenueID, Name, ContactNo FROM Venues")
VenuePulledData = cursor.fetchall()
for row in VenuePulledData:
VenueTable.insert(
"", 0, text="Row", values=(row[0], row[1], row[2]))
# CountDowns
for row in TimeTable.get_children():
TimeTable.delete(row)
cursor.execute(
"SELECT EventID,Date FROM Events")
IDandDate = cursor.fetchall()
lizt = []
for i in IDandDate:
maths = str(datetime.datetime.strptime(
i[1], '%d/%m/%Y').date() - datetime.datetime.now().date())[:-9]
lizt += [(str(i[0]), maths)]
for row in lizt:
TimeTable.insert(
"", 0, text="Row", values=(row[0], row[1]))
# MAIN PAGES
for row in TaskBigTable.get_children():
TaskBigTable.delete(row)
cursor.execute(
"SELECT TasksID,DeligateListSent,PaperWorkRecorded,CertificatesSent,ClientInvoiced,PayReceived FROM EventTasks")
TasksPulledData = cursor.fetchall()
TaskList=list(map(list, TasksPulledData))
lookup = {0: "Incomplete", 1: "Complete"}
for i, j in enumerate(TaskList):
for k, l in enumerate(j):
if k == 0:
continue
TaskList[i][k] = lookup[TaskList[i][k]]
for row in TaskList:
TaskBigTable.insert(
"", 0, text="Row", values=(row[0], row[1], row[2], row[3], row[4], row[5]))
for row in TrainerBigTable.get_children():
TrainerBigTable.delete(row)
cursor.execute(
"SELECT TrainerID, FName,LName,ContactNo,ContactEmail,TotalPaid,TotalEvents FROM Trainers")
TrainerPulledData = cursor.fetchall()
for row in TrainerPulledData:
TrainerBigTable.insert("", 0, text="Row", values=(
row[0], row[1], row[2], row[3], row[4], row[5], row[6]))
for row in VenueBigTable.get_children():
VenueBigTable.delete(row)
cursor.execute(
"SELECT VenueID, Name, Capacity, ContactNo FROM Venues")
VenuePulledData = cursor.fetchall()
for row in VenuePulledData:
VenueBigTable.insert(
"", 0, text="Row", values=(row[0], row[1], row[2], row[3]))
# Main function for inserting data into the database.
def InsertData(whichlist):
#DML = "delete from sqlite_sequence where name='Events';"
#cursor.execute(DML)
#conn.commit()
#DML = "delete from sqlite_sequence where name='EventTasks';"
#cursor.execute(DML)
#conn.commit()
for i in TrainerIDEntry_H.get() + VenueIDEntry_H.get():
if i not in "0123456789":
PopupMessage("ID Wrong Format")
return
if DateEntry.get() != "":
if not re.match(dateregex, DateEntry.get()):
PopupMessage("Date Wrong Format")
return
if ContactEEntry.get() != "":
if not re.match(emailregex, ContactEEntry.get()):
PopupMessage("Email Wrong Format")
return
if TrainerPaidEntry.get() != "":
try:
float(TrainerPaidEntry.get())
except:
PopupMessage("Paid Wrong Format")
return
zero = 0
dics = {"EventEntryList":
[NameEntry, DateEntry, VenueIDEntry_H,
TrainerIDEntry_H, TrainerPaidEntry],
"TrainerEntryList":
[FNameEntry, LNameEntry, ContactNoEntry,
ContactEEntry, PaidEntry, zero],
"TaskEntryList":
[DelVar, PapVar, CertVar, InvVar, PayVar],
"VenueEntryList":
[VNameEntry, CapacityEntry, ContactVEntry]}
if whichlist == "EventEntryList":
Table = "Events"
DML = "INSERT INTO EventTasks VALUES (NULL,0,0,0,0,0);"
cursor.execute(DML)
conn.commit()
if whichlist == "TrainerEntryList":
Table = "Trainers"
if whichlist == "VenueEntryList":
Table = "Venues"
DML = "INSERT INTO %s VALUES (NULL" % Table
for i in dics[whichlist]:
if i in(NameEntry, FNameEntry, LNameEntry, ContactNoEntry, ContactEEntry, VNameEntry, ContactVEntry, DateEntry):
x = '"' + str(i.get()) + '"'
else:
try:
x = str(i.get())
except:
x = str(0)
DML += "," + x
DML += ");"
cursor.execute(DML)
conn.commit()
if whichlist == "EventEntryList":
DML = "UPDATE Trainers SET TotalPaid = TotalPaid + {} WHERE TrainerID = {}".format(
TrainerPaidEntry.get(), TrainerIDEntry_H.get())
cursor.execute(DML)
conn.commit()
DML = "UPDATE Trainers SET TotalEvents = TotalEvents + 1 WHERE TrainerID = {}".format(
TrainerIDEntry_H.get())
cursor.execute(DML)
conn.commit()
DisplayData()
# Function for updating tick box values within the database.
def TickBoxUpdate(ID,Del,Pap,Cert,Inv,Pay):
if ID != "":
try:
int(ID)
except:
PopupMessage("TaskID Wrong Format")
return
DML = "UPDATE EventTasks SET DeligateListSent ="+str(Del)+", PaperWorkRecorded = "+str(Pap)+", CertificatesSent = "+str(Cert)+",ClientInvoiced = "+str(Inv)+", PayReceived = "+str(Pay)+" WHERE TasksID = "+str(ID)
cursor.execute(DML)
conn.commit()
DisplayData()
# Function for editing the event table.
def EditEventTable(ID,Name,Date,VenID,TrainID,TrainPaid):
if ID != "":
try:
int(ID)
except:
PopupMessage("EventID Wrong Format")
return
DML = "UPDATE Events SET Name = {}, Date = {}, VenueID = {},TrainerID = {},TrainerPaid = {} WHERE EventID = {}".format(('"'+Name+'"'),('"'+Date+'"'),('"'+VenID+'"'),('"'+TrainID+'"'),('"'+TrainPaid+'"'),('"'+ID+'"'),)
cursor.execute(DML)
conn.commit()
DisplayData()
# Function for editing the venue table.
def EditVenueTable(ID,Name,Cap,Contact):
print(ID)
if ID != "":
try:
int(ID)
except:
PopupMessage("TaskID Wrong Format")
return
DML = "UPDATE Venues SET Name = {}, Capacity = {}, ContactNo = {} WHERE VenueID = {}".format(('"'+Name+'"'),('"'+Cap+'"'),('"'+Contact+'"'),('"'+ID+'"'))
cursor.execute(DML)
conn.commit()
DisplayData()
# Function for editing the trainer table.
def EditTrainerTable(ID,FName,LName,ContactE,ContactNo,Paid):
print(ID)
if ID != "":
try:
int(ID)
except:
PopupMessage("TrainerID Wrong Format")
return
DML = "UPDATE Trainers SET FName = {}, LName = {}, ContactNo = {}, ContactEmail = {} , TotalPaid = {} WHERE TrainerID = {}".format(('"'+FName+'"'),('"'+LName+'"'),('"'+ContactE+'"'),('"'+ContactNo+'"'),('"'+Paid+'"'),('"'+ID+'"'))
cursor.execute(DML)
conn.commit()
DisplayData()
# Deletes any data from database.
def DeleteField(Table,ID):
if Table == "Events":
tableID = "EventID"
if Table == "Trainers":
tableID = "TrainerID"
if Table == "Venues":
tableID = "VenueID"
DML = "DELETE FROM {} WHERE {} = {}".format(Table,tableID,ID)
cursor.execute(DML)
conn.commit()
if Table == "Events":
Table = "EventTasks"
tableID = "TasksID"
DML = "DELETE FROM {} WHERE {} = {}".format(Table,tableID,ID)
cursor.execute(DML)
conn.commit()
DisplayData()
# "Forwards" pressing enter to checking password.
def passcheckenter():
passcheck(PE1)
# Used to switch between "Pages"
def switchto(frame):
frame.tkraise()
# Used to create popup messages, used for errors.
def PopupMessage(message):
PopupMessage = tk.Tk()
PopupMessage.wm_title("ERROR!")
PopupMessage.configure(background="Yellow4")
label = tk.Label(
PopupMessage, bg="Yellow4", text=message, font=("Verdana", 12))
label.pack(side="top", fill="x", pady=10)
button1 = ttk.Button(
PopupMessage, text="Okay", command=lambda: PopupMessage.destroy())
button1.pack(padx=20, pady=10, side="bottom", fill="x")
PopupMessage.resizable(width=False, height=False)
PopupMessage.mainloop()
# Checks the entered password against the stored encrypted hash.
def passcheck(PE1):
#if sha256_crypt.verify( PE1.get(), "$5$rounds=535000$A13jp7js0fJDN97x$fZ9gRkQoRKtreWl/WGa2Bc.eYXYf3aKcnydcll445fB"):
if sha256_crypt.verify( PE1.get(), "$5$rounds=535000$AXcB5Fs8zYN5taGo$hwzOItn575Ar4H18YKG0ITdS1fbe8EbWxMoNOFqAcl4"):
switchto(HomeFrame)
TopFrame.grid(row=0, column=0, sticky='news')
else:
PopupMessage("Password Incorrect")
# Colours
topbarcolour = "white"
textboxcolour = "#00ACC1"
regbgcolour = "#0277BD"
loginbgcolour = "#424242"
# Create Frames
TopFrame = tk.Frame(self, bg=topbarcolour)
LoginFrame = tk.Frame(
self, width=875, height=600, bg=loginbgcolour, padx=320, pady=250)
HomeFrame = tk.Frame(
self, width=875, height=600, bg=regbgcolour, padx=20, pady=10)
TasksFrame = tk.Frame(
self, width=875, height=600, bg=regbgcolour, padx=20, pady=10)
TrainerFrame = tk.Frame(
self, width=875, height=600, bg=regbgcolour, padx=20, pady=10)
VenueFrame = tk.Frame(
self, width=875, height=600, bg=regbgcolour, padx=20, pady=10)
# Top Frame Buttons
ttk.Button(TopFrame,
text="Home",
command=lambda: switchto(HomeFrame)
).grid(row=0, column=0, sticky='w')
ttk.Button(TopFrame,
text="Tasks",
command=lambda: switchto(TasksFrame)
).grid(row=0, column=1, sticky='w')
ttk.Button(TopFrame,
text="Trainers",
command=lambda: switchto(TrainerFrame)
).grid(row=0, column=2, sticky='w')
ttk.Button(TopFrame,
text="Venues",
command=lambda: switchto(VenueFrame)
).grid(row=0, column=3, sticky='w')
# Login Frame
PL1 = tk.Label(LoginFrame, text="Password :")
PL1.grid(row=1, column=0, sticky="w")
PE1 = tk.Entry(LoginFrame, bd=2, show="*")
PE1.focus_set()
PE1.bind('<Return>', lambda event: passcheck(PE1))
PE1.grid(row=1, column=1,)
PassButton1 = ttk.Button(
LoginFrame, text="Submit", width=10,
command=lambda: passcheck(PE1))
PassButton1.grid(row=3, column=1, sticky="s")
# Home Page
photo = tk.PhotoImage(file="data\logo.png")
w = tk.Label(HomeFrame, image=photo, bg=regbgcolour)
w.photo = photo
w.place(x=250, y=0)
EventTable = ttk.Treeview(HomeFrame)
EventTable['show'] = 'headings'
EventTable["columns"]=("E ID","Name","Date","V ID","T ID")
EventTable.column("E ID", width=30)
EventTable.column("Name", width=60)
EventTable.column("Date", width=60)
EventTable.column("V ID", width=30)
EventTable.column("T ID", width=30)
EventTable.heading("E ID", text="ID")
EventTable.heading("Name", text="Name")
EventTable.heading("Date", text="Date")
EventTable.heading("V ID", text="V ID")
EventTable.heading("T ID", text="T ID")
EventTable.place(x=10, y=110)
TrainerTable = ttk.Treeview(HomeFrame)
TrainerTable['show'] = 'headings'
TrainerTable["columns"]=("T ID","First Name","Mob No",)
TrainerTable.column("T ID", width=30)
TrainerTable.column("<NAME>", width=120)
TrainerTable.column("Mob No", width=120)
TrainerTable.heading("T ID", text="ID")
TrainerTable.heading("First Name", text="First Name")
TrainerTable.heading("Mob No", text="Mob No")
TrainerTable.place(x=250, y=110)
VenueTable = ttk.Treeview(HomeFrame)
VenueTable['show'] = 'headings'
VenueTable["columns"]=("V ID","Name","Mob No",)
VenueTable.column("V ID", width=30)
VenueTable.column("Name", width=120)
VenueTable.column("Mob No", width=120)
VenueTable.heading("V ID", text="V ID")
VenueTable.heading("Name", text="Name")
VenueTable.heading("Mob No", text="Mob No")
VenueTable.place(x=550, y=110)
TimeTable = ttk.Treeview(HomeFrame, height=8)
TimeTable['show'] = 'headings'
TimeTable["columns"]=("Event ID","Time Until",)
TimeTable.column("Event ID", width=100)
TimeTable.column("Time Until", width=150)
TimeTable.heading("Event ID", text="Event ID")
TimeTable.heading("Time Until", text="Time Until")
TimeTable.place(x=275, y=400)
VenueL1 = tk.Label(HomeFrame, text="Venue :", bg=regbgcolour)
VenueL1.place(x=660, y=90)
TrainerL1 = tk.Label(HomeFrame, text="Trainer :", bg=regbgcolour)
TrainerL1.place(x=370, y=90)
EventL1 = tk.Label(HomeFrame, text="Event :", bg=regbgcolour)
EventL1.place(x=100, y=90)
EventIDLabel = tk.Label(
HomeFrame, text="Event ID (Edits Only)", bg=regbgcolour)
NameLabel = tk.Label(HomeFrame, text="Event Name", bg=regbgcolour)
DateLabel = tk.Label(HomeFrame, text="Date (DD/MM/YYYY)", bg=regbgcolour)
VenueIDLabel = tk.Label(HomeFrame, text="VenueID", bg=regbgcolour)
TrainerIDLabel = tk.Label(HomeFrame, text="TrainerID", bg=regbgcolour)
TrainerPaidLabel = tk.Label(
HomeFrame, text="Trainer Paid (£)", bg=regbgcolour)
EventIDEntry = tk.Entry(HomeFrame, bd=2, bg=textboxcolour)
NameEntry = tk.Entry(HomeFrame, bd=2, bg=textboxcolour)
DateEntry = tk.Entry(HomeFrame, bd=2, bg=textboxcolour)
VenueIDEntry_H = tk.Entry(HomeFrame, bd=2, bg=textboxcolour)
TrainerIDEntry_H = tk.Entry(HomeFrame, bd=2, bg=textboxcolour)
TrainerPaidEntry = tk.Entry(HomeFrame, bd=2, bg=textboxcolour)
EventIDEntry.place(x=0, y=370)
NameEntry.place(x=140, y=370)
DateEntry.place(x=280, y=370)
VenueIDEntry_H .place(x=420, y=370)
TrainerIDEntry_H.place(x=560, y=370)
TrainerPaidEntry.place(x=700, y=370)
EventIDLabel.place(x=0, y=340)
NameLabel.place(x=140, y=340)
DateLabel.place(x=280, y=340)
VenueIDLabel.place(x=420, y=340)
TrainerIDLabel.place(x=560, y=340)
TrainerPaidLabel.place(x=700, y=340)
EventSubmitButton = ttk.Button(HomeFrame,
text="Submit New Event",
command=lambda: InsertData(
"EventEntryList")
)
EventSubmitButton.place(x=0, y=400)
DeleteLabel = tk.Label(HomeFrame, text="Delete Event", bg=regbgcolour)
DeleteEventEntry = tk.Entry(HomeFrame, bd=2, bg=textboxcolour)
DeleteEventButon = ttk.Button(HomeFrame, text="Delete Event", command=lambda: DeleteField("Events", DeleteEventEntry.get()))
DeleteLabel.place(x=0, y=450)
DeleteEventEntry.place(x=0, y=480)
DeleteEventButon.place(x=140, y=480)
EditEventButon = ttk.Button(HomeFrame, text="Edit", command=lambda: EditEventTable(EventIDEntry.get(), NameEntry.get(), DateEntry.get(), VenueIDEntry_H.get(), TrainerIDEntry_H.get(), TrainerPaidEntry.get()))
EditEventButon.place(x=120, y=400)
# Trainer Page
TrainerPageLabel = tk.Label(
TrainerFrame, text="Trainers", bg=regbgcolour, font=("Helvetica", 30))
TrainerPageLabel.place(x=300, y=0)
TrainerIDLabel = tk.Label(
TrainerFrame, text="Trainer ID (Edits Only)", bg=regbgcolour)
FNameLabel = tk.Label(TrainerFrame, text="First Name", bg=regbgcolour)
LNameLabel = tk.Label(TrainerFrame, text="Last Name", bg=regbgcolour)
ContactNoIDLabel = tk.Label(
TrainerFrame, text="Contact No", bg=regbgcolour)
ContactEIDLabel = tk.Label(
TrainerFrame, text="Contact Email", bg=regbgcolour)
Paid = tk.Label(
TrainerFrame, text="Trainer Total Paid (£)", bg=regbgcolour)
TrainerIDEntry = tk.Entry(TrainerFrame, bd=2, bg=textboxcolour)
FNameEntry = tk.Entry(TrainerFrame, bd=2, bg=textboxcolour)
LNameEntry = tk.Entry(TrainerFrame, bd=2, bg=textboxcolour)
ContactNoEntry = tk.Entry(TrainerFrame, bd=2, bg=textboxcolour)
ContactEEntry = tk.Entry(TrainerFrame, bd=2, bg=textboxcolour)
PaidEntry = tk.Entry(TrainerFrame, bd=2, bg=textboxcolour)
TrainerIDEntry.place(x=0, y=370)
FNameEntry.place(x=140, y=370)
LNameEntry.place(x=280, y=370)
ContactNoEntry.place(x=420, y=370)
ContactEEntry.place(x=560, y=370)
PaidEntry.place(x=700, y=370)
TrainerIDLabel.place(x=0, y=340)
FNameLabel.place(x=140, y=340)
LNameLabel.place(x=280, y=340)
ContactNoIDLabel.place(x=420, y=340)
ContactEIDLabel.place(x=560, y=340)
Paid.place(x=700, y=340)
TrainerSubmitButton = ttk.Button(TrainerFrame,
text="Submit New Trainer",
command=lambda: InsertData(
"TrainerEntryList")
)
TrainerSubmitButton.place(x=0, y=400)
DeleteLabelTrainer = tk.Label(
TrainerFrame, text="Delete Trainer", bg=regbgcolour)
DeleteTrainerEntry = tk.Entry(TrainerFrame, bd=2, bg=textboxcolour)
DeleteTrainerButon = ttk.Button(
TrainerFrame, text = "Delete Trainer", command=lambda: DeleteField("Trainers",DeleteTrainerEntry.get()))
DeleteLabelTrainer.place(x=0, y=450)
DeleteTrainerEntry.place(x=0, y=480)
DeleteTrainerButon.place(x=140, y=480)
TrainerBigTable = ttk.Treeview(TrainerFrame)
TrainerBigTable['show'] = 'headings'
TrainerBigTable["columns"]=("T ID","FName","LName","Contact No","Contact Email","TotalPaid","TotalEvents")
TrainerBigTable.column("T ID", width=60)
TrainerBigTable.heading("T ID", text="T ID")
TrainerBigTable.column("FName", width=120)
TrainerBigTable.heading("FName", text="FName")
TrainerBigTable.column("LName", width=120)
TrainerBigTable.heading("LName", text="LName")
TrainerBigTable.column("Contact No", width=120)
TrainerBigTable.heading("Contact No", text="Contact No")
TrainerBigTable.column("Contact Email", width=120)
TrainerBigTable.heading("Contact Email", text="Contact Email")
TrainerBigTable.column("TotalPaid", width=120)
TrainerBigTable.heading("TotalPaid", text="TotalPaid")
TrainerBigTable.column("TotalEvents", width=120)
TrainerBigTable.heading("TotalEvents", text="TotalEvents")
TrainerBigTable.place(x=0, y=50)
TrainerEventButon = ttk.Button(TrainerFrame, text="Edit", command=lambda: EditTrainerTable(TrainerIDEntry.get(),FNameEntry.get(),LNameEntry.get(),ContactNoEntry.get(),ContactEEntry.get(),PaidEntry.get()))
TrainerEventButon.place(x=120, y=400)
#Tasks Page
TasksPageLabel = tk.Label(
TasksFrame, text="Tasks", bg=regbgcolour, font=("Helvetica", 30))
TasksPageLabel.place(x=300, y=0)
DelVar = tk.IntVar()
PapVar = tk.IntVar()
CertVar = tk.IntVar()
InvVar = tk.IntVar()
PayVar = tk.IntVar()
TaskIDEntry = tk.Entry(TasksFrame, bd=2, bg=textboxcolour)
DelCheck = tk.Checkbutton(TasksFrame, text="Delegate List", variable=DelVar,
onvalue=1, offvalue=0, height=5,
width=20, bg=regbgcolour, activebackground=textboxcolour)
PapCheck = tk.Checkbutton(TasksFrame, text="Paper Work", variable=PapVar,
onvalue=1, offvalue=0, height=5,
width=20, bg=regbgcolour, activebackground=textboxcolour)
CertCheck = tk.Checkbutton(TasksFrame, text="Certificates", variable=CertVar,
onvalue=1, offvalue=0, height=5,
width=20, bg=regbgcolour, activebackground=textboxcolour)
InvCheck = tk.Checkbutton(TasksFrame, text="Invoice", variable=InvVar,
onvalue=1, offvalue=0, height=5,
width=20, bg=regbgcolour, activebackground=textboxcolour)
PayCheck = tk.Checkbutton(TasksFrame, text="Paid", variable=PayVar,
onvalue=1, offvalue=0, height=5,
width=20, bg=regbgcolour, activebackground=textboxcolour)
TaskIDLabel = tk.Label(TasksFrame, text="Event ID", bg=regbgcolour)
TaskIDLabel.place(x=0, y=375)
TaskIDEntry.place(x=0, y=400)
DelCheck.place(x=140, y=370)
PapCheck.place(x=280, y=370)
CertCheck.place(x=420, y=370)
InvCheck.place(x=560, y=370)
PayCheck .place(x=700, y=370)
TaskSubmitButton = ttk.Button(TasksFrame,
text="Update Task",
command=lambda: TickBoxUpdate(TaskIDEntry.get(),DelVar.get(), PapVar.get(), CertVar.get(), InvVar.get(), PayVar.get())
)
TaskSubmitButton.place(x=0, y=450)
TaskBigTable = ttk.Treeview(TasksFrame)
TaskBigTable['show'] = 'headings'
TaskBigTable["columns"]=("E ID","Deligate List","Paper Work","Certificates","Invoiced","Payed")
TaskBigTable.column("E ID", width=60)
TaskBigTable.heading("E ID", text="E ID")
TaskBigTable.column("Deligate List", width=120)
TaskBigTable.heading("Deligate List", text="Deligate List")
TaskBigTable.column("Paper Work", width=120)
TaskBigTable.heading("Paper Work", text="Paper Work")
TaskBigTable.column("Certificates", width=120)
TaskBigTable.heading("Certificates", text="Certificates Sent")
TaskBigTable.column("Invoiced", width=120)
TaskBigTable.heading("Invoiced", text="Invoiced")
TaskBigTable.column("Payed", width=120)
TaskBigTable.heading("Payed", text="Payed")
TaskBigTable.place(x=75, y=50)
#Venue Page
VenuePageLabel = tk.Label(
VenueFrame, text="Venue", bg=regbgcolour, font=("Helvetica", 30))
VenuePageLabel.place(x=300, y=0)
VenueIDLabel = tk.Label(
VenueFrame, text="Venue ID (Edits Only)", bg=regbgcolour)
VNameLabel = tk.Label(VenueFrame, text="Venue Name", bg=regbgcolour)
CapacityLabel = tk.Label(VenueFrame, text="Capacity", bg=regbgcolour)
VContactNoIDLabel = tk.Label(
VenueFrame, text="Contact No", bg=regbgcolour)
VenueIDEntry = tk.Entry(VenueFrame, bd=2, bg=textboxcolour)
VNameEntry = tk.Entry(VenueFrame, bd=2, bg=textboxcolour)
CapacityEntry = tk.Entry(VenueFrame, bd=2, bg=textboxcolour)
ContactVEntry = tk.Entry(VenueFrame, bd=2, bg=textboxcolour)
VenueIDEntry.place(x=0, y=370)
VNameEntry.place(x=140, y=370)
CapacityEntry.place(x=280, y=370)
ContactVEntry.place(x=420, y=370)
VenueIDLabel.place(x=0, y=340)
VNameLabel.place(x=140, y=340)
CapacityLabel.place(x=280, y=340)
VContactNoIDLabel.place(x=420, y=340)
VenueSubmitButton = ttk.Button(VenueFrame,
text="Submit New Venue",
command=lambda: InsertData(
"VenueEntryList")
)
VenueSubmitButton.place(x=0, y=400)
DeleteLabelVenue = tk.Label(
VenueFrame, text="Delete Venue", bg=regbgcolour)
DeleteVenueEntry = tk.Entry(VenueFrame, bd=2, bg=textboxcolour)
DeleteVenueButon = ttk.Button(VenueFrame, text="Delete Venue", command=lambda: DeleteField("Venues", DeleteVenueEntry.get()))
DeleteLabelVenue.place(x=0, y=450)
DeleteVenueEntry.place(x=0, y=480)
DeleteVenueButon.place(x=140, y=480)
VenueBigTable = ttk.Treeview(VenueFrame)
VenueBigTable['show'] = 'headings'
VenueBigTable["columns"] = ("V ID", "Name", "Capacity", "ContactNo")
VenueBigTable.column("V ID", width=60)
VenueBigTable.heading("V ID", text="V ID")
VenueBigTable.column("Name", width=120)
VenueBigTable.heading("Name", text="Name")
VenueBigTable.column("Capacity", width=120)
VenueBigTable.heading("Capacity", text="Capacity")
VenueBigTable.column("ContactNo", width=120)
VenueBigTable.heading("ContactNo", text="ContactNo")
VenueBigTable.place(x=175, y=50)
VenueEventButon = ttk.Button(VenueFrame, text="Edit", command=lambda: EditVenueTable(VenueIDEntry.get(), VNameEntry.get(), CapacityEntry.get(), ContactVEntry.get()))
VenueEventButon.place(x=120, y=400)
#Setup
LoginFrame.grid(row=1, column=0, sticky='news')
HomeFrame.grid(row=1, column=0, sticky='news')
TasksFrame.grid(row=1, column=0, sticky='news')
TrainerFrame.grid(row=1, column=0, sticky='news')
VenueFrame.grid(row=1, column=0, sticky='news')
self.grid()
switchto(LoginFrame)
DisplayData()
#Tkinter Necessities
root = tk.Tk()
root.title("LW Manager")
root.iconbitmap('data/LWicon.ico')
root.resizable(width=False, height=False)
Application(root).mainloop()
``` |
{
"source": "joshwarecom/joshwarecom.github.com",
"score": 2
} |
#### File: security/goatc/goatc.py
```python
import util, pprint, commands
#initialize main python module
debugmsg = util.print_err
die = util.die
util.set_default_verbosity(1)
hashlib_seed = 'goATC'
api_client = None
#error codes and messages; use only 0 or positive integers.
err_success = [0,"Success"]
err_unexplained_failure = [1,"Unexplained failure."]
err_bad_status_code = [2,"Bad status code returned"]
err_invalid_command = [3,"Invalid command."]
#index of commands supported by this script
supported_commands = [
["ACCOUNT_NAME",
"Simply lookup and print the account name for the supplied switchKey."],
["ACCOUNT_CONTRACTS",
"List the contracts associated with the supplied switchKey."],
["ACCOUNT_GROUPS",
"List the configurations associated with the supplied switchKey."],
["ACCOUNT_PROPERTIES",
"List the configurations associated with the supplied switchKey, contract, and group."],
["PROPERTY_HOSTNAMES",
"Download list of hostnames."],
["PROPERTY_XML",
"Download configuration metadata as XML."],
["PROPERTY_JSON",
"Download configuration metadata as JSON."],
["PROPERTY_RULES_JSON",
"Download configuration version metadata as JSON."],
["GET_TEST_CATALOG_TEMPLATE",
"Download the ATC test catalog template"],
["EXECUTE_PROPERTY_ATC_SCRIPTS",
"Download the list of ATC test scripts saved to the comments of given property version and execute them. For the experimental GUI version, supplly the --ui argument. To supply test script from a local file, supply --localFile /path/to/file."],
["LIST_GOATC_TEST_SUITES",
"Download the list of ATC test suites that were automatically generated by this script."],
["REMOVE_GOATC_TEST_SUITES",
"Remove the ATC test suites that were automatically generated by this script."],
["SHOW_GOATC_UI",
"Show experimental GUI for parameterizing this script."]
]
current_command_index = -1
def initialize_arguments():
#add builtin library arguments
args = util.ApiArguments(__doc__, autoparse=False)
#generate help text for supported commands
cmd_list = ""
for pair in supported_commands:
if (cmd_list != ""):
cmd_list = cmd_list + ", "
cmd_list = cmd_list + pair[0]
args.required_args.add_argument("--cmd", required=False,
help="supported commands: " + cmd_list,
action="store", dest="command", default="SHOW_GOATC_UI")
#parse arguments
args.parse_args()
#make sure command entered is valid
valid_command = 0
global current_command_index
for pair in supported_commands:
current_command_index = current_command_index + 1
if args.args.command.lower() == pair[0].lower():
valid_command = 1
break
if (valid_command == 0):
die(err_invalid_command[0],args.args.command + " is not a valid command. Valid commands are: " + cmd_list)
return args
if __name__ == "__main__":
try:
util.set_input_args(initialize_arguments())
if (util.get_input_args().args.explain):
print("Explanation for the " + supported_commands[current_command_index][0] + " command: \n\n" + supported_commands[current_command_index][1])
else:
util.set_api_client(util.ApiClient(switchkey=util.get_input_args().switchkey, edgerc=util.get_input_args().edgerc, section=util.get_input_args().section))
arg_fname = "addargs_" + supported_commands[current_command_index][0];
if (hasattr(commands, arg_fname)):
f = getattr(commands, arg_fname)
f()
util.get_input_args().parse_args()
fname = "docmd_" + supported_commands[current_command_index][0];
f = getattr(commands, fname)
f()
finally:
util.cleanup()
```
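The `__main__` block above resolves its handlers by name (`addargs_<COMMAND>` / `docmd_<COMMAND>` looked up on the `commands` module with `getattr`). A minimal, self-contained sketch of that dispatch pattern follows; the handler class and names here are placeholders, not the real goatc `commands` module.
```python
# Stand-alone sketch of getattr-based command dispatch (placeholder handlers).
class FakeCommands:
    @staticmethod
    def docmd_ACCOUNT_NAME():
        print("ACCOUNT_NAME handler would run here")

def dispatch(module, command):
    handler_name = "docmd_" + command
    if not hasattr(module, handler_name):
        raise ValueError(command + " is not a valid command")
    return getattr(module, handler_name)()

dispatch(FakeCommands, "ACCOUNT_NAME")
```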
#### File: security/goatc/gui.py
```python
import util, wx, time, threading, pprint, os, urllib
from util import get_api_client, get_input_args, print_err, pprint_err
from subprocess import Popen, PIPE
goatc_buttons = []
property_dialog = None
listbox_groupid = None
label_groupid = None
label_populatedgroupid = None
listbox_populatedgroupid_locked = False
listbox_propertyid = None
label_populatedpropertyid = None
listbox_populatedpropertyid_locked = False
class SelectPropertyDialog(wx.Dialog):
def go(self, flag):
if (flag == False):
self.Close()
else:
wx.MessageBox("Wait for the current operation to complete before continuing.")
def __init__(self):
wx.Dialog.__init__(self, None, title="Select Other Property",style=(~wx.CLOSE_BOX))
self.SetSize(size=(400,400))
other_hidden_label = wx.StaticText(self, label="", pos=(0, 0))
other_hidden_label.Hide()
groupbtn = wx.Button(self, label="Get Group IDs", pos=(275,10))
groupbtn.Bind(wx.EVT_BUTTON, populate_group_id_nonblocking)
global txtctrl_switchkey
label_currentkey = wx.StaticText(self, label="Current Switch Key: " + txtctrl_switchkey.GetValue(), pos=(10, 10))
global label_populatedgroupid
label_populatedgroupid = wx.StaticText(self, label="", pos=(10, 16))
global listbox_populatedgroupid_locked
label_getproperties = wx.StaticText(self, label="Property ID List:", pos=(10, 185))
global label_populatedpropertyid
label_populatedpropertyid = wx.StaticText(self, label="...", pos=(10, 190))
global listbox_populatedpropertyid_locked
propertybtn = wx.Button(self, label="Get Property IDs", pos=(265,185))
global listbox_propertyid
listbox_propertyid = wx.ListBox(self, pos=(10, 205), size=(375,150))
propertybtn.Bind(wx.EVT_BUTTON, populate_property_id_nonblocking)
gobtn = wx.Button(self, label="Go!", pos=(300,355))
gobtn.Bind(wx.EVT_BUTTON, lambda event: self.go(listbox_populatedgroupid_locked))
global property_dialog
property_dialog = self
global listbox_groupid
listbox_groupid = wx.ListBox(self, pos=(10, 31), size=(375,150))
window.Bind(wx.EVT_TIMER, lambda evt, temp=other_hidden_label: update_continuously(evt, temp))
def select_other_property(e):
global txtctrl_switchkey
if (txtctrl_switchkey.GetValue() == ""):
wx.MessageBox("You must enter a switch key to select a property.")
else:
dlg = SelectPropertyDialog()
dlg.ShowModal()
window.Unbind(wx.EVT_TIMER)
dlg.Destroy()
return False
def reset_form(e):
global button_test_pragmas
button_test_pragmas.SetValue(False)
for button in goatc_buttons:
button.reset()
class GoatcButton(wx.Button):
toggled = False
default_label = ""
default_prefix = ""
def reset(self):
self.SetLabel(self.default_label)
self.toggled = False
self.SetOwnForegroundColour(wx.BLACK)
def toggle_binary_test_condition(self,e):
if (self.toggled):
self.reset()
else:
self.SetLabel(self.default_prefix)
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
def toggle_integral_test_condition(self,e):
if (self.toggled):
self.toggled = False
self.SetLabel(self.default_label)
self.SetOwnForegroundColour(wx.BLACK)
else:
dialog = wx.TextEntryDialog(self, "Enter integer value",
"Test Condition", "", wx.OK | wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
try:
number = int(dialog.GetValue())
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
self.SetLabel(self.default_prefix+"=" + dialog.GetValue())
except:
wx.MessageBox("Invalid value, only integers allowed.", "Error")
dialog.Destroy()
def toggle_int_comma_string_condition(self,e):
if (self.toggled):
self.toggled = False
self.SetLabel(self.default_label)
self.SetOwnForegroundColour(wx.BLACK)
else:
dialog = wx.TextEntryDialog(self, "Enter INT,STRING (an integer, followed by a comma, and then thext)",
"Test Condition", "", wx.OK | wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
try:
str = dialog.GetValue()
contents = str.split(",")
number = int(contents[0])
text = contents[1]
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
self.SetLabel(self.default_prefix+"=" + contents[0] + "," + text)
except:
wx.MessageBox("Invalid value, only integers allowed.", "Error")
dialog.Destroy()
def toggle_integral_y_or_n_condition(self,e):
if (self.toggled):
self.toggled = False
self.SetLabel(self.default_label)
self.SetOwnForegroundColour(wx.BLACK)
else:
dialog = wx.TextEntryDialog(self, "Enter Y or N",
"Test Condition", "", wx.OK | wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
txt = (dialog.GetValue()).upper()
if (txt == "Y" or txt == "N"):
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
self.SetLabel(self.default_prefix+"=" + txt)
else:
wx.MessageBox("Invalid value, only Y or N allowed.", "Error")
dialog.Destroy()
def toggle_string_condition(self,e):
if (self.toggled):
self.toggled = False
self.SetLabel(self.default_label)
self.SetOwnForegroundColour(wx.BLACK)
else:
dialog = wx.TextEntryDialog(self, "Enter text string",
"Test Condition", "", wx.OK | wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
txt = dialog.GetValue()
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
self.SetLabel(self.default_prefix+"=" + txt)
dialog.Destroy()
def __init__(self, panel, label, pos, size, default_bind = True):
super().__init__(panel, label=label, pos=pos, size=size)
self.default_label = self.GetLabel()
self.default_prefix = self.default_label.split("=")[0]
self.SetOwnForegroundColour(wx.BLACK)
goatc_buttons.append(self)
if (default_bind == True):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_binary_test_condition(event))
elif (default_bind == "Integer"):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_integral_test_condition(event))
elif (default_bind == "Integer,String"):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_int_comma_string_condition(event))
elif (default_bind == "YorN"):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_integral_y_or_n_condition(event))
elif (default_bind == "String"):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_string_condition(event))
snoring_threads = {}
window = None
hidden_label = None
label_populatedaccountname = None
label_populatedaccountname_locked = False
txtctrl_switchkey = None
txtctrl_contractid = None
combo_contractid = None
label_populatedcontractid = None
combo_populatedcontractid_locked = False
txtctrl_groupid = None
label_groupid = None
label_populatedgroupid = None
combo_populatedgroupid_locked = False
button_test_pragmas = None
def update_continuously(evt, l):
if (l != None):
p = l.GetPosition()
p.x = p.x + 1
p.y = p.y + 1
if (p.y > 10):
p.x = 0
p.y = 0
l.SetPosition((p.x,p.y))
return None;
def snore_continuously(l):
l.SetLabel("")
while l in snoring_threads:
try:
x = l.GetLabel()
x = "." + x
if (x == "..........."):
x = ""
l.SetLabel(x)
time.sleep(.5)
except:
time.sleep(.5)
def populate_property_id_nonblocking(arg=None):
global listbox_groupid
global listbox_propertyid
listbox_propertyid.Hide()
listbox_propertyid.Show()
if (listbox_groupid.GetSelection() == wx.NOT_FOUND):
wx.MessageBox("You must select a group id first.")
return None
t = threading.Thread(target=populate_property_id)
t.start()
def populate_property_id():
global property_dialog
global listbox_propertyid
global listbox_populatedpropertyid_locked
global label_populatedpropertyid
str = (listbox_groupid.GetString(listbox_groupid.GetSelection()))
strlist = str.split("\t")
if (len(strlist) < 3):
wx.MessageBox("ERROR! Invalid selection.")
return None
selectgroup = None
contractlist = []
count = 0
for ctr in strlist:
count = count + 1
if (count == 1):
selectgroup = ctr
if (count >= 3):
contractlist.append(ctr)
pprint.pprint(contractlist)
if (listbox_populatedpropertyid_locked == True):
return False
listbox_populatedpropertyid_locked = True
listbox_propertyid.Disable()
t = None
if (label_populatedpropertyid in snoring_threads):
t = snoring_threads.pop(label_populatedpropertyid)
t.join()
t = threading.Thread(target=snore_continuously, args=[label_populatedpropertyid])
snoring_threads[label_populatedpropertyid] = t
t.start()
file_path = os.path.realpath('goatc.py')
full_output = "\n"
for ctr in contractlist:
cmd = file_path + " --cmd ACCOUNT_PROPERTIES --switchKey " + txtctrl_switchkey.GetValue() + " --groupId " + selectgroup + " --contractId " + ctr
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
full_output = full_output + stdout.decode("utf-8") + "\n"
snoring_threads.pop(label_populatedpropertyid)
t.join()
listbox_propertyid.Hide()
listbox_propertyid.Show()
count = 0
try:
if (stderr.decode("utf-8") != ""):
label_populatedpropertyid.SetLabel("")
listbox_propertyid.Clear()
listbox_propertyid.Disable()
wx.MessageBox(stderr.decode("utf-8"),"An Error Occurred");
else:
listbox_propertyid.Clear()
groups = full_output.split("\n")
for group in groups:
if (group != ""):
count = count + 1
listbox_propertyid.Append((group.replace("|","\t")))
listbox_propertyid.Enable()
finally:
listbox_populatedpropertyid_locked = False
label_populatedpropertyid.SetLabel("")
return True
def populate_group_id_nonblocking(arg=None):
global listbox_groupid
listbox_groupid.Hide()
listbox_groupid.Show()
t = threading.Thread(target=populate_group_id)
t.start()
def populate_group_id():
global property_dialog
global listbox_groupid
global listbox_populatedgroupid_locked
if (listbox_populatedgroupid_locked == True):
return False
listbox_populatedgroupid_locked = True
listbox_groupid.Disable()
t = None
if (label_populatedgroupid in snoring_threads):
t = snoring_threads.pop(label_populatedgroupid)
t.join()
t = threading.Thread(target=snore_continuously, args=[label_populatedgroupid])
snoring_threads[label_populatedgroupid] = t
t.start()
file_path = os.path.realpath('goatc.py')
cmd = file_path + " --cmd ACCOUNT_GROUPS --switchKey " + txtctrl_switchkey.GetValue();
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
snoring_threads.pop(label_populatedgroupid)
t.join()
listbox_groupid.Hide()
listbox_groupid.Show()
count = 0
try:
if (stderr.decode("utf-8") != ""):
label_populatedgroupid.SetLabel("")
listbox_groupid.Clear()
listbox_groupid.Disable()
wx.MessageBox(stderr.decode("utf-8"),"An Error Occurred");
else:
listbox_groupid.Clear()
groups = stdout.decode("utf-8").split("\n")
for group in groups:
if (group != ""):
count = count + 1
listbox_groupid.Append(urllib.parse.unquote(group.replace("|","\t")))
listbox_groupid.Enable()
finally:
listbox_populatedgroupid_locked = False
label_populatedgroupid.SetLabel("")
return True
def populate_contract_id_nonblocking(arg=None):
global txtctrl_contractid
global combo_contractid
txtctrl_contractid.Hide()
txtctrl_contractid.Show()
combo_contractid.Hide()
combo_contractid.Show()
t = threading.Thread(target=populate_contract_id)
t.start()
def populate_contract_id():
global window
global combo_contractid
global combo_populatedcontractid_locked
if (combo_populatedcontractid_locked == True):
return False
combo_populatedcontractid_locked = True
combo_contractid.Disable()
t = None
if (label_populatedcontractid in snoring_threads):
t = snoring_threads.pop(label_populatedcontractid)
t.join()
t = threading.Thread(target=snore_continuously, args=[label_populatedcontractid])
snoring_threads[label_populatedcontractid] = t
t.start()
file_path = os.path.realpath('goatc.py')
cmd = file_path + " --cmd ACCOUNT_CONTRACTS --switchKey " + txtctrl_switchkey.GetValue();
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
snoring_threads.pop(label_populatedcontractid)
t.join()
combo_contractid.Hide()
combo_contractid.Show()
try:
if (stderr.decode("utf-8") != ""):
label_populatedcontractid.SetLabel("")
combo_contractid.Clear()
combo_contractid.Disable()
wx.MessageBox(stderr.decode("utf-8"),"An Error Occurred");
else:
combo_contractid.Clear()
combo_contractid.Disable()
contracts = stdout.decode("utf-8").split("\n")
for contract in contracts:
if (contract != ""):
combo_contractid.Append(contract)
combo_contractid.Enable()
try:
combo_contractid.SetSelection(0)
finally:
label_populatedcontractid.SetLabel("")
finally:
combo_populatedcontractid_locked = False
return True
def populate_account_name_nonblocking(arg=None):
global txtctrl_switchkey
txtctrl_switchkey.Hide()
txtctrl_switchkey.Show()
t = threading.Thread(target=populate_account_name)
t.start()
def populate_account_name():
global window
global label_populatedaccountname
global label_populatedaccountname_locked
if (label_populatedaccountname_locked == True):
return False
label_populatedaccountname_locked = True
t = None
if (label_populatedaccountname in snoring_threads):
t = snoring_threads.pop(label_populatedaccountname)
t.join()
t = threading.Thread(target=snore_continuously, args=[label_populatedaccountname])
snoring_threads[label_populatedaccountname] = t
t.start()
file_path = os.path.realpath('goatc.py')
cmd = file_path + " --cmd ACCOUNT_NAME --switchKey " + txtctrl_switchkey.GetValue();
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
snoring_threads.pop(label_populatedaccountname)
t.join()
if (stderr.decode("utf-8") != ""):
label_populatedaccountname.SetLabel("")
wx.MessageBox(stderr.decode("utf-8"),"An Error Occurred");
else:
label_populatedaccountname.SetLabel(stdout.decode("utf-8"))
label_populatedaccountname_locked = False
return True
def showme():
contractId = get_input_args().args.contractid
if (contractId == None):
contractId = ""
groupId = get_input_args().args.groupid
if (groupId == None):
groupId = ""
propertyId = get_input_args().args.propertyid
if (propertyId == None):
propertyId = ""
versionId = get_input_args().args.versionid
if (versionId == None):
versionId = ""
app = wx.App()
global window
window = wx.Frame(None, title="GOATC UI", size=(650, 475), pos=(50,50))
panel = wx.Panel(window)
global hidden_label
hidden_label = wx.StaticText(panel, label="", pos=(0, 0))
hidden_label.Hide()
window.timer = wx.Timer(window)
window.timer.Start(100)
#window.Bind(wx.EVT_TIMER, lambda evt, temp = hidden_label: update_continuously(evt, temp))
#button_accountname = wx.Button(panel, label="Account Name ", pos=(125, 10), size=(105,20), style=wx.BU_LEFT)
#button_accountname.Bind(wx.EVT_BUTTON, populate_account_name_nonblocking)
global txtctrl_switchkey
current_key = get_api_client().current_switchkey
if current_key == None:
current_key = ""
txtctrl_switchkey = wx.TextCtrl(panel, value=current_key, pos=(10, 30))
label_switchkey = wx.StaticText(panel, label="Switch Key", pos=(10, 10))
#global label_populatedaccountname
#label_populatedaccountname = wx.StaticText(panel, label="* click [Account Name]", pos=(125, 30))
label_contractid = wx.StaticText(panel, label="Contract Id", pos=(10, 60))
#button_contractid = wx.Button(panel, label="Contract Id List ", pos=(125, 60), size=(130,20), style=wx.BU_LEFT)
#button_contractid.Bind(wx.EVT_BUTTON, populate_contract_id_nonblocking)
global txtctrl_contractid
txtctrl_contractid = wx.TextCtrl(panel, value=contractId, pos=(10, 80))
#global combo_contractid
#combo_contractid = wx.ComboBox(panel, 1, style=wx.CB_DROPDOWN | wx.CB_READONLY, size=(125,25), pos=(125,79))
#combo_contractid.Append("* click [Contract Id List]")
#combo_contractid.SetSelection(0)
#combo_contractid.Disable()
#global label_populatedcontractid
#label_populatedcontractid = wx.StaticText(panel, label="", pos=(125, 92))
global label_groupid
label_groupid = wx.StaticText(panel, label="Group Id", pos=(10, 110))
global txtctrl_groupid
txtctrl_groupid = wx.TextCtrl(panel, value=groupId, pos=(10, 130))
#button_groupid = wx.Button(panel, label="Group Id List ", pos=(125, 110), size=(130,20), style=wx.BU_LEFT)
#button_groupid.Bind(wx.EVT_BUTTON, populate_group_id_nonblocking)
#global combo_groupid
#combo_groupid = wx.ComboBox(panel, 1, style=wx.CB_DROPDOWN | wx.CB_READONLY, size=(125,25), pos=(125,129))
#combo_groupid.Append("* click [Group Id List]")
#combo_groupid.SetSelection(0)
#combo_groupid.Disable()
#global label_populatedgroupid
#label_populatedgroupid = wx.StaticText(panel, label="", pos=(125, 142))
label_propertyid = wx.StaticText(panel, label="Property Id", pos=(10, 160))
txtctrl_propertyid = wx.TextCtrl(panel, value=propertyId, pos=(10, 180))
label_propertyid = wx.StaticText(panel, label="Version Id", pos=(10, 210))
txtctrl_propertyid = wx.TextCtrl(panel, value=versionId, pos=(10, 230))
button_propertyselector = wx.Button(panel, label="Select Other\nProperty", pos=(10, 260), size=(105,40))
button_propertyselector.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_propertyselector.Bind(wx.EVT_BUTTON, select_other_property)
button_propertyselector = wx.Button(panel, label="Use This\nProperty", pos=(10, 300), size=(105,40))
button_propertyselector.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_accountinfo = wx.Button(panel, label="Show Helpful\nInformation", pos=(10, 340), size=(105,40))
button_accountinfo.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
label_cfgfile = wx.StaticText(panel, label="Configuration file:", pos=(130, 10))
label_cfgfilevalue = wx.StaticText(panel, label="[click Use This Property]", pos=(240, 10))
label_vars = wx.StaticText(panel, label="Vars:", pos=(130, 264+10))
list_vars = wx.ListBox(panel, 1, style=wx.LB_MULTIPLE, size=(225,60), pos=(130,280+10))
button_equalsvar = wx.Button(panel, label="Equals", pos=(165, 21+10), size=(48,500))
button_equalsvar = wx.Button(panel, label="Equals", pos=(165, 21+10), size=(48,500))
button_equalsvar.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_novar = wx.Button(panel, label="NoVAR", pos=(213, 21+10), size=(48,500))
button_novar.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_noval = wx.Button(panel, label="NoVAL", pos=(261, 21+10), size=(48,500))
button_noval.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_delvar = wx.Button(panel, label="D", pos=(308, 21+10), size=(25,500))
button_delvar.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_clrvar = wx.Button(panel, label="C", pos=(332, 21+10), size=(23,500))
button_clrvar.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
label_rsphdrs = wx.StaticText(panel, label="Rsp hdrs:", pos=(360, 264+10))
list_rsphdrs = wx.ListBox(panel, 1, style=wx.LB_MULTIPLE, size=(230,60), pos=(360,280+10))
button_addrsphdr = wx.Button(panel, label="Add", pos=(420, 21+10), size=(40,500))
button_addrsphdr.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_missrsphdr = wx.Button(panel, label="Miss", pos=(460, 21+10), size=(40,500))
button_missrsphdr.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_delrsphdr = wx.Button(panel, label="Del", pos=(500, 21+10), size=(40,500))
button_delrsphdr.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_clearrsphdr = wx.Button(panel, label="Clear", pos=(540, 21+10), size=(50,500))
button_clearrsphdr.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
label_hostnames = wx.StaticText(panel, label="Hostnames:", pos=(130, 32))
label_populatedhostnames = wx.StaticText(panel, label="...", pos=(130, 37))
list_hostnames = wx.ListBox(panel, 1, style=wx.LB_MULTIPLE, size=(225,75), pos=(130,53))
button_hostnames = wx.Button(panel, label="Unselect All", pos=(210, 21), size=(145,40))
button_hostnames.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
label_hostnames = wx.StaticText(panel, label="Req hdrs:", pos=(360, 32))
#label_populatedheaders = wx.StaticText(panel, label="...", pos=(360, 37))
list_headers = wx.ListBox(panel, 1, style=wx.LB_MULTIPLE, size=(230,75), pos=(360,53))
button_addheader = wx.Button(panel, label="Add", pos=(420, 21), size=(40,40))
button_addheader.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_modheader = wx.Button(panel, label="Mod", pos=(460, 21), size=(40,40))
button_modheader.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_delheader = wx.Button(panel, label="Del", pos=(500, 21), size=(40,40))
button_delheader.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_clearheader = wx.Button(panel, label="Clear", pos=(540, 21), size=(50,40))
button_clearheader.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_status = GoatcButton(panel, label="STATUS=___", pos=(130, 120), size=(80, 40), default_bind="Integer")
button_test_status.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_cpcode = GoatcButton(panel, label="CPCODE=__________", pos=(210, 120), size=(120, 40), default_bind="Integer")
button_test_cpcode.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_sureroute = GoatcButton(panel, label="SUREROUTE", pos=(330, 120), size=(80, 40))
button_test_sureroute.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_sureroute = GoatcButton(panel, label="PREFETCH", pos=(410, 120), size=(80, 40))
button_test_sureroute.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_gzip = GoatcButton(panel, label="GZIP", pos=(490, 120), size=(40, 40))
button_test_gzip.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_nostore = GoatcButton(panel, label="NOSTOR", pos=(530, 120), size=(60, 40))
button_test_nostore.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_redirect = GoatcButton(panel, label="REDIRECT=___,_________________________________________________________", pos=(130, 142), size=(360, 40),default_bind="Integer,String")
button_test_redirect.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_cache = GoatcButton(panel, label="CACHE=__,______", pos=(490, 142), size=(100, 40),default_bind="Integer,String")
button_test_cache.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_bypass = GoatcButton(panel, label="BYPASS", pos=(130, 164), size=(60, 40))
button_test_bypass.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_exclude = GoatcButton(panel, label="EXCLUDEPARAMS", pos=(190, 164), size=(100, 40))
button_test_exclude.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_logrefer = GoatcButton(panel, label="LOGREFER=_", pos=(290, 164), size=(80, 40),default_bind="YorN")
button_test_logrefer.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_loghost = GoatcButton(panel, label="LOGHOST=_", pos=(370, 164), size=(75, 40),default_bind="YorN")
button_test_loghost.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_loglang = GoatcButton(panel, label="LOGLANG=_", pos=(445, 164), size=(75, 40),default_bind="YorN")
button_test_loglang.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_keycontains = GoatcButton(panel, label="KEYCONTAINS=_________________________________________________________", pos=(130, 186), size=(360, 40),default_bind="String")
button_test_keycontains.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_logcook = GoatcButton(panel, label="LOGCUSTOM=_", pos=(490, 164), size=(100, 83),default_bind="YorN")
button_test_logcook.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_logcook = GoatcButton(panel, label="LOGCOOKIES=_", pos=(490, 208), size=(100, 40),default_bind="YorN")
button_test_logcook.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_keyomits = GoatcButton(panel, label="KEYOMITS=_________________________________________________________", pos=(130, 208), size=(360, 40),default_bind="String")
button_test_keyomits.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_hardcode = GoatcButton(panel, label="HARDCODE=_________________________________________________________", pos=(130, 230), size=(360, 40),default_bind="String")
button_test_hardcode.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_secpolicy = GoatcButton(panel, label="SECPOL=________", pos=(490, 208), size=(100, 40),default_bind="String")
button_test_secpolicy.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_notd = GoatcButton(panel, label="NOTD", pos=(490, 230), size=(100, 40))
button_test_notd.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
voffset = 25
label_paths = wx.StaticText(panel, label="Request path (not including hostname):", pos=(130, 355))
txtctrl_path = wx.TextCtrl(panel, value="/", pos=(375, 353), size=(215,22))
button_addtest = wx.Button(panel, label="Update Test Script List", pos=(130, 380+voffset), size=(170, 40))
button_addtest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_cleartest = wx.Button(panel, label="Reset Form", pos=(300, 380+voffset), size=(100, 40))
button_cleartest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_cleartest.Bind(wx.EVT_BUTTON, reset_form)
button_savetest = wx.Button(panel, label="Save", pos=(400, 380+voffset), size=(40, 40))
button_savetest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_copytest = wx.Button(panel, label="Copy", pos=(440, 380+voffset), size=(40, 40))
button_copytest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_runtest = wx.Button(panel, label="Load", pos=(480, 380+voffset), size=(40, 40))
button_runtest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_goatc = wx.Button(panel, label="GO ATC!", pos=(520, 380+voffset), size=(70, 40))
button_goatc.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
combo_templates = wx.ComboBox(panel, 1, style=wx.CB_DROPDOWN | wx.CB_READONLY, size=(325,25), pos=(130,358+voffset))
button_templates = wx.Button(panel, label="Apply Template Instead", pos=(460, 350+voffset), size=(130, 40))
button_templates.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
global button_test_pragmas
button_test_pragmas = wx.CheckBox(panel, label="PRAGMAS", pos=(522, 164), size=(95, 40))
button_test_pragmas.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
window.Show(True)
app.MainLoop()
while len(snoring_threads) > 0:
try:
for t in snoring_threads:
th = snoring_threads.pop(t)
th.join()
except:
print_err("Cleaning up threads...\n")
util.die(0, "UI window closed. Exiting gracefully.")
```
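The worker threads above call widget methods such as `SetLabel`, `Clear`, and `Append` directly; wxPython only guarantees that the GUI may be driven from the main thread, so a more robust variant routes updates through `wx.CallAfter`. A hedged sketch of that pattern (the names here are illustrative, not taken from gui.py):
```python
# Sketch: marshal UI updates from a worker thread back onto the wx main thread.
import threading
import wx

def run_in_background(label, work):
    def worker():
        result = work()                       # blocking work off the UI thread
        wx.CallAfter(label.SetLabel, result)  # queued onto the wx main thread
    threading.Thread(target=worker, daemon=True).start()
```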
#### File: security/goatc/testme.py
```python
import util, pprint
#initialize main python module
debugmsg = util.print_err
die = util.die
util.set_default_verbosity(1)
hashlib_seed = 'illuminATC'
#error codes and messages; use only 0 or positive integers.
err_success = [0,"Success"]
err_unexplained_failure = [1,"Unexplained failure."]
err_bad_status_code = [2,"Bad status code returned"]
def initialize_arguments():
#add builtin library arguments
args = util.ApiArguments(__doc__, autoparse=False)
#add any additional required arguments here before parsing
#example:
#args.required_args.add_argument("--REQUIRED", required=True, help="REQUIRED ARGUMENT", action="store", dest="REQUIRED")
#add any additional optional arguments here before parsing
#example:
#args.parser.add_argument("--OPTIONAL", required=False, help="OPTIONAL", action="store", dest="OPTIONAL")
args.parse_args()
return args
def getAccountName(client):
parameters = {'accountSwitchKey': client.current_switchkey}
json_data = client.httpCaller.getJSONResult('/papi/v1/groups', parameters)
if ("accountName" in json_data and client.get_last_response_code() == 200):
return json_data["accountName"]
return None
if __name__ == "__main__":
try:
arguments = initialize_arguments()
client = util.ApiClient(switchkey=arguments.switchkey, edgerc=arguments.edgerc, section=arguments.section)
name = getAccountName(client)
if (name == None):
print("Could not retrieve account name.\nStatus code: ")
print(client.get_last_response_code())
print("\nJSON response body:")
pprint.pprint(client.get_last_endpoint_result().json());
else:
print(name)
finally:
util.cleanup()
``` |
{
"source": "joshwatson/debugger",
"score": 2
} |
#### File: debugger/dockwidgets/MemoryWidget.py
```python
from PySide2 import QtCore
from PySide2.QtCore import Qt, QAbstractItemModel, QModelIndex, QSize
from PySide2.QtGui import QPalette, QFontMetricsF
from PySide2.QtWidgets import QApplication, QHBoxLayout, QVBoxLayout, QWidget, QTableView, QItemDelegate, QStyle, QHeaderView, QAbstractItemView
import binaryninja
import binaryninjaui
from binaryninja import BinaryView
from binaryninjaui import DockContextHandler, UIActionHandler, LinearView, ViewFrame
from . import widget
from .. import binjaplug
class DebugMemoryWidget(QWidget, DockContextHandler):
def __init__(self, parent, name, data):
if not type(data) == binaryninja.binaryview.BinaryView:
raise Exception('expected widget data to be a BinaryView')
self.bv = data
memory_view = binjaplug.get_state(data).memory_view
QWidget.__init__(self, parent)
DockContextHandler.__init__(self, self, name)
self.editor = LinearView(memory_view, ViewFrame.viewFrameForWidget(self))
self.actionHandler = UIActionHandler()
self.actionHandler.setupActionHandler(self)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(self.editor)
self.setLayout(layout)
def notifyOffsetChanged(self, offset):
pass
def notifyMemoryChanged(self):
adapter = binjaplug.get_state(self.bv).adapter
# Refresh the editor
if adapter is None:
self.editor.navigate(0)
return
self.editor.navigate(adapter.reg_read('rsp'))
def shouldBeVisible(self, view_frame):
if view_frame is None:
return False
else:
return True
```
#### File: joshwatson/debugger/lldb.py
```python
import os
import re
import shutil
import socket
import subprocess
from . import rsp
from . import gdblike
from . import DebugAdapter
macos_signal_to_name = {
1: 'SIGHUP',
2: 'SIGINT',
3: 'SIGQUIT',
4: 'SIGILL',
5: 'SIGTRAP',
6: 'SIGABRT',
7: 'SIGEMT',
8: 'SIGFPE',
9: 'SIGKILL',
10: 'SIGBUS',
11: 'SIGSEGV',
12: 'SIGSYS',
13: 'SIGPIPE',
14: 'SIGALRM',
15: 'SIGTERM',
16: 'SIGURG',
17: 'SIGSTOP',
18: 'SIGTSTP',
19: 'SIGCONT',
20: 'SIGCHLD',
21: 'SIGTTIN',
22: 'SIGTTOU',
23: 'SIGIO',
24: 'SIGXCPU',
25: 'SIGXFSZ',
26: 'SIGVTALRM',
27: 'SIGPROF',
28: 'SIGWINCH',
29: 'SIGINFO',
30: 'SIGUSR1',
31: 'SIGUSR2'
}
macos_signal_to_debugadapter_reason = {
1: DebugAdapter.STOP_REASON.SIGNAL_HUP,
2: DebugAdapter.STOP_REASON.SIGNAL_INT,
3: DebugAdapter.STOP_REASON.SIGNAL_QUIT,
4: DebugAdapter.STOP_REASON.SIGNAL_ILL,
5: DebugAdapter.STOP_REASON.SIGNAL_TRAP,
6: DebugAdapter.STOP_REASON.SIGNAL_ABRT,
7: DebugAdapter.STOP_REASON.SIGNAL_EMT,
8: DebugAdapter.STOP_REASON.SIGNAL_FPE,
9: DebugAdapter.STOP_REASON.SIGNAL_KILL,
10: DebugAdapter.STOP_REASON.SIGNAL_BUS,
11: DebugAdapter.STOP_REASON.SIGNAL_SEGV,
12: DebugAdapter.STOP_REASON.SIGNAL_SYS,
13: DebugAdapter.STOP_REASON.SIGNAL_PIPE,
14: DebugAdapter.STOP_REASON.SIGNAL_ALRM,
15: DebugAdapter.STOP_REASON.SIGNAL_TERM,
16: DebugAdapter.STOP_REASON.SIGNAL_URG,
17: DebugAdapter.STOP_REASON.SIGNAL_STOP,
18: DebugAdapter.STOP_REASON.SIGNAL_TSTP,
19: DebugAdapter.STOP_REASON.SIGNAL_CONT,
20: DebugAdapter.STOP_REASON.SIGNAL_CHLD,
21: DebugAdapter.STOP_REASON.SIGNAL_TTIN,
22: DebugAdapter.STOP_REASON.SIGNAL_TTOU,
23: DebugAdapter.STOP_REASON.SIGNAL_IO,
24: DebugAdapter.STOP_REASON.SIGNAL_XCPU,
25: DebugAdapter.STOP_REASON.SIGNAL_XFSZ,
26: DebugAdapter.STOP_REASON.SIGNAL_VTALRM,
27: DebugAdapter.STOP_REASON.SIGNAL_PROF,
28: DebugAdapter.STOP_REASON.SIGNAL_WINCH,
29: DebugAdapter.STOP_REASON.SIGNAL_INFO,
30: DebugAdapter.STOP_REASON.SIGNAL_USR1,
31: DebugAdapter.STOP_REASON.SIGNAL_USR2,
}
class DebugAdapterLLDB(gdblike.DebugAdapterGdbLike):
def __init__(self, **kwargs):
gdblike.DebugAdapterGdbLike.__init__(self, **kwargs)
self.os_sig_to_reason = macos_signal_to_debugadapter_reason
# register state
self.reg_info = {}
# address -> True
self.breakpoints = {}
# thread state
self.thread_idx_selected = None
#--------------------------------------------------------------------------
# API
#--------------------------------------------------------------------------
# session start/stop
def exec(self, path, args):
# resolve path to debugserver
path_debugserver = shutil.which('debugserver')
if not path_debugserver:
path_debugserver = '/Library/Developer/CommandLineTools/Library/' + \
'PrivateFrameworks/LLDB.framework/Versions/A/Resources/debugserver'
if not os.path.exists(path_debugserver):
raise Exception('cannot locate debugserver')
# get available port
port = gdblike.get_available_port()
if port == None:
raise Exception('no available ports')
# invoke debugserver
dbg_args = [path_debugserver, 'localhost:%d'%port, path, '--']
dbg_args.extend(args)
#print('args are: ', ' '.join(dbg_args))
try:
subprocess.Popen(dbg_args, stdin=None, stdout=None, stderr=None, preexec_fn=gdblike.preexec)
except Exception:
raise Exception('invoking debugserver (used path: %s)' % path_debugserver)
# connect to it
self.sock = gdblike.connect('localhost', port)
# learn initial registers
self.reg_info_load()
# threads
def thread_list(self):
reply = rsp.tx_rx(self.sock, 'qfThreadInfo', 'ack_then_reply')
if not reply.startswith('m'):
raise DebugAdapter.GeneralError("retrieving thread list from server after qfThreadInfo packet")
tids = reply[1:].split(',')
tids = list(map(lambda x: int(x,16), tids))
return tids
def thread_selected(self):
reply = rsp.tx_rx(self.sock, '?', 'ack_then_reply')
context = rsp.packet_T_to_dict(reply)
if not 'thread' in context:
raise DebugAdapter.GeneralError("setting thread on server after '?' packet")
return context.get('thread')
def thread_select(self, tid):
if not tid in self.thread_list():
raise DebugAdapter.GeneralError("tid 0x%X is not in threads list" % tid)
# changing threads? new regs
self.reg_cache = {}
# set thread for step and continue operations
payload = 'Hc%x' % tid
reply = rsp.tx_rx(self.sock, payload, 'ack_then_ok')
# set thread for other operations
payload = 'Hg%x' % tid
reply = rsp.tx_rx(self.sock, payload, 'ack_then_ok')
# breakpoints
#def breakpoint_set(self, addr):
#def breakpoint_clear(self, addr):
#def breakpoint_list(self):
# register
#def reg_read(self, name):
#def reg_write(self, name, value):
#def reg_list(self):
#def reg_bits(self, name):
# mem
#def mem_read(self, address, length):
#def mem_write(self, address, data):
def mem_modules(self):
module2addr = {}
reply = rsp.tx_rx(self.sock, 'jGetLoadedDynamicLibrariesInfos:{"fetch_all_solibs":true}')
for (addr, path) in re.findall(r'"load_address":(\d+).*?"pathname":"([^"]+)"', reply):
addr = int(addr, 10)
module2addr[path] = addr
return module2addr
# break
#def break_into(self):
#def break_reason(self):
# execution control, all return:
# returns (STOP_REASON.XXX, <extra_info>)
def go(self):
self.reg_cache = {}
return self.go_generic('c', self.handler_async_pkt)
def step_into(self):
self.reg_cache = {}
return self.go_generic('vCont;s', self.handler_async_pkt)
def step_over(self):
# gdb, lldb just doesn't have this, you must synthesize it yourself
self.reg_cache = {}
raise NotImplementedError('step over')
# asynchronously called when inside a "go" to inform us of stdout (and
# possibly other stuff)
def handler_async_pkt(self, pkt):
if pkt.startswith('O'):
msg = pkt[1:]
data = ''.join([chr(int(msg[2*x:2*x+2], 16)) for x in range(int(len(msg)/2))])
if self.cb_stdout is not None:
self.cb_stdout(data)
else:
print(data, end='')
else:
print('handler_async_pkt() got unknown packet: %s' % repr(pkt))
```
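`step_over()` above is left unimplemented because the gdb/lldb remote protocol has no native step-over; it has to be synthesized from single-step/continue plus a temporary breakpoint. A rough sketch against the adapter's own primitives; `next_call_fallthrough()` is a hypothetical helper, since a real version needs a disassembler to find the address after a call instruction:
```python
# Hedged sketch only: synthesize step-over from the adapter's existing primitives.
def step_over_sketch(adapter, next_call_fallthrough):
    pc = adapter.reg_read('rip')            # assumes x86-64 register names
    target = next_call_fallthrough(pc)      # hypothetical helper (needs a disassembler)
    if target is None:
        return adapter.step_into()          # not a call: a plain single step suffices
    adapter.breakpoint_set(target)
    try:
        return adapter.go()                 # run until the temporary breakpoint hits
    finally:
        adapter.breakpoint_clear(target)
```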
#### File: joshwatson/debugger/rsp.py
```python
import re
import socket
# custom exceptions
class RspDisconnected(Exception):
pass
class RspAckMissing(Exception):
pass
class RspExpectedStartOfPacket(Exception):
pass
class RspGeneralError(Exception):
pass
def send_raw(sock, data):
sock.send(data.encode('utf-8'))
def send_packet_data(sock, data):
# packet is exactly "$<data>#<checksum>"
checksum = sum(map(ord, data))
packet = '$' + data + '#' + ("%02x" % (checksum % 256))
send_raw(sock, packet)
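# Example framing: the data 'OK' has byte sum 79 + 75 = 154 = 0x9a, so it goes out
# on the wire as '$OK#9a'.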
def recv_packet_data(sock):
hexes = b'abcdefABCDEF0123456789'
# consume ack's
tmp = b'+'
while tmp == b'+':
tmp = sock.recv(1)
if tmp == b'':
raise RspDisconnected('disconnection while receiving packet')
# start packet
pkt = tmp
if pkt != b'$':
raise RspExpectedStartOfPacket('got instead: %s' % str(pkt))
# consume until '#' and checksum bytes
while not (len(pkt)>=3 and pkt[-3] == ord('#') and pkt[-2] in hexes and pkt[-1] in hexes):
tmp = sock.recv(1)
if tmp == b'':
raise RspDisconnected('disconnection while receiving packet')
pkt = pkt + tmp
# acknowledge
send_raw(sock, '+')
return pkt[1:-3].decode('utf-8')
def consume_ack(sock):
resp = sock.recv(1)
if resp == b'':
raise RspDisconnected('disconnection while waiting for ack')
if resp != b'+':
raise RspAckMissing('got instead: %s' % str(resp))
return b'+'
#def is_connected(sock):
# print('testing RSP connection')
# result = None
# try:
# sock.setblocking(0)
# resp = sock.recv(1, socket.MSG_PEEK)
# sock.setblocking(1)
# result = (resp != '')
# except Exception:
# result = False
#
# print('RSP connection status: %s' % str(result))
def tx_rx(sock, data, expect='ack_then_reply', handler_async_pkt=None):
send_packet_data(sock, data)
reply = None
if expect == 'nothing':
reply = ''
elif expect == 'ack_then_reply':
consume_ack(sock)
reply = recv_packet_data(sock)
elif expect == 'mixed_output_ack_then_reply':
ack_received = False
while 1:
peek1 = sock.recv(1, socket.MSG_PEEK)
if peek1 == b'+':
if ack_received:
raise RspGeneralError('received two acks, something is wrong')
sock.recv(1)
ack_received = True
continue
if peek1 != b'$':
raise RspExpectedStartOfPacket('got: %s' % sock.recv(16))
reply = recv_packet_data(sock)
if reply[0] == 'O':
if handler_async_pkt:
handler_async_pkt(reply)
else:
# return first non-output packet
break
if not ack_received:
raise RspGeneralError('expected ack, none received')
result = reply
elif expect == 'ack_then_ok':
consume_ack(sock)
reply = recv_packet_data(sock)
if reply != 'OK':
raise RspGeneralError('expected OK, got: %s' % reply)
elif expect == 'ack_then_empty':
consume_ack(sock)
reply = recv_packet_data(sock)
if reply != '':
raise RspGeneralError('expected empty, got: %s' % reply)
else:
print('dunno how to expect %s' % expect)
if '*' in reply:
reply = un_rle(reply)
return reply
def send_ack(sock):
packet = b'+'
sock.send(packet)
print(packet.decode('utf-8'), '->')
#--------------------------------------------------------------------------
# GDB RSP FUNCTIONS (HIGHER LEVEL)
#--------------------------------------------------------------------------
def register_scan(sock):
result = [None]*256
for i in range(256):
reply = tx_rx(sock, 'qRegisterInfo%02X' % i, 'ack_then_reply')
if not reply.startswith('name:'):
break
info = {}
for key_vals in reply.split(';'):
if not key_vals:
continue
if not ':' in key_vals:
raise RspGeneralError('expected \':\' in qRegisterInfo reply: %s' % key_vals)
(key, val) = key_vals.split(':')
info[key] = val
#print('reg %d is %s' % (i, name))
result[i] = info
return result
def un_rle(data):
if not '*' in data:
return data
skip = 0
result = ''
for (i,char) in enumerate(data):
if skip:
skip = False
elif char == '*':
repeat = ord(data[i+1])-29
result = result + result[-1]*repeat
skip = True
else:
result += char
return result
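# Example: un_rle('0* ') == '0000' -- '*' repeats the preceding character
# ord(' ') - 29 == 3 more times, per the gdb remote protocol's run-length encoding.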
def packet_T_to_dict(data, lookup_reg={}):
# map the info to a context dictionary
context = {}
context['signal'] = int(data[1:3], 16)
for key_vals in data[3:].split(';'):
if not key_vals:
continue
if not ':' in key_vals:
raise RspGeneralError('expected \':\' in packet T reply: %s' % key_vals)
(key, val) = key_vals.split(':')
val = un_rle(val)
if key == 'thread':
tid = None
if val.startswith('p'):
if not '.' in val:
raise RspGeneralError('expected \'.\' in thread value of packet T reply: %s' % val)
(core_id, thread_id) = val[1:].split('.')
# TODO: deal with cores
context['thread'] = int(thread_id, 16)
else:
context['thread'] = int(val, 16)
elif re.match(r'^[0-9a-fA-F]+$', key):
rid = int(key, 16)
reg_name = lookup_reg.get(rid, 'r%d' % rid)
val = int(''.join(reversed([val[i:i+2] for i in range(0,len(val),2)])), 16)
context[reg_name] = val
else:
# 'metype', 'mecount', 'medata', 'memory', etc.
context[key] = val
return context
``` |
{
"source": "joshwatson/emilator",
"score": 2
} |
#### File: joshwatson/emilator/emilator.py
```python
import struct
import errors
import llilvisitor
import memory
from binaryninja import (LLIL_GET_TEMP_REG_INDEX, LLIL_REG_IS_TEMP,
Architecture, BinaryView, Endianness, ILRegister,
ImplicitRegisterExtend, LowLevelILFunction,
SegmentFlag)
fmt = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'}
def sign_extend(value, bits):
sign_bit = 1 << (bits - 1)
return (value & (sign_bit - 1)) - (value & sign_bit)
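# For example: sign_extend(0xFF, 8) == -1 and sign_extend(0x7F, 8) == 127.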
class Emilator(llilvisitor.LLILVisitor):
def __init__(self, function, view=None):
super(Emilator, self).__init__()
if not isinstance(function, LowLevelILFunction):
raise TypeError('function must be a LowLevelILFunction')
self._function = function
if view is None:
view = BinaryView()
self._view = view
self._regs = {}
self._flags = {}
self._memory = memory.Memory(function.arch.address_size)
for segment in view.segments:
self._memory.map(
segment.start, segment.length, segment.flags,
view.read(segment.start, segment.length)
)
self._function_hooks = {}
self.instr_index = 0
@property
def function(self):
return self._function
@property
def mapped_memory(self):
return list(self._memory)
@property
def registers(self):
return dict(self._regs)
@property
def function_hooks(self):
return dict(self._function_hooks)
@property
def instr_hooks(self):
return dict(self._hooks)
def map_memory(self,
start=None,
length=0x1000,
flags=SegmentFlag.SegmentReadable | SegmentFlag.SegmentWritable,
data=None):
return self._memory.map(start, length, flags, data)
def unmap_memory(self, base, size):
raise errors.UnimplementedError('Unmapping memory not implemented')
def register_function_hook(self, function, hook):
self._function_hooks[function] = hook
def register_instruction_hook(self, operand, hook):
# These hooks will be fallen back on if LLIL_UNIMPLEMENTED
# is encountered
pass
def unregister_function_hook(self, function, hook):
pass
def unregister_instruction_hook(self, operand, hook):
pass
def set_register_value(self, register, value):
# If it's a temp register, just set the value no matter what.
# Maybe this will be an issue eventually, maybe not.
if (isinstance(register, (int, long)) and
LLIL_REG_IS_TEMP(register)):
self._regs[register] = value
return value
if isinstance(register, ILRegister):
if not LLIL_REG_IS_TEMP(register.index):
register = register.name
else:
self._regs[register.index] = value
return value
arch = self._function.arch
reg_info = arch.regs[register]
# normalize value to be unsigned
if value < 0:
value = value + (1 << reg_info.size * 8)
if register == reg_info.full_width_reg:
self._regs[register] = value
return value
full_width_reg_info = arch.regs[reg_info.full_width_reg]
full_width_reg_value = self._regs.get(full_width_reg_info.full_width_reg)
if (full_width_reg_value is None and
(reg_info.extend == ImplicitRegisterExtend.NoExtend or
reg_info.offset != 0)):
raise errors.UndefinedError(
'Register {} not defined'.format(
reg_info.full_width_reg
)
)
if reg_info.extend == ImplicitRegisterExtend.ZeroExtendToFullWidth:
full_width_reg_value = value
elif reg_info.extend == ImplicitRegisterExtend.SignExtendToFullWidth:
full_width_reg_value = (
(value ^ ((1 << reg_info.size * 8) - 1)) -
((1 << reg_info.size * 8) - 1) +
(1 << full_width_reg_info.size * 8)
)
elif reg_info.extend == ImplicitRegisterExtend.NoExtend:
# mask off the value that will be replaced
mask = (1 << reg_info.size * 8) - 1
full_mask = (1 << full_width_reg_info.size * 8) - 1
reg_bits = mask << (reg_info.offset * 8)
full_width_reg_value &= full_mask ^ reg_bits
full_width_reg_value |= value << reg_info.offset * 8
self._regs[full_width_reg_info.full_width_reg] = full_width_reg_value
return full_width_reg_value
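    # Example (hypothetical register layout, NoExtend case): writing 0xAA to a
    # 1-byte sub-register at offset 1 of a full-width register holding
    # 0x11223344 masks out the old byte and ORs in the new value, leaving
    # 0x1122AA44 in the full-width register.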
def get_register_value(self, register):
if (isinstance(register, int) and
LLIL_REG_IS_TEMP(register)):
reg_value = self._regs.get(register)
if reg_value is None:
raise errors.UndefinedError(
'Register {} not defined'.format(
LLIL_GET_TEMP_REG_INDEX(register)
)
)
return reg_value
if isinstance(register, ILRegister):
if not LLIL_REG_IS_TEMP(register.index):
register = register.name
else:
reg_value = self._regs.get(register.index)
if reg_value is None:
raise errors.UndefinedError(
'Register {} not defined'.format(
LLIL_GET_TEMP_REG_INDEX(register)
)
)
return reg_value
reg_info = self._function.arch.regs[register]
full_reg_value = self._regs.get(reg_info.full_width_reg)
if full_reg_value is None:
raise errors.UndefinedError(
'Register {} not defined'.format(
register
)
)
mask = (1 << reg_info.size * 8) - 1
if register == reg_info.full_width_reg:
return full_reg_value & mask
mask = (1 << reg_info.size * 8) - 1
reg_bits = mask << (reg_info.offset * 8)
reg_value = (full_reg_value & reg_bits) >> (reg_info.offset * 8)
return reg_value
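    # Example (hypothetical register layout): if the full-width register holds
    # 0x11223344 and we read a 1-byte sub-register at offset 1, the mask/shift
    # above yields (0x11223344 & 0xFF00) >> 8 == 0x33.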
def set_flag_value(self, flag, value):
self._flags[flag] = value
return value
def get_flag_value(self, flag):
# Assume that any previously unset flag is False
value = self._flags.get(flag, False)
return value
def read_memory(self, addr, length):
if length not in fmt:
raise ValueError('read length must be in (1,2,4,8)')
# XXX: Handle sizes > 8 bytes
pack_fmt = (
# XXX: Endianness string bug
'<' if self._function.arch.endianness == Endianness.LittleEndian
else ''
) + fmt[length]
if addr not in self._memory:
raise errors.MemoryAccessError(
'Address {:x} is not valid.'.format(addr)
)
try:
return struct.unpack(
pack_fmt, self._memory.read(addr, length)
)[0]
except:
raise errors.MemoryAccessError(
'Could not read memory at {:x}'.format(addr)
)
def write_memory(self, addr, data, length=None):
# XXX: This is terribly implemented
if addr not in self._memory:
raise errors.MemoryAccessError(
'Address {:x} is not valid.'.format(addr)
)
if isinstance(data, (int, long)):
if length not in (1, 2, 4, 8):
raise KeyError('length is not 1, 2, 4, or 8.')
# XXX: Handle sizes > 8 bytes
pack_fmt = (
# XXX: Endianness string bug
'<' if self._function.arch.endianness == Endianness.LittleEndian
else ''
) + fmt[length]
data = struct.pack(pack_fmt, data)
self._memory.write(addr, data)
return True
def execute_instruction(self):
# Execute the current IL instruction
instruction = self._function[self.instr_index]
# increment to next instruction (can be changed by instruction)
self.instr_index += 1
self.visit(instruction)
def run(self):
while True:
try:
yield self.execute_instruction()
except IndexError:
if self.instr_index >= len(self.function):
raise StopIteration()
else:
raise
def _find_available_segment(self, size=0x1000, align=1):
new_segment = None
current_address = 0
max_address = (1 << (self._function.arch.address_size * 8)) - 1
align_mask = (1 << (self._function.arch.address_size * 8)) - align
while current_address < (max_address - size):
segment = self._view.get_segment_at(current_address)
if segment is not None:
current_address = (segment.end + align) & align_mask
continue
segment_end = current_address + size - 1
if self._view.get_segment_at(segment_end) is None:
new_segment = current_address
break
return new_segment
def visit_LLIL_SET_REG(self, expr):
value = self.visit(expr.src)
self.set_register_value(expr.dest, value)
return True
def visit_LLIL_CONST(self, expr):
return expr.constant
def visit_LLIL_CONST_PTR(self, expr):
return expr.constant
def visit_LLIL_REG(self, expr):
value = self.get_register_value(expr.src)
return value
def visit_LLIL_LOAD(self, expr):
addr = self.visit(expr.src)
return self.read_memory(addr, expr.size)
def visit_LLIL_STORE(self, expr):
addr = self.visit(expr.dest)
value = self.visit(expr.src)
self.write_memory(addr, value, expr.size)
return True
def visit_LLIL_PUSH(self, expr):
sp = self.function.arch.stack_pointer
value = self.visit(expr.src)
sp_value = self.get_register_value(sp)
self.write_memory(sp_value, value, expr.size)
sp_value -= expr.size
return self.set_register_value(sp, sp_value)
def visit_LLIL_POP(self, expr):
sp = self.function.arch.stack_pointer
sp_value = self.get_register_value(sp)
sp_value += expr.size
value = self.read_memory(sp_value, expr.size)
self.set_register_value(sp, sp_value)
return value
def visit_LLIL_GOTO(self, expr):
self.instr_index = expr.dest
return self.instr_index
def visit_LLIL_IF(self, expr):
condition = self.visit(expr.condition)
if condition:
self.instr_index = expr.true
else:
self.instr_index = expr.false
return condition
def visit_LLIL_CMP_NE(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left != right
def visit_LLIL_CMP_E(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left == right
def visit_LLIL_CMP_SLT(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
if (left & (1 << ((expr.size * 8) - 1))):
left = left - (1 << (expr.size * 8))
if (right & (1 << ((expr.size * 8) - 1))):
right = right - (1 << (expr.size * 8))
return left < right
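    # Illustration: with expr.size == 1, a left operand of 0xFF is
    # reinterpreted as -1, so 0xFF <s 0x01 evaluates to True here.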
def visit_LLIL_CMP_UGT(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left > right
def visit_LLIL_ADD(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
mask = (1 << expr.size * 8) - 1
return (left + right) & mask
def visit_LLIL_AND(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left & right
def visit_LLIL_OR(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left | right
def visit_LLIL_SUB(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left - right
def visit_LLIL_SET_FLAG(self, expr):
flag = expr.dest.index
value = self.visit(expr.src)
return self.set_flag_value(flag, value)
def visit_LLIL_FLAG(self, expr):
flag = expr.src.index
return self.get_flag_value(flag)
def visit_LLIL_RET(self, expr):
# we'll stop for now, but this will need to retrieve the return
# address and jump to it.
raise StopIteration
def visit_LLIL_CALL(self, expr):
target = self.visit(expr.dest)
if target in self._function_hooks:
self._function_hooks[target](self)
return True
target_function = self._view.get_function_at(target)
if not target_function:
self._view.create_user_function(target)
self._view.update_analysis_and_wait()
target_function = self._view.get_function_at(target)
self._function = target_function.low_level_il
self.instr_index = 0
return True
def visit_LLIL_SX(self, expr):
orig_value = self.visit(expr.src)
sign_bit = 1 << ((expr.size * 8) - 1)
extend_value = (orig_value & (sign_bit - 1)) - (orig_value & sign_bit)
return extend_value
def visit_LLIL_ZX(self, expr):
return self.visit(expr.src)
def visit_LLIL_XOR(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left ^ right
def visit_LLIL_LSL(self, expr):
mask = (1 << expr.size * 8) - 1
left = self.visit(expr.left)
right = self.visit(expr.right)
return (left << right) & mask
def visit_LLIL_LSR(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left >> right
if __name__ == '__main__':
il = LowLevelILFunction(Architecture['x86_64'])
emi = Emilator(il)
emi.set_register_value('rbx', -1)
emi.set_register_value('rsp', 0x1000)
print '[+] Mapping memory at 0x1000 (size: 0x1000)...'
emi.map_memory(0x1000, flags=SegmentFlag.SegmentReadable)
print '[+] Initial Register State:'
for r, v in emi.registers.iteritems():
print '\t{}:\t{:x}'.format(r, v)
il.append(il.push(8, il.const(8, 0xbadf00d)))
il.append(il.push(8, il.const(8, 0x1000)))
il.append(il.set_reg(8, 'rax', il.pop(8)))
il.append(il.set_reg(8, 'rbx', il.load(8, il.reg(8, 'rax'))))
print '[+] Instructions:'
for i in range(len(emi.function)):
print '\t'+repr(il[i])
print '[+] Executing instructions...'
for i in emi.run():
print '\tInstruction completed.'
print '[+] Final Register State:'
for r, v in emi.registers.iteritems():
print '\t{}:\t{:x}'.format(r, v)
``` |
{
"source": "joshwatson/f-ing-around-with-binaryninja",
"score": 2
} |
#### File: decompiler/decompiler/debug.py
```python
from __future__ import annotations
from typing import Dict, List
from binaryninja import (BinaryView, BranchType, FlowGraph, FlowGraphNode,
FlowGraphReport, ReportCollection, show_graph_report,
BasicBlockEdge, MediumLevelILBasicBlock, Settings)
from . import mlil_ast
from .nodes import MediumLevelILAstNode
def generate_graph(
view: BinaryView,
region: MediumLevelILAstNode,
collection: ReportCollection = None,
title: str = ''
):
if not Settings().get_bool('linearmlil.debug'):
return
graph = FlowGraph()
def add_children(node: MediumLevelILAstNode) -> FlowGraphNode:
node_node = FlowGraphNode(graph)
graph.append(node_node)
node_line = node.type
if node.type == 'block':
node_line += f': {node.block}'
if node.type == 'break':
node_line += f': {node.start}'
elif node.type in ('seq', 'case'):
node_line += f': {node.start}'
for child in node.nodes:
child_node = add_children(child)
node_node.add_outgoing_edge(
BranchType.UnconditionalBranch,
child_node
)
elif node.type == 'cond':
node_line += f': {node.condition}'
child = add_children(node[True])
node_node.add_outgoing_edge(
BranchType.TrueBranch,
child
)
if node[False] is not None:
child = add_children(node[False])
node_node.add_outgoing_edge(
BranchType.FalseBranch,
child
)
elif node.type == 'switch':
for child in node.cases:
child_node = add_children(child)
node_node.add_outgoing_edge(
BranchType.UnconditionalBranch,
child_node
)
elif node.type == 'loop':
node_line += f': {node.loop_type} {node.condition}'
child_node = add_children(node.body)
node_node.add_outgoing_edge(
BranchType.UnconditionalBranch,
child_node
)
node_node.lines = [node_line]
return node_node
# iterate over regions and create nodes for them
# in the AST
add_children(region)
if collection is not None:
if not title:
title = f' {region.type}: {region.start}'
report = FlowGraphReport(title, graph, view)
collection.append(report)
else:
show_graph_report('Current AST', graph)
def graph_slice(
view: BinaryView,
ns: MediumLevelILBasicBlock,
ne: MediumLevelILBasicBlock,
slice: List[List[BasicBlockEdge]],
collection: ReportCollection,
title: str = '',
):
if not Settings().get_bool('linearmlil.debug'):
return
graph = FlowGraph()
ns_node = FlowGraphNode(graph)
ns_node.lines = [f'Start: {ns.start}']
ne_node = FlowGraphNode(graph)
ne_node.lines = [f'End: {ne.start}']
nodes = {ns.start: ns_node, ne.start: ne_node}
graph.append(ns_node)
graph.append(ne_node)
for path in slice:
for edge in path:
source = edge.source
if source.start in nodes:
source_node = nodes[source.start]
else:
source_node = FlowGraphNode(graph)
source_node.lines = [f'Block: {source.start}']
nodes[source.start] = source_node
graph.append(source_node)
target = edge.target
if target.start in nodes:
target_node = nodes[target.start]
else:
target_node = FlowGraphNode(graph)
target_node.lines = [f'Block: {target.start}']
nodes[target.start] = target_node
graph.append(target_node)
if next(
(
e for e in source_node.outgoing_edges
if e.target == target_node
),
None
):
continue
source_node.add_outgoing_edge(
edge.type,
target_node
)
if collection is not None:
if not title:
title = f'Slice: {ns}->{ne}'
report = FlowGraphReport(title, graph, view)
collection.append(report)
else:
show_graph_report('Graph Slice', graph)
```
#### File: decompiler/decompiler/mlil_ast.py
```python
from __future__ import annotations
from functools import cmp_to_key, reduce
from itertools import product, repeat
from typing import List
from z3 import And, BoolVal, Not, Or, Tactic, is_false, is_true, simplify
from binaryninja import (
BranchType,
InstructionTextTokenType,
MediumLevelILBasicBlock,
MediumLevelILFunction,
MediumLevelILInstruction,
MediumLevelILOperation,
PossibleValueSet,
RegisterValueType,
ReportCollection,
Settings,
log_debug,
log_info,
log_warn,
show_report_collection,
)
from .condition_visitor import ConditionVisitor
from .debug import generate_graph, graph_slice
from .nodes import (
MediumLevelILAstBasicBlockNode,
MediumLevelILAstBreakNode,
MediumLevelILAstCaseNode,
MediumLevelILAstCondNode,
MediumLevelILAstElseNode,
MediumLevelILAstLoopNode,
MediumLevelILAstNode,
MediumLevelILAstSeqNode,
MediumLevelILAstSwitchNode,
)
def region_sort(nodes):
log_debug("region_sort")
log_debug(f"initial: {nodes}")
sorted_region = {}
sorted_region_reverse = {}
for i in range(len(nodes)):
for j in range(i, len(nodes)):
if i == j:
sorted_region[i] = sorted_region.get(i, list())
sorted_region_reverse[i] = sorted_region_reverse.get(i, list())
continue
if nodes[i] < nodes[j]:
if nodes[j] > nodes[i]:
sorted_region[i] = sorted_region.get(i, list())
sorted_region[i].append(j)
sorted_region_reverse[j] = sorted_region_reverse.get(
j, list()
)
sorted_region_reverse[j].append(i)
else:
if nodes[i] > nodes[j]:
sorted_region[j] = sorted_region.get(j, list())
sorted_region[j].append(i)
sorted_region_reverse[i] = sorted_region_reverse.get(
i, list()
)
sorted_region_reverse[i].append(j)
log_debug(f"sorted_region: {sorted_region}")
log_debug(f"sorted_region_reverse: {sorted_region_reverse}")
new_order = []
added = set()
sentinel = 0
# { 0: [4], 1: [0, 2, 3, 4], 2: [0, 4], 3: [0, 2, 4], 4: [] }
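    # With that example mapping, the nodes come out in index order 1, 3, 2, 0, 4:
    # a node is emitted only after everything that must precede it has been emitted.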
while any(j not in added for j in range(len(nodes))):
for i in range(len(nodes)):
if i in added:
continue
if not sorted_region_reverse[i] or all(
x in added for x in sorted_region_reverse[i]
):
added.add(i)
new_order.append(nodes[i])
log_debug(f"current: {new_order}")
log_debug(f"{any(j not in added for j in range(len(nodes)))}")
sentinel += 1
if sentinel > 20:
break
log_debug(f"new_order: {new_order}")
return new_order
class MediumLevelILAst(object):
def __init__(self, function: MediumLevelILFunction):
self._function = function
self.view = function.source_function.view
self._nodes = {}
self._root = MediumLevelILAstBasicBlockNode(
self, function.basic_blocks[0]
)
self._regions = {}
self._reaching_conditions = {}
self._reaching_constraints = {}
self.report_collection = None
self.view.session_data["CurrentAST"] = self
def __getitem__(self, bb) -> MediumLevelILAstNode:
return self._nodes[bb]
def __setitem__(self, bb, node) -> None:
self._nodes[bb] = node
def pop(self, bb) -> MediumLevelILAstNode:
        return self._nodes.pop(bb)
def __contains__(self, bb) -> bool:
return bb in self._nodes
@property
def function(self) -> MediumLevelILFunction:
return self._function
@property
def root(self) -> MediumLevelILAstNode:
return self._root
@root.setter
def root(self, new_root: MediumLevelILAstNode):
if not isinstance(new_root, MediumLevelILAstNode):
raise TypeError(
"new_root must be a MediumLevelILAstNode, got "
f"{type(new_root)}"
)
self._root = new_root
@property
def cycles(self) -> set:
return set(self._cycles)
@property
def regions(self):
return sorted(
self._regions.items(),
key=cmp_to_key(
lambda i, j: 1
if self.reaching_conditions.get((i[0].start, j[0].start)) is None
else -1
),
)
@property
def nodes(self):
return dict(self._nodes)
def case_sort(self, cases: List[MediumLevelILAstCaseNode]):
log_debug("case_sort")
log_debug(f"initial: {cases}\n\n")
fallsthrough = {}
sorted_cases = sorted(cases)
log_debug(f"sorted_cases: {sorted_cases}\n\n")
for i in range(len(sorted_cases)):
for j in range(i, len(sorted_cases)):
if self.reaching_conditions.get(
(sorted_cases[i].start, sorted_cases[j].start)
):
log_debug(
f"i->j {self.reaching_conditions[(sorted_cases[i].start,sorted_cases[j].start)]}"
)
fallsthrough[i] = fallsthrough.get(i, list())
fallsthrough[i].append(j)
elif self.reaching_conditions.get(
(sorted_cases[j].start, sorted_cases[i].start)
):
log_debug(
f"j->i {self.reaching_conditions[(sorted_cases[j].start,sorted_cases[i].start)]}"
)
fallsthrough[j] = fallsthrough.get(j, list())
fallsthrough[j].append(i)
new_sorted = []
        for idx, case in enumerate(sorted_cases):
            if case is None:
                continue
            if idx in fallsthrough:
                others = fallsthrough[idx]
                log_debug(f"fallsthrough[{idx}]: {others}")
                # Collect cases and replace them with None
                # Don't collect if it's already None, because
                # that means we already got it.
                sub_list = (
                    [sorted_cases[idx]]
                    if sorted_cases[idx] is not None
                    else []
                ) + [sorted_cases[o] for o in others if sorted_cases[o] is not None]
                # mark the collected cases as None so they are not
                # collected again (map() is lazy in Python 3, so use a loop)
                for i in [idx] + others:
                    sorted_cases[i] = None
                new_sorted += sub_list
            else:
                new_sorted.append(case)
log_debug(f"{new_sorted}")
return new_sorted
def any_node_dominated(
self,
sub_region: MediumLevelILAstNode,
block: MediumLevelILBasicBlock,
bb: MediumLevelILBasicBlock,
):
log_debug(f"any_node_dominated: {bb} {block} {sub_region}")
to_visit = []
def add_to_visit(_node: MediumLevelILAstNode):
nonlocal to_visit
if _node.type in ("seq", "case"):
to_visit += _node.nodes
elif _node.type == "switch":
to_visit += _node.cases
else:
log_debug(f"add {_node.type} to add_to_visit")
add_to_visit(sub_region)
while to_visit:
node = to_visit.pop()
add_to_visit(node)
if node.type != "case":
continue
log_debug(f"checking {node.block}")
reaching_conditions = self.reaching_conditions.get(
(bb.start, node.start)
)
if reaching_conditions is None:
continue
for rc in reaching_conditions:
targets = [e.target for e in rc]
if block not in targets:
return True
return False
def order_basic_blocks(self):
log_debug("order_basic_blocks")
visited = set()
ordering = []
def order(bb: MediumLevelILBasicBlock):
visited.add(bb)
for o in bb.outgoing_edges:
if not o.back_edge and o.target not in visited:
order(o.target)
ordering.append(bb)
order(self._function.basic_blocks[0])
log_debug(f"ordering: {ordering}")
return ordering
def calculate_reaching_conditions(self):
# TODO: add temporary node such that no_return nodes
# think that they drop into the next region
outgoing_edges = {
bb.start: bb.outgoing_edges
for bb in self._function.basic_blocks
}
reaching_conditions = {}
visited_nodes = set()
# recursive method to create an iterator of the edges in
# the CFG as a DFS
def dfs_next_edge(bb, target):
log_debug(f"--dfs_next_edge({bb})--")
for o in outgoing_edges[bb]:
# log_info(f"yielding {o}")
yield o
if not o.back_edge:
if o.target.start == target:
continue
for t in dfs_next_edge(o.target.start, target):
yield t
for ns, ne in product(self.order_basic_blocks(), repeat=2):
if ns == ne:
continue
# log_info(f"({ns}, {ne})")
dfs_stack = []
visited_edges = set()
visited_nodes = set()
for e in dfs_next_edge(ns.start, ne.start):
# log_info(f" {e.type!r} {e.source} -> {e.target}")
nt = e.target
if e.back_edge:
visited_edges.add((e.source.start, e.target.start))
if (e.source.start, e.target.start) in visited_edges:
# log_info(f" edge in visited_edges")
pass
elif (e.source.start, e.target.start) not in visited_edges and nt.start not in visited_nodes:
# log_info(f" adding edge to edges")
visited_edges.add((e.source.start, e.target.start))
visited_nodes.add(nt.start)
dfs_stack = dfs_stack + [e]
if nt == ne and dfs_stack:
# log_info(f"{nt} == {ne}")
# log_info(" adding finished slice")
reaching_conditions[
(ns.start, ne.start)
] = reaching_conditions.get((ns.start, ne.start), list())
reaching_conditions[(ns.start, ne.start)].append(dfs_stack)
elif (nt.start, ne.start) in reaching_conditions and e not in dfs_stack:
# log_info(" hit simple path, adding finished slice")
reaching_conditions[(ns.start, ne.start)] = reaching_conditions.get(
(ns.start, ne.start), list()
)
reaching_conditions[(ns.start, ne.start)].append(dfs_stack)
visited_edges.add((e.source.start, e.target.start))
if dfs_stack:
# log_info(f" {dfs_stack}")
pass
while len(dfs_stack) and all(
descendant.target.start in visited_nodes
or descendant.source == ne
or descendant.back_edge
                    for descendant in outgoing_edges.get(dfs_stack[-1].target.start, [])
):
# log_info(f" popping {dfs_stack[-1]}")
dfs_stack = dfs_stack[:-1]
visited_nodes.remove(ne.start) if ne.start in visited_nodes else None
if (ns.start, ne.start) in reaching_conditions:
graph_slice(
self.view,
ns,
ne,
reaching_conditions[(ns.start, ne.start)],
self.report_collection,
)
# log_info(f"finished slices: {reaching_conditions[(ns.start, ne.start)]}")
# log_info("-----------")
self._reaching_conditions = reaching_conditions
# this is a modified version of Algorithm 1 in "no more gotos"
@property
def reaching_conditions(self):
return dict(self._reaching_conditions)
@property
def reaching_constraints(self):
return dict(self._reaching_constraints)
def generate(self):
self.report_collection = ReportCollection()
# step 1: identify cycles
self._cycles = {
e.target
for bb in self.function.basic_blocks
for e in bb.outgoing_edges
if e.back_edge
}
# step 2a: generate the reaching conditions
# TODO: Change this to only happen on demand, because there
# are probably a lot of combinations that are never actually
# checked. The results should be cached, and paths that do
# not exist should be represented as `None` so that it knows
# not to try to generate it again.
self.calculate_reaching_conditions()
# step 2b: generate the z3 constraints of these conditions
# TODO: Change this to only happen on demand, because there
# are probably some constraints that never actually need
# to be converted to z3 constraints. Cache the results.
self.generate_reaching_constraints()
# step 3: find all the regions
self._regions = self._find_regions()
generate_graph(
self.view,
self.regions[0][1],
self.report_collection,
"After Step 3",
)
# step 4: merge if/else statements
self._merge_if_else()
generate_graph(
self.view,
self.regions[0][1],
self.report_collection,
"After Step 4",
)
# step 5: remove conditions from nodes that don't need them
self._fold_conditions()
generate_graph(
self.view,
self.regions[0][1],
self.report_collection,
"After Step 5",
)
# step 6: refine loops
self._refine_loops()
generate_graph(
self.view,
self.regions[0][1],
self.report_collection,
"After Step 6",
)
if not Settings().get_bool("linearmlil.debug"):
return
show_report_collection("AST Generation", self.report_collection)
log_debug("finished with AST")
def _find_regions(self):
basic_blocks = self._function.basic_blocks
regions = self._regions
bb_queue = region_sort(
list(MediumLevelILAstBasicBlockNode(self, b) for b in basic_blocks)
)
log_debug(f"{bb_queue}")
while bb_queue:
bb = bb_queue.pop().block
if bb not in self._cycles:
new_region = self._create_acyclic_region(bb, regions, bb_queue)
else:
new_region = self._create_cyclic_region(bb, regions, bb_queue)
if new_region is None:
continue
if self.report_collection is not None:
generate_graph(self.view, new_region, self.report_collection)
log_debug(f"adding {new_region} to regions")
regions[bb] = new_region
if len(regions) > 1:
log_debug(f"Regions is larger than it should be: {regions}")
sorted_regions = region_sort(
[MediumLevelILAstBasicBlockNode(self, r) for r in regions]
)
root_nodes = []
for sr in sorted_regions:
root_nodes.append(regions[sr.block])
del regions[sr.block]
root_region = MediumLevelILAstSeqNode(self, root_nodes)
regions[root_region.block] = root_region
return regions
def _create_acyclic_region(
self, bb: MediumLevelILBasicBlock, regions: dict, bb_queue: list
):
if next((e for e in bb.outgoing_edges if e.back_edge), None):
# a back edge node isn't going to be the root of a region
return
current_node = None
cases = {}
log_debug(
f"{'='*40}\ncreating acyclic region for {bb.start}\n{'='*40}"
)
if bb[-1].operation == MediumLevelILOperation.MLIL_JUMP_TO:
switch = bb[-1].dest.possible_values.mapping
for case, block in switch.items():
log_debug(f"case {case}: {block:x}")
il_block = next(
b
for b in self._function.basic_blocks
if b.source_block.start == block
)
cases[il_block] = cases.get(il_block, list())
cases[il_block].append(case)
# TODO: figure out fall through cases
switch_condition = self._find_switch_condition(
bb[-1].dest, switch.keys()
)
current_node = MediumLevelILAstSwitchNode(
self, switch_condition, bb[-1]
)
else:
current_node = MediumLevelILAstSeqNode(self)
# bb must be the head of an acyclic region (for the moment)
possible_region = {
MediumLevelILAstBasicBlockNode(self, pr)
for pr in self._function.basic_blocks
if (bb.start, pr.start) in self.reaching_conditions
}
nodes = [MediumLevelILAstBasicBlockNode(self, bb)]
regions_in_this_region = {
r for r in possible_region if r.block in self._regions
}
log_debug(f"regions_in_this_region: {regions_in_this_region}")
possible_region = possible_region - regions_in_this_region
log_debug(f"possible_region: {possible_region}")
for r in regions_in_this_region:
r_has_multiple_incoming = len(r.block.incoming_edges) > 1
log_debug(
f"{r} r_has_multiple_constraints: "
f"{r_has_multiple_incoming}"
)
log_debug(
f"{r} {bb} in r.block.dominators: "
f"{bb in r.block.dominators}"
)
sub_region = regions[r.block]
if (
r.block in cases or not r_has_multiple_incoming
) or bb in r.block.dominators:
if self.create_new_node_from_region(
bb, r.block, sub_region, current_node, cases, nodes
):
del regions[r.block]
self.remove_sub_region_nodes(sub_region, possible_region)
for r in possible_region:
log_debug(f"Adding {r} to {bb.start}'s region")
nodes.append(r)
if r.block in bb_queue:
bb_queue.remove(r.block)
if current_node.type == "switch":
current_node._cases = self.case_sort(current_node._cases)
for case in current_node._cases:
# if there are no reaching conditions, then
# this doesn't fall through. Insert a break node.
if all(
self.reaching_conditions.get((case.start, other.start))
is None
for other in current_node._cases
):
case.append(
MediumLevelILAstBreakNode(
self, case.nodes[-1].start, case.nodes[-1].address
)
)
nodes.append(current_node)
current_node = MediumLevelILAstSeqNode(self, nodes)
current_node._nodes = sorted(nodes)
if current_node.type == "seq":
current_node.flatten_sequence()
new_region = current_node
log_debug(f"Returning {new_region}")
return new_region
# TODO: Figure out why this breaks on bomb.bndb Phase 2
def _create_cyclic_region(
self, bb: MediumLevelILBasicBlock, regions: dict, bb_queue: list
) -> MediumLevelILAstNode:
log_debug(f"_create_cyclic_region({bb}, regions, bb_queue)")
log_debug(f"{'='*40}\ncreating cyclic region for {bb.start}\n{'='*40}")
# Section C.1 in whitepaper: Initial Loop Nodes and Successors
latching_nodes = {e.source for e in bb.incoming_edges if e.back_edge}
loop_slice = {
n
for l in latching_nodes
if bb != l
for s in self.reaching_conditions[(bb.start, l.start)]
for n in s
}
loop_nodes = set()
loop_nodes.add(bb)
for e in loop_slice:
loop_nodes.add(e.target)
loop_nodes.add(e.source)
log_debug(f'original loop_nodes: {loop_nodes}')
successor_nodes = {
e.target
for n in loop_nodes
for e in n.outgoing_edges
if e.target not in loop_nodes
}
log_debug(f"original successor_nodes: {successor_nodes}")
# Section C.2 in whitepaper: Successor Refinement and Loop
# Membership
while len(successor_nodes) > 1:
new = set()
old = set()
for successor in sorted(
list(successor_nodes), key=lambda i: i.start
):
# add a successor to the loop if both hold true:
# a) the successor is dominated by the start of the
# loop
# b) the successor's immediate predecessors are all in
# the loop.
if bb in successor.dominators and all(
incoming.source in loop_nodes
for incoming in successor.incoming_edges
):
loop_nodes.add(successor)
old.add(successor)
                    new = new | {
                        e.target
                        for e in successor.outgoing_edges
                        if e.target not in loop_nodes
                    }
if old:
successor_nodes = successor_nodes - old
            if new:
                successor_nodes = successor_nodes | new
else:
break
log_debug(f"final successor_nodes: {successor_nodes}")
log_debug(f"loop_nodes: {loop_nodes}")
loop_node = MediumLevelILAstLoopNode(
self,
MediumLevelILAstSeqNode(
self, [MediumLevelILAstBasicBlockNode(self, bb)]
),
)
loop_nodes = [
MediumLevelILAstBasicBlockNode(self, block) for block in loop_nodes
]
sorted_loop_nodes = region_sort(loop_nodes)
loop_nodes = list(sorted_loop_nodes)
log_debug(f"Sorted loop_nodes: {loop_nodes}")
# remove any regions that are in the loop nodes
nodes = []
while sorted_loop_nodes:
r = sorted_loop_nodes.pop(0)
log_debug(f"Iterating on {r.block}")
if r.block in regions:
log_debug(f"Found {r.block} in regions")
sub_region = regions[r.block]
else:
log_debug(f"{r.block} not in regions, creating Seq Node")
sub_region = MediumLevelILAstSeqNode(
self, [MediumLevelILAstBasicBlockNode(self, r.block)]
)
log_debug(f"Creating node for {r.block}")
if self.create_new_node_from_region(
bb, r.block, sub_region, loop_node, None, nodes
):
if r.block in regions:
log_debug(f"Removing region for {r.block}")
del regions[r.block]
log_debug(f"Removing {sub_region} from loop_nodes")
self.remove_sub_region_nodes(sub_region, sorted_loop_nodes)
for n in loop_nodes:
log_debug(f"Removing {n} from bb_queue")
if n.block in bb_queue:
bb_queue.remove(n.block)
log_debug("Adding break nodes for successors")
successor_cond = None
successor_node = None
for successor in successor_nodes:
log_debug(f"successor: {successor}")
reaching_constraint = self._reaching_constraints.get(
(bb.start, successor.start)
)
break_node = MediumLevelILAstCondNode(
self,
reaching_constraint,
successor[0],
MediumLevelILAstSeqNode(
self,
[
MediumLevelILAstBreakNode(
self, successor.start, successor.source_block.start
)
],
),
)
nodes.append(break_node)
# if successor in self._regions:
# successor_node = self._regions[successor]
# del self._regions[successor]
# else:
# successor_node = MediumLevelILAstSeqNode(
# self,
# [MediumLevelILAstBasicBlockNode(self, successor)],
# )
# convert the successor nodes to a chain of
# condition nodes for each successor
if len(successor_nodes) > 1:
successor_cond = MediumLevelILAstCondNode(
self,
self.reaching_constraints.get((bb.start, successor.start)),
successor.source_block[0],
successor_node,
successor_cond,
)
if successor_cond is not None:
successor_node = successor_cond
# TODO: Is this right?
self._regions[successor_cond.block] = successor_node
body = MediumLevelILAstSeqNode(self, nodes)
loop_node._body = body
if successor_node is not None:
region_nodes = [loop_node, successor_node]
new_region = MediumLevelILAstSeqNode(self, region_nodes)
else:
new_region = loop_node
return new_region
def create_new_node_from_region(
self,
bb: MediumLevelILBasicBlock,
block: MediumLevelILBasicBlock,
sub_region: MediumLevelILAstNode,
current_node: MediumLevelILAstNode,
cases: dict,
nodes: list
):
log_debug(
f"create_new_node_from_region({bb}, {block}, {sub_region}, {current_node})"
)
reaching_constraint = self._reaching_constraints.get((bb.start, block.start))
if is_true(reaching_constraint):
reaching_constraint = None
if reaching_constraint is not None and self.any_node_dominated(
sub_region, block, bb
):
reaching_constraint = None
if reaching_constraint is not None:
if sub_region.type == "loop":
sub_region = MediumLevelILAstSeqNode(self, [sub_region])
# This is now a condition node if a reaching constraint exists
log_debug(
f" Creating new CondNode with {sub_region} {reaching_constraint}\n\n"
)
new_node = MediumLevelILAstCondNode(
self,
reaching_constraint,
block.incoming_edges[0].source[-1],
sub_region,
)
else:
new_node = sub_region
if new_node is not None:
if current_node.type != "switch":
nodes.append(new_node)
else:
if block in cases:
if current_node.block not in new_node.block.dominators:
case = ["default"]
else:
case = cases[block]
current_node.append(
MediumLevelILAstCaseNode(self, case, [new_node])
)
else:
return False
return True
def remove_sub_region_nodes(self, sub_region, possible_region):
if sub_region.type == "seq":
to_remove = sub_region.nodes
elif sub_region.type == "loop":
to_remove = sub_region.body.nodes
else:
raise TypeError(
"I don't know why I got a "
f"{type(sub_region)} for a sub_region"
)
while to_remove:
sub = to_remove.pop()
if sub.type in ("seq", "case"):
to_remove += sub.nodes
elif sub.type == "loop":
to_remove += sub.body.nodes
elif sub.type == "cond":
to_remove.append(sub[True])
elif sub.type == "block":
if sub in possible_region:
log_debug(f"removing {sub} from possible_region")
possible_region.remove(sub)
elif sub.type == "switch":
to_remove += sub.cases
else:
log_debug(f"got {sub} while iterating over to_remove")
def _find_switch_condition(self, dest, cases):
def check_ranges(ranges, cases):
for r in ranges:
for i in range(r.start, r.end, r.step):
if i not in cases:
return False
return True
if dest.operation == MediumLevelILOperation.MLIL_VAR:
dest = self._function.get_ssa_var_definition(dest.ssa_form.src).src
to_visit = dest.operands
result = None
while to_visit:
current_operand = to_visit.pop()
if not isinstance(current_operand, MediumLevelILInstruction):
continue
to_visit += current_operand.operands
pv = current_operand.possible_values
if not isinstance(pv, PossibleValueSet):
continue
if pv.type == RegisterValueType.LookupTableValue:
if (
current_operand.operation
== MediumLevelILOperation.MLIL_VAR
):
to_visit.append(
self._function.get_ssa_var_definition(
current_operand.ssa_form.src
)
)
continue
if pv.type not in (
RegisterValueType.UnsignedRangeValue,
RegisterValueType.SignedRangeValue,
RegisterValueType.InSetOfValues,
):
continue
if pv.type != RegisterValueType.InSetOfValues:
if not check_ranges(pv.ranges, cases):
continue
else:
result = current_operand
break
# If it's InSetOfValues, check to make sure
# all of the values are in pv.values
if all(v in cases for v in pv.values) and len(cases) == len(
pv.values
):
result = current_operand
break
else:
continue
return ConditionVisitor(self.view).simplify(result)
def _merge_if_else(self):
log_debug("_merge_if_else")
nodes_to_remove = True
while nodes_to_remove:
root = self.regions[0][1]
if root.type == "loop":
nodes_to_remove = self.find_if_else_for_node(
root, root.body.nodes
)
else:
nodes_to_remove = self.find_if_else_for_node(root, root.nodes)
for n in nodes_to_remove:
root._nodes.remove(n) if n in root._nodes else None
nodes_to_check = root.nodes if root.type == "seq" else root.body.nodes
nodes_to_remove = []
while nodes_to_check:
if not nodes_to_remove:
node = nodes_to_check.pop()
if node is None:
continue
nodes_to_remove = []
if node.type == "seq":
nodes_to_remove = self.find_if_else_for_node(node, node.nodes)
_nodes = node._nodes
elif node.type == "loop":
nodes_to_remove = self.find_if_else_for_node(
node.body, node.body.nodes
)
_nodes = node.body._nodes
for n in nodes_to_remove:
_nodes.remove(n) if n in _nodes else None
if node.type == "seq":
nodes_to_check += node.nodes
elif node.type == "loop":
nodes_to_check += node.body.nodes
elif node.type == "cond":
nodes_to_check.append(node[True]) if node[True] else None
nodes_to_check.append(node[False]) if node[False] else None
def find_if_else_for_node(self, parent: MediumLevelILAstNode, nodes: list):
log_debug(f"find_if_else_for_node")
nodes_to_check = list(nodes)
nodes_to_remove = []
while nodes_to_check:
ni = nodes_to_check.pop()
if ni.type != "cond":
continue
log_debug(f"checking {ni}")
for nj in nodes_to_check:
if nj.type != "cond":
continue
if ni == nj:
continue
log_debug(f"checking against {nj}")
if self.try_make_simple_if_else(
ni, nj, nodes_to_check, nodes_to_remove
):
break
if self.try_make_complex_if_else(
parent, ni, nj, nodes_to_check, nodes_to_remove
):
break
generate_graph(self.view, ni, self.report_collection)
return nodes_to_remove
def try_make_simple_if_else(
self, node1, node2, nodes_to_check, nodes_to_remove
):
log_debug("try_make_simple_if_else")
cond1 = node1.condition
cond2 = node2.condition
if is_true(simplify(cond1 == Not(cond2))):
log_debug(f"found a simple if/else match")
if cond1.decl().name() == "not":
node2[False] = node1[True]
nodes_to_check.remove(node2)
nodes_to_remove.append(node1)
else:
node1[False] = node2[True]
nodes_to_remove.append(node2)
return True
return False
def try_make_complex_if_else(
self, parent, node1, node2, nodes_to_check, nodes_to_remove
):
log_debug("try_make_complex_if_else")
is_complex_if_else = self.find_c_and_R(
node1.condition, node2.condition
)
if not is_complex_if_else:
return False
else:
c, R = is_complex_if_else
# if we get here, we have a complex if/else
new_if_else_node = MediumLevelILAstCondNode(
self, c, node1._condition_il, node1[True], node2[True]
)
log_debug(f"R is currently {R}")
new_seq_node = MediumLevelILAstSeqNode(self, [new_if_else_node])
if is_true(R):
new_cond_node = new_if_else_node
else:
new_cond_node = MediumLevelILAstCondNode(
self, R, node1._condition_il, new_seq_node
)
log_debug(f"new_cond_node: {new_cond_node}")
if node1 in parent.nodes:
node1_idx = parent.nodes.index(node1)
parent._nodes[node1_idx] = new_cond_node
nodes_to_remove.append(node2)
nodes_to_check.remove(node2)
else:
log_debug(f"{node1} not in parent.nodes")
def find_c_and_R(self, cond1, cond2):
log_debug(f"{cond1} vs {cond2}")
if is_false(cond1) or is_false(cond2):
return False
if cond1.decl().name() != "and":
cond1 = And(cond1, BoolVal(True))
if cond2.decl().name() != "and":
cond2 = And(cond2, BoolVal(True))
to_visit = [(cond1, BoolVal(True))]
while to_visit:
current_cond, right_side = to_visit.pop()
log_debug(f"current: {current_cond} right: {right_side}")
# If the top level operation is not an And, we don't need
# to go any further.
if current_cond.decl().name() != "and":
log_debug(f"current_cond is {current_cond}")
return False
if current_cond.num_args() < 2:
log_debug(f"current_cond is {current_cond}")
return False
# try 0 and R first
c = current_cond.arg(0)
R = And(
*Tactic("ctx-solver-simplify")(
And(current_cond.arg(1), right_side)
)[0]
)
if R.num_args() == 0:
R = BoolVal(True)
log_debug(f"c: {c} R: {R} cond2: {cond2}")
if not Tactic("ctx-solver-simplify")(And(Not(c), R) == cond2)[0]:
log_debug(
f"Found complex if/else (0-1)! {R} and {c} | {cond2}"
)
return c, R
# try again, but the other way
c = current_cond.arg(1)
R = And(
*Tactic("ctx-solver-simplify")(
And(current_cond.arg(0), right_side)
)[0]
)
if R.num_args() == 0:
R = BoolVal(True)
log_debug(f"c: {c} R: {R} cond2: {cond2}")
if not Tactic("ctx-solver-simplify")(And(Not(c), R) == cond2)[0]:
log_debug(
f"Found complex if/else (1-0)! {R} and {c} | {cond2}"
)
return c, R
to_visit = [
(current_cond.arg(0), current_cond.arg(1)),
(current_cond.arg(1), current_cond.arg(0)),
]
return False
def _fold_conditions(self):
root = self.regions[0][1]
if root.type == "seq":
nodes = root.nodes
elif root.type == "loop":
nodes = root.body.nodes
for i, node in enumerate(nodes):
if node.type != "cond" or node[False] is not None:
continue
deps = node[True].block[0].branch_dependence
log_debug(f"Branch Dep for {node[True].block}: {deps}")
if (
next(
(
b
for b in nodes
if b.start in deps and b.type != "loop" and b != node
),
None,
)
is None
):
log_debug("This block doesn't need its condition!")
nodes[i] = MediumLevelILAstSeqNode(self, node[True]._nodes)
if root.type == "seq":
root._nodes = nodes
elif root.type == "loop":
root.body._nodes = nodes
def generate_reaching_constraints(self):
visitor = ConditionVisitor(self.view)
for (
(start, end),
reaching_condition,
) in self.reaching_conditions.items():
or_exprs = []
for condition in reaching_condition:
and_exprs = []
for edge in condition:
if edge.type == BranchType.UnconditionalBranch:
continue
if edge.type == BranchType.TrueBranch:
condition = edge.source[-1].condition
if (
condition.operation
== MediumLevelILOperation.MLIL_VAR
):
condition = self.function.get_ssa_var_definition(
edge.source[-1].ssa_form.condition.src
).src
and_exprs.append(visitor.simplify(condition))
elif edge.type == BranchType.FalseBranch:
condition = edge.source[-1].condition
if (
condition.operation
== MediumLevelILOperation.MLIL_VAR
):
condition = self.function.get_ssa_var_definition(
edge.source[-1].ssa_form.condition.src
).src
and_exprs += Tactic("ctx-solver-simplify")(
Not(visitor.simplify(condition))
)[0]
if and_exprs != []:
or_exprs.append(And(*and_exprs))
if or_exprs:
or_exprs = Tactic("ctx-solver-simplify")(Or(*or_exprs))[0]
reaching_constraint = (
And(*or_exprs)
if len(or_exprs) > 1
else or_exprs[0]
if len(or_exprs)
else BoolVal(True)
)
self._reaching_constraints[(start, end)] = reaching_constraint
def _refine_loops(self):
log_debug("_refine_loops")
to_visit = [r for r in self._regions.values()]
visited = set()
while to_visit:
node = to_visit.pop()
if node.type == "block":
continue
if node in visited:
log_debug(f"wtf, why have I visited {node} already")
break
if node.type == "seq":
to_visit += [n for n in node.nodes if n is not None]
elif node.type == "cond":
to_visit += node[True].nodes if node[True] else []
to_visit += node[False].nodes if node[False] else []
if node.type == "loop":
log_debug(f"{node}")
generate_graph(
self.view,
node,
self.report_collection,
f" {node.start} before refining",
)
while_condition = self._check_while(node)
if while_condition is not None:
self._convert_to_while_loop(node, while_condition)
dowhile_condition = self._check_do_while(node)
if dowhile_condition is not None:
self._convert_to_do_while_loop(node, dowhile_condition)
generate_graph(
self.view,
node,
self.report_collection,
f" {node.start} after refining",
)
to_visit += [n for n in node.body.nodes if n is not None]
def _check_while(self, loop_node: MediumLevelILAstLoopNode):
log_debug("_check_while")
if loop_node.loop_type != "endless":
log_debug(loop_node.loop_type)
return None
if loop_node.body.nodes[0].type != "cond":
log_debug(f"{loop_node.body.nodes[0].type}")
return None
log_debug(f"{loop_node.body.nodes[0][True].nodes}")
if loop_node.body.nodes[0][True].nodes[0].type == "break":
log_debug(f"The loop body is {loop_node.body.nodes}")
return loop_node.body.nodes[0].condition
log_debug(f"{loop_node.body.nodes[0][True].nodes}")
return None
def _convert_to_while_loop(
self, node: MediumLevelILAstLoopNode, while_condition
):
log_debug(f"{node} is a while loop")
node.loop_type = "while"
node.condition = reduce(
And, Tactic("ctx-solver-simplify")(Not(while_condition))[0]
)
break_cond = node.body.nodes[0]
if break_cond[False] is not None:
node.body._nodes[0] = break_cond[False]
# Flatten condition nodes that have the same condition
# as the loop condition
for idx, child in enumerate(node.body.nodes):
if (
isinstance(child, MediumLevelILAstCondNode)
and is_true(simplify(child.condition == node.condition))
and child[False] is None
):
node.body._nodes[idx] = child[True]
    def _check_do_while(self, loop_node: MediumLevelILAstLoopNode):
log_debug("_check_do_while")
log_debug(f"{loop_node.body.nodes}")
if loop_node.loop_type != "endless":
log_debug(loop_node.loop_type)
return None
if loop_node.body.nodes[-1].type != "cond":
log_debug(f"final node is: {loop_node.body.nodes[-1].type}")
return None
log_debug(
f"final cond true node: {loop_node.body.nodes[-1][True].nodes}"
)
if loop_node.body.nodes[-1][True].nodes[0].type == "break":
return loop_node.body.nodes[-1].condition
return None
def _convert_to_do_while_loop(
self, node: MediumLevelILAstLoopNode, dowhile_condition
):
log_debug(f"{node} is a do while loop")
node.loop_type = "dowhile"
node.condition = reduce(
And, Tactic("ctx-solver-simplify")(Not(dowhile_condition))[0]
)
break_cond = node.body.nodes[-1]
if break_cond[False] is not None:
node.body._nodes[-1] = break_cond[False]
else:
node.body._nodes.pop()
# Flatten condition nodes that have the same condition
# as the loop condition
for idx, child in enumerate(node.body.nodes):
if (
isinstance(child, MediumLevelILAstCondNode)
and is_true(simplify(child.condition == node.condition))
and child[False] is None
):
node.body._nodes[idx] = child[True]
log_debug(f"Checking {child} for break condition")
if isinstance(child, MediumLevelILAstCondNode) and is_false(
simplify(And(child.condition, node.condition))
):
break_instr = child[True].nodes[-1].block[-1]
child[True]._nodes.append(
MediumLevelILAstBreakNode(
self, break_instr.instr_index, break_instr.address
)
)
new_loop_condition = self._split_break_condition(
node.condition, child.condition
)
if new_loop_condition is not None:
log_debug(f"new_loop_condition is {new_loop_condition}")
node.condition = new_loop_condition
def _split_break_condition(self, loop_condition, break_condition):
log_debug(f"{loop_condition} vs {break_condition}")
if loop_condition.decl().name() != "and":
loop_condition = And(loop_condition, BoolVal(True))
to_visit = [(loop_condition, BoolVal(True))]
while to_visit:
current_cond, right_side = to_visit.pop()
log_debug(f"current: {current_cond} right: {right_side}")
# If the top level operation is not an And, we don't need
# to go any further.
if current_cond.decl().name() != "and":
log_debug(f"current_cond is {current_cond}")
return None
if current_cond.num_args() < 2:
log_debug(f"current_cond is {current_cond}")
return None
# try 0 and R first
c = current_cond.arg(0)
R = And(
*Tactic("ctx-solver-simplify")(
And(current_cond.arg(1), right_side)
)[0]
)
if R.num_args() == 0:
R = BoolVal(True)
log_debug(f"c: {c} R: {R} break_condition: {break_condition}")
if not Tactic("ctx-solver-simplify")(c == Not(break_condition))[0]:
log_debug(
f"Found break condition (0-1)! "
f"{R} and {c} | {break_condition}"
)
return simplify(R)
# try again, but the other way
c = current_cond.arg(1)
R = And(
*Tactic("ctx-solver-simplify")(
And(current_cond.arg(0), right_side)
)[0]
)
if R.num_args() == 0:
R = BoolVal(True)
log_debug(f"c: {c} R: {R} break_condition: {break_condition}")
if not Tactic("ctx-solver-simplify")(c == Not(break_condition))[0]:
log_debug(
f"Found break condition (1-0)! "
f"{R} and {c} | {break_condition}"
)
return simplify(R)
to_visit = [
(current_cond.arg(0), current_cond.arg(1)),
(current_cond.arg(1), current_cond.arg(0)),
]
return None
def __str__(self):
output = ""
for il in self.root.block:
if il.instr_index != self.root.block.end - 1:
output += f"{il}\n"
to_visit = [
(node, 0)
for header, node in sorted(
self._regions.items(), key=lambda i: i[0].start, reverse=True
)
]
prev_indent = 0
while to_visit:
node, indent = to_visit.pop()
if indent < prev_indent:
output += "\n"
if isinstance(node, MediumLevelILAstSeqNode):
if isinstance(node, MediumLevelILAstElseNode):
output += f'{" "*indent}else:\n'
indent += 4
for il in node.header:
if (
il.instr_index == node.header.end - 1
) and il.operation not in (
MediumLevelILOperation.MLIL_RET,
MediumLevelILOperation.MLIL_RET_HINT,
):
continue
tokens = ""
for t in il.tokens:
if (
t.type
!= InstructionTextTokenType.PossibleAddressToken
):
tokens += t.text
elif self.view.get_symbols(t.value, 1):
tokens += self.view.get_symbols(t.value, 1)[0].name
elif self.view.get_function_at(t.value):
tokens += self.view.get_function_at(t.value).name
else:
tokens += t.text
output += f'{" "*indent}{tokens}\n'
to_visit += zip(reversed(node.children), repeat(indent))
elif isinstance(node, MediumLevelILAstCondNode):
output += f'{" "*indent}if ({node.condition}) then:\n'
to_visit.append((node[False], indent))
to_visit.append((node[True], indent + 4))
elif isinstance(node, MediumLevelILAstSwitchNode):
output += f'{" "*indent}switch({node.switch}):\n'
to_visit += zip(
reversed(sorted(node.cases.items(), key=lambda i: i[0])),
repeat(indent + 4),
)
elif isinstance(node, tuple):
output += f'{" "*indent}case {node[0]}:\n'
to_visit += [(node[1], indent + 4)]
prev_indent = indent
return output
```
#### File: decompiler/decompiler/token_visitor.py
```python
from itertools import chain
from binaryninja import (InstructionTextToken, InstructionTextTokenType,
MediumLevelILOperation, SymbolType, TypeClass,
Variable, log)
from .bnilvisitor import BNILVisitor
class TokenVisitor(BNILVisitor):
def visit(self, expr):
value = super().visit(expr)
if value is None:
return expr.tokens
else:
return value
def visit_MLIL_STORE(self, expr):
tokens = ArrayTokenVisitor().visit(expr.dest)
if not isinstance(tokens, list):
dest_tokens = self.visit(expr.dest)
# Add the '*'
tokens = [
InstructionTextToken(InstructionTextTokenType.TextToken, "*")
]
if len(dest_tokens) == 1:
tokens.extend(dest_tokens)
else:
tokens.extend(
[
InstructionTextToken(
InstructionTextTokenType.TextToken, "("
),
*dest_tokens,
InstructionTextToken(
InstructionTextTokenType.TextToken, ")"
),
]
)
src_tokens = self.visit(expr.src)
tokens.extend(
[
InstructionTextToken(
InstructionTextTokenType.TextToken, " = "
),
*src_tokens,
]
)
return tokens
def visit_MLIL_LOAD(self, expr):
src_tokens = ArrayTokenVisitor().visit(expr.src)
if isinstance(src_tokens, list):
return src_tokens
src_tokens = self.visit(expr.src)
tokens = [
InstructionTextToken(InstructionTextTokenType.TextToken, "*")
]
if len(src_tokens) == 1:
tokens.extend(src_tokens)
else:
tokens.extend(
[
InstructionTextToken(
InstructionTextTokenType.TextToken, "("
),
*src_tokens,
InstructionTextToken(
InstructionTextTokenType.TextToken, ")"
),
]
)
return tokens
def visit_MLIL_SET_VAR(self, expr):
src_tokens = self.visit(expr.src)
return [
InstructionTextToken(
InstructionTextTokenType.LocalVariableToken,
expr.dest.name,
expr.dest.identifier
),
InstructionTextToken(
InstructionTextTokenType.TextToken,
' = '
),
*src_tokens
]
def visit_MLIL_SET_VAR_FIELD(self, expr):
src_tokens = self.visit(expr.src)
dest = expr.dest
offset = expr.offset
size = expr.size
if dest.type.width == size and offset == 0:
return [
InstructionTextToken(
InstructionTextTokenType.LocalVariableToken,
expr.dest.name,
expr.dest.identifier
),
InstructionTextToken(
InstructionTextTokenType.TextToken,
' = '
),
*src_tokens
]
def visit_MLIL_VAR_FIELD(self, expr):
src = expr.src
offset = expr.offset
size = expr.size
if src.type.width == size and offset == 0:
return [
InstructionTextToken(
InstructionTextTokenType.LocalVariableToken,
expr.src.name,
expr.src.identifier
)
]
def visit_MLIL_CALL(self, expr):
log.log_debug(f'visit_MLIL_CALL: {expr}')
output = [
InstructionTextToken(
InstructionTextTokenType.LocalVariableToken,
v.name,
v.identifier
)
for v in expr.output
]
dest = self.visit(expr.dest)
params = [self.visit(p) for p in expr.params]
for p in params[:-1]:
p.append(
InstructionTextToken(
InstructionTextTokenType.TextToken,
', '
)
)
log.log_debug(f'output: {output}')
log.log_debug(f'dest: {dest}')
log.log_debug(f'params: {list(chain(*params))}')
return [
*output,
InstructionTextToken(
InstructionTextTokenType.TextToken,
' = ' if output else ''
),
*dest,
InstructionTextToken(
InstructionTextTokenType.TextToken,
'('
),
*chain(*params),
InstructionTextToken(
InstructionTextTokenType.TextToken,
')'
)
]
def visit_MLIL_MUL(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return [
*left,
InstructionTextToken(
InstructionTextTokenType.TextToken,
' * '
),
*right
]
def visit_MLIL_ZX(self, expr):
return self.visit(expr.src)
def visit_MLIL_CONST_PTR(self, expr):
log.log_debug(f'MLIL_CONST_PTR: {expr.constant:x}')
view = expr.function.source_function.view
symbol = view.get_symbol_at(expr.constant)
string = view.get_string_at(expr.constant)
if string is not None:
return [
InstructionTextToken(
InstructionTextTokenType.StringToken,
repr(string.value),
string.start
)
]
elif symbol is not None:
NormalSymbols = (SymbolType.FunctionSymbol, SymbolType.DataSymbol)
ImportSymbols = (
SymbolType.ImportedFunctionSymbol,
SymbolType.ImportedDataSymbol
)
return [
InstructionTextToken(
(
InstructionTextTokenType.CodeSymbolToken
if symbol.type in NormalSymbols
else InstructionTextTokenType.ImportToken
if symbol.type in ImportSymbols
else InstructionTextTokenType.PossibleAddressToken
),
symbol.short_name,
expr.constant,
size=expr.size,
address=expr.address
)
]
visit_MLIL_CONST = visit_MLIL_CONST_PTR
visit_MLIL_IMPORT = visit_MLIL_CONST_PTR
class ArrayTokenVisitor(BNILVisitor):
def visit_MLIL_CONST(self, expr):
return expr.constant
visit_MLIL_CONST_PTR = visit_MLIL_CONST
def visit_MLIL_VAR(self, expr):
return expr.src
def visit_MLIL_VAR_FIELD(self, expr):
# TODO this is not going to work potentially
return expr.src
def visit_MLIL_LSL(self, expr):
return self.visit(expr.left), self.visit(expr.right)
def visit_MLIL_ADDRESS_OF(self, expr):
return expr.src
def visit_MLIL_ADD(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
if (
not isinstance(left, Variable) or
(
left.type.type_class != TypeClass.ArrayTypeClass and
left.type.type_class != TypeClass.PointerTypeClass and
expr.left.operation != MediumLevelILOperation.MLIL_ADDRESS_OF
)
):
return
if isinstance(right, int):
element_width = left.type.element_type.width
            # the index is the byte offset divided by the element width
            # (e.g. a constant offset of 8 into an array of 4-byte elements is index 2)
            index = right // element_width
elif isinstance(right, tuple):
index_shift = right[1]
index = right[0]
return [
InstructionTextToken(
InstructionTextTokenType.LocalVariableToken,
left.name,
left.identifier
),
InstructionTextToken(
InstructionTextTokenType.TextToken,
'['
),
InstructionTextToken(
InstructionTextTokenType.LocalVariableToken,
index.name,
index.identifier
) if isinstance(index, Variable)
else InstructionTextToken(
InstructionTextTokenType.IntegerToken,
str(index),
index
),
InstructionTextToken(
InstructionTextTokenType.TextToken,
']'
)
]
```
#### File: emulator/emulatorui/buttons.py
```python
import time
from binaryninja import (AddressField, BackgroundTaskThread, ChoiceField,
HighlightStandardColor, Settings,
execute_on_main_thread_and_wait, get_form_input, log)
from binaryninjaui import FileContext, LinearView, UIContext, ViewFrame
from emulator.errors import (UnimplementedOperationError,
UninitializedRegisterError)
from PySide2.QtCore import SIGNAL, QObject
from PySide2.QtGui import QFont, QFontMetrics
from PySide2.QtWidgets import QHBoxLayout, QPushButton, QWidget
from .hooks import add_hook, remove_hook
from .memory import EmulatorMemoryModel, rewrite_segments
from .stack import EmulatorStackModel
from .registers import RegisterEmulatorModel
class EmulatorRunTaskThread(BackgroundTaskThread):
def __init__(self, widget, emulator, il):
self.widget = widget
self.emulator = emulator
self.starting_il = il
super().__init__()
def run(self):
il = self.starting_il
view = self.emulator.view
self.emulator.set_next_instr_index(il.function, il.instr_index)
self.widget.running = True
while self.widget.running:
if (il.function, il.instr_index) in self.emulator.breakpoints:
il.function.source_function.set_user_instr_highlight(
il.address,
HighlightStandardColor.NoHighlightColor
)
view.navigate(view.file.view, il.address)
break
if self.widget.execute_one_instruction(self.emulator, il):
il = self.emulator.current_function[
self.emulator.current_instr_index
]
else:
break
print('Complete')
class EmulatorButton(QPushButton):
def __init__(self, view, label, callback):
super().__init__(label)
self.callback = callback
self.view = view
font_name = Settings().get_string('ui.font.name')
font_size = Settings().get_integer('ui.font.size')
button_font = QFont(font_name, font_size)
fm = QFontMetrics(button_font)
self.setFont(button_font)
self.setFixedWidth(fm.horizontalAdvance(label) + 10)
QObject.connect(self, SIGNAL('clicked()'), self.callback)
class EmulatorButtonsWidget(QWidget):
def __init__(self, parent, view):
super().__init__(parent)
self.view = view
self.view.session_data['emulator.buttons.widget'] = self
self.running = False
self.reset_button = EmulatorButton(view, '♻️', self.reset)
self.reset_button.setToolTip('Reset emulator')
self.run_button = EmulatorButton(view, '▶️', self.run)
self.run_button.setToolTip('Run emulator')
self.run_to_button = EmulatorButton(view, '⏭', self.run_to)
self.run_to_button.setToolTip('Run to set location')
self.set_stop_button = EmulatorButton(view, '⏹', self.set_stop)
self.set_stop_button.setToolTip('Set stop location on address')
self.pause_button = EmulatorButton(view, '⏸', self.pause)
self.pause_button.setToolTip('Pause emulator')
self.step_button = EmulatorButton(view, '⏯', self.step)
self.step_button.setToolTip('Step one disassembly instruction')
self.map_memory_button = EmulatorButton(view, '🗺', self.map_memory)
self.map_memory_button.setToolTip('Map virtual memory')
self.unmap_memory_button = EmulatorButton(view, '🚮', self.unmap_memory)
self.unmap_memory_button.setToolTip('Unmap virtual memory')
self.view_memory_button = EmulatorButton(view, '📈', self.view_memory)
self.view_memory_button.setToolTip('Open memory view')
self.add_hook_button = EmulatorButton(view, '🎣', self.add_hook)
self.add_hook_button.setToolTip('Add instruction hook')
self.remove_hook_button = EmulatorButton(view, '🐟', self.remove_hook)
self.remove_hook_button.setToolTip('Remove instruction hook')
self.button_layout = QHBoxLayout(self)
self.button_layout.addWidget(self.reset_button)
self.button_layout.addWidget(self.run_button)
self.button_layout.addWidget(self.pause_button)
self.button_layout.addWidget(self.run_to_button)
self.button_layout.addWidget(self.set_stop_button)
self.button_layout.addWidget(self.step_button)
self.button_layout.addWidget(self.map_memory_button)
self.button_layout.addWidget(self.unmap_memory_button)
self.button_layout.addWidget(self.view_memory_button)
self.button_layout.addWidget(self.add_hook_button)
self.button_layout.addWidget(self.remove_hook_button)
def get_context(self):
ctx = self.parent().view_frame.actionContext()
if ctx.lowLevelILFunction is not None:
function = ctx.lowLevelILFunction
if ctx.instrIndex == 0xffffffffffffffff:
il = function[0]
else:
il = function[ctx.instrIndex]
elif ctx.mediumLevelILFunction is not None:
if ctx.instrIndex == 0xffffffffffffffff:
il = ctx.mediumLevelILFunction[0].llil.non_ssa_form
else:
il = ctx.mediumLevelILFunction[
ctx.instrIndex
].llil.non_ssa_form
elif ctx.function is not None:
function = ctx.function
il = function.get_low_level_il_at(ctx.address)
return il
def run(self):
emulator = self.view.session_data['emulator']
il = self.get_context()
task = EmulatorRunTaskThread(self, emulator, il)
task.start()
def pause(self):
self.running = False
def run_to(self):
pass
def set_stop(self):
il = self.get_context()
emulator = self.view.session_data['emulator']
emulator.breakpoints.add((il.function, il.instr_index))
il.function.source_function.set_auto_instr_highlight(
il.address,
HighlightStandardColor.RedHighlightColor
)
def reset(self):
self.running = False
emulator = self.view.session_data['emulator']
if (emulator.current_function is not None and
emulator.current_instr_index is not None):
current_il = emulator.current_function[
emulator.current_instr_index
]
emulator.current_function.source_function.set_auto_instr_highlight(
current_il.address,
HighlightStandardColor.NoHighlightColor
)
self.view.session_data["emulator.memory.view"] = rewrite_segments(
self.view
)
model = EmulatorMemoryModel(self.view)
self.view.session_data["emulator.memory.model"] = model
self.view.session_data["emulator.memory.widget"].setModel(model)
model = EmulatorStackModel(self.view)
self.view.session_data['emulator.stack.widget'].setModel(model)
model = RegisterEmulatorModel(self.view)
self.view.session_data['emulator.registers.widget'].setModel(model)
self.view.session_data['emulator.registers.widget'].update()
def step(self):
ctx = self.parent().view_frame.actionContext()
emulator = self.parent().emulator
if ctx.lowLevelILFunction is not None:
function = ctx.lowLevelILFunction
if ctx.instrIndex == 0xffffffffffffffff:
il = function[0]
else:
il = function[ctx.instrIndex]
elif ctx.mediumLevelILFunction is not None:
if ctx.instrIndex == 0xffffffffffffffff:
il = ctx.mediumLevelILFunction[0].llil.non_ssa_form
else:
il = ctx.mediumLevelILFunction[
ctx.instrIndex
].llil.non_ssa_form
elif ctx.function is not None:
function = ctx.function
il = function.get_low_level_il_at(ctx.address)
emulator.set_next_instr_index(
il.function, il.instr_index
)
il_start = il.instr_index
exits = il.function.source_function.get_low_level_il_exits_at(
il.address
)
il_exit = max(
exits
) if exits else il_start
next_il = il
while (il.function == emulator.current_function and
il_start <= emulator.current_instr_index <= il_exit):
if not self.execute_one_instruction(emulator, next_il):
break
if emulator.current_instr_index < len(emulator.current_function):
next_il = emulator.current_function[
emulator.current_instr_index
]
else:
emulator.view.navigate(emulator.view.file.view, next_il.address)
def execute_one_instruction(self, emulator, il):
try:
emulator.execute(il)
except UninitializedRegisterError as e:
print(f'UninitializedRegisterError: {e.reg}')
return False
except UnimplementedOperationError as e:
print(f'UnimplementedOperationError: {e.op!r}')
return False
return True
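        # Returns True when the IL instruction executed successfully; returns False
        # (after printing the reason) when emulation hit an uninitialized register or
        # an unimplemented operation, which stops the current run/step loop.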
def map_memory(self):
start = AddressField('Start (hex):')
length = AddressField('Length (hex):')
flags = ChoiceField(
'Flags',
[
'---',
'--x',
'-w-',
'-wx',
'r--',
'r-x',
'rw-',
'rwx'
]
)
get_form_input([start, length, flags], 'Map Memory')
self.parent().emulator.map_memory(
start.result,
length.result,
flags.result
)
def unmap_memory(self):
start = AddressField('Start (hex):')
length = AddressField('Length (hex):')
get_form_input([start, length], 'Unmap Memory')
self.parent().emulator.unmap_memory(start.result, length.result)
def view_memory(self):
memory_view = self.parent().view.session_data['emulator.memory.view']
ctx = UIContext.activeContext()
linear_view = LinearView(memory_view, None)
memory_view.register_notification(linear_view)
ctx.createTabForWidget('Emulator Memory', linear_view)
def add_hook(self):
emulator = self.parent().view.session_data['emulator']
ctx = UIContext.activeContext()
content = ctx.contentActionHandler()
action_context = content.actionContext()
llil = action_context.lowLevelILFunction
instr_index = action_context.instrIndex
if None in (llil, instr_index) or instr_index == 0xffffffffffffffff:
log.log_alert('LLIL Function/Instruction not selected!')
return
add_hook(emulator, llil[instr_index])
def remove_hook(self):
emulator = self.parent().view.session_data['emulator']
ctx = UIContext.activeContext()
content = ctx.contentActionHandler()
action_context = content.actionContext()
llil = action_context.lowLevelILFunction
instr_index = action_context.instrIndex
if None in (llil, instr_index) or instr_index == 0xffffffffffffffff:
log.log_alert('LLIL Function/Instruction not selected!')
return
remove_hook(emulator, llil[instr_index])
```
#### File: emulator/emulatorui/__init__.py
```python
from binaryninja import (
BinaryView,
LowLevelILFunction,
LowLevelILInstruction,
PluginCommand,
)
from binaryninjaui import DockHandler, LinearView
from . import hooks
from . import emulatorui
from . import memory
from .memory import EmulatorMemoryModel, rewrite_segments
from .stack import EmulatorStackModel
emulatorui.addDockWidget()
memory.addDockWidget()
def load_emulator(view, il):
emulator = view.session_data.get("emulator")
if emulator is None:
return
dock_handler = DockHandler.getActiveDockHandler()
if dock_handler is None:
return
view.session_data["emulator.memory.view"] = rewrite_segments(view)
model = EmulatorMemoryModel(view)
view.session_data["emulator.memory.model"] = model
view.session_data["emulator.memory.widget"].setModel(model)
model = EmulatorStackModel(view)
view.session_data['emulator.stack.widget'].setModel(model)
memory_dock_widget = view.session_data['emulator.memory.dockWidget']
memory_dock_widget.linear_view = LinearView(
view.session_data['emulator.memory.view'], None
)
memory_dock_widget.layout.addWidget(memory_dock_widget.linear_view)
dock_handler.setVisible("BNIL Emulator", True)
def add_hook(view: BinaryView, instruction: LowLevelILInstruction) -> None:
emulator = view.session_data.get("emulator")
if emulator is None:
return
hooks.add_hook(emulator, instruction)
def add_function_hook(view: BinaryView, function: LowLevelILFunction) -> None:
emulator = view.session_data.get("emulator")
if emulator is None:
return
hooks.add_function_hook(emulator, function)
def remove_hook(view: BinaryView, instruction: LowLevelILInstruction) -> None:
emulator = view.session_data.get("emulator")
if emulator is None:
return
hooks.remove_hook(emulator, instruction)
def remove_function_hook(
view: BinaryView, function: LowLevelILFunction
) -> None:
emulator = view.session_data.get("emulator")
if emulator is None:
return
hooks.remove_function_hook(emulator, function)
PluginCommand.register_for_low_level_il_function(
"Emulator\\Load", "Load Emulator", load_emulator
)
PluginCommand.register_for_low_level_il_instruction(
"Emulator\\Add Hook",
"Add an emulator hook for this LLIL instruction",
add_hook,
)
PluginCommand.register_for_low_level_il_function(
"Emulator\\Add Function Hook",
"Add an emulator hook for this LLIL function",
add_function_hook,
)
PluginCommand.register_for_low_level_il_instruction(
"Emulator\\Remove Hook",
"Remove an emulator hook for this LLIL instruction",
remove_hook,
)
PluginCommand.register_for_low_level_il_function(
"Emulator\\Remove Function Hook",
"Remove an emulator hook for this LLIL function",
remove_function_hook,
)
def map_memory(view, start, length, flags):
emulator = view.session_data.get("emulator")
if emulator is None:
return
emulator.map_memory(start, length, flags)
```
#### File: emulator/tests/test_binja.py
```python
from binaryninja import (BinaryView, LowLevelILFunction, PluginCommand,
SegmentFlag)
from emulator import Executor
def setup_stack(view: BinaryView, function: LowLevelILFunction) -> None:
emulator = view.session_data['emulator']
memory_view = view.session_data['emulator.memory.view']
map_start = 0x1000
map_len = 0x10000
while True:
while memory_view.get_segment_at(map_start) is not None:
map_start += 0x1000
if any(
s.start > map_start and
s.start < map_start + map_len
for s in memory_view.segments
):
map_start += 0x1000
continue
emulator.map_memory(
map_start,
map_len,
SegmentFlag.SegmentReadable | SegmentFlag.SegmentWritable
)
break
sp = map_start + map_len - view.address_size
emulator.write_register(view.arch.stack_pointer, sp)
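    # The loop above searches upward from 0x1000 for a map_len-byte region that does
    # not overlap any existing segment, maps it read/write, and then points the stack
    # pointer register at the top of that region (minus one pointer width).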
PluginCommand.register_for_low_level_il_function(
'Emulator\\Setup stack',
'Setup Emulator Stack',
setup_stack,
lambda v, f: v.session_data.get('emulator') is not None
)
```
#### File: f-ing-around-with-binaryninja/function_types/test_function_types.py
```python
from binaryninja import Function, BinaryView, PluginCommand, MediumLevelILOperation, log, Type, FunctionParameter
def fix_printfs(view: BinaryView):
printf = view.get_symbols_by_name('_printf')
if not printf:
printf = view.get_symbols_by_name('printf')
if not printf:
return
for sym in printf:
function = view.get_function_at(sym.address)
if not function:
continue
xrefs = view.get_code_refs(function.start)
for xref in xrefs:
caller: Function = xref.function
call_mlil = caller.get_low_level_il_at(xref.address).mlil
print(call_mlil)
if call_mlil is None:
continue
fmt_operand = call_mlil.params[0]
if fmt_operand.operation == MediumLevelILOperation.MLIL_VAR:
log.log_warn(f"Potential format string bug: {fmt_operand.address:x}")
continue
elif fmt_operand.operation in (MediumLevelILOperation.MLIL_CONST_PTR, MediumLevelILOperation.MLIL_CONST):
fmt_address = fmt_operand.constant
fmt = view.get_ascii_string_at(fmt_address, 2)
if fmt is None:
continue
fmt_value = fmt.value
else:
continue
specifiers = fmt_value.split('%')
param_types = []
for specifier in specifiers[1:]:
if not specifier:
continue
if specifier.startswith('d'):
param_types.append(Type.int(4, sign=True))
elif specifier.startswith('s'):
param_types.append(Type.pointer(view.arch, Type.char()))
elif specifier.startswith('p'):
param_types.append(Type.pointer(view.arch, Type.void()))
else:
log.log_warn(f'Unknown format specifier: {specifier}; skipping')
param_types.append(Type.pointer(view.arch, Type.void()))
param_idx = 1
params = [FunctionParameter(Type.pointer(view.arch, Type.char()), 'fmt')]
for param in param_types:
params.append(FunctionParameter(param, f'arg{param_idx}'))
param_idx += 1
caller.set_call_type_adjustment(xref.address, Type.function(Type.int(4), params))
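            # Illustrative result: for a call like printf("%s=%d\n", name, val) the
            # adjusted type becomes roughly int32_t printf(char* fmt, char* arg1, int32_t arg2),
            # following the specifier-to-type mapping above.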
PluginCommand.register(
'Fix up printf signatures',
'Fix up printf signatures so that the variadic arguments are correctly typed',
fix_printfs
)
```
#### File: unlock/unlock/unlockvisitor.py
```python
import operator as op
import time
from functools import partial
from queue import Queue
from threading import Event
from math import floor
from binaryninja import (
AnalysisCompletionEvent,
Architecture,
ArchitectureHook,
BackgroundTaskThread,
BasicBlock,
BinaryDataNotification,
BinaryReader,
BinaryView,
BranchType,
Function,
FunctionAnalysisSkipOverride,
ILBranchDependence,
InstructionBranch,
InstructionInfo,
LowLevelILBasicBlock,
LowLevelILExpr,
LowLevelILFunction,
LowLevelILOperation,
LowLevelILInstruction,
MediumLevelILBasicBlock,
MediumLevelILFunction,
MediumLevelILInstruction,
MediumLevelILOperation,
PluginCommand,
RegisterValueType,
SectionSemantics,
SSAVariable,
Variable,
VariableSourceType,
enum,
)
from binaryninja import _binaryninjacore as core
from binaryninja import log_debug, log_info, log_warn, worker_enqueue
from .analysis.analyze_exception_handler import (
analyze_exception_handler_set_var,
analyze_exception_handler_store,
)
from .analysis.analyze_folding import analyze_constant_folding, analyze_goto_folding
from .analysis.analyze_indirect_jump import analyze_indirect_jump, analyze_possible_call
from .analysis.analyze_return import analyze_return
from .analysis.analyze_unconditional_jump import analyze_unconditional_jump
from .bnilvisitor import BNILVisitor
from .logging import log_debug
from .state import SEHState
from .exceptionvisitor import ExceptionVisitor
class TargetQueue(Queue):
def put(self, item, block=True, timeout=None):
log_debug(f"putting {item:x} in target queue")
super(TargetQueue, self).put(item, block, timeout)
class UnlockVisitor(BNILVisitor, BackgroundTaskThread):
def __init__(self, function: Function, start: int):
BNILVisitor.__init__(self)
BackgroundTaskThread.__init__(self, f"Deobfuscating {start:x}", True)
self._start: int = start
self.function: Function = function
self.view: BinaryView = function.view
self.address_size = self.view.arch.address_size
self.target_queue = TargetQueue()
self.exception_visitors = {
f.start: ExceptionVisitor(self) for f in self.view.functions
}
self.seen = {}
self.prev_phase = 1
self.num_phases = 3
self.phase = 1
self.target_queue.put(start)
def run(self):
self.run_time = time.time()
while self.phase:
self.start_time = time.time()
while not self.target_queue.empty():
self.addr = None
while not self.target_queue.empty():
self.addr = self.target_queue.get()
if self.addr is not None:
# Attempt to navigate to the location; if we
# can't, then it's not a valid instruction
# currently
# valid = self.view.navigate(self.view.file.view, self.addr)
log_debug(f"checking validity of {self.addr:x}")
valid = (
self.view.get_functions_containing(self.addr) is not None
)
if not valid:
log_debug(f"{self.addr:x} is not valid")
self.addr = None
continue
else:
break
else:
log_debug("target queue has been exhausted")
break
log_debug(f"run for {self.addr:x} started")
# Get a new copy of our Function object, since reanalyzing might
# make dataflow stale
log_debug(f"Getting new function for {self.addr:x}")
self.function = next(
f for f in self.view.get_functions_containing(self.addr)
)
self.fs = Variable(
self.function,
VariableSourceType.RegisterVariableSourceType,
0,
self.function.arch.get_reg_index("fs"),
"fs",
)
il = self.function.get_low_level_il_at(self.addr).mapped_medium_level_il
mmlil = il.function
self.progress = f"[Phase {self.phase}] {self.addr:x} in function {self.function.start:x} ({il.instr_index}/{len(list(mmlil.instructions))})"
while True:
log_debug(
f"[{self.function.start:08x} analyzing {il.instr_index}[{il.address:08x}]: {il}"
)
# self.function.analysis_skipped = True
self.view.begin_undo_actions()
self.seen[il.address] = self.seen.get(il.address, 0) + 1
process_result = self.visit(il)
self.view.commit_undo_actions()
# self.function.analysis_skipped = False
# If it's True or False, then we've finished
# processing this path and want to continue
# processing other paths. If it's an integer,
# then that's the next IL instruction index we
# should analyze. If it's None, then just continue
# on to the next instruction.
if isinstance(process_result, bool):
break
elif isinstance(process_result, int):
next_il = process_result
else:
next_il = il.instr_index + 1
try:
il = mmlil[next_il]
except:
break
log_debug(f"analysis for {il.address:x} finished")
# If process_result is True or False, then something
# was modified and we should update.
if process_result is not None:
log_debug("waiting for analysis to finish")
self.view.update_analysis_and_wait()
log_debug("analysis complete")
# If an analysis forces a phase change, note it
if self.phase != self.prev_phase:
self.end_time = time.time()
log_info(
f"Phase changed from {self.prev_phase} to {self.phase}; Time elapsed: {self.end_time - self.start_time}"
)
self.prev_phase = self.phase
self.start_time = time.time()
log_debug("target queue is empty")
self.end_time = time.time()
log_info(
f"Phase {self.phase} complete; Time elapsed: {self.end_time - self.start_time}"
)
# Iterate the phase. If it hits 0, it will stop
self.prev_phase = self.phase
self.phase = (self.phase + 1) % (self.num_phases + 1)
for func in self.view.functions:
self.target_queue.put(func.start)
print(f"Analysis complete; Time elapsed: {time.time() - self.run_time}")
visit_MLIL_RET = analyze_return
visit_MLIL_RET_HINT = analyze_return
def visit_MLIL_JUMP(self, expr):
result = self.visit(expr.dest.llil)
if result is True:
return result
return self.analyze_indirect_jump(expr)
def visit_MLIL_JUMP_TO(self, expr):
if self.analyze_possible_call(expr):
return True
return self.visit(expr.dest.llil)
visit_MLIL_GOTO = analyze_goto_folding
def visit_MLIL_STORE(self, expr):
return self.exception_visitors[self.function.start].visit(expr)
def visit_MLIL_SET_VAR(self, expr):
if self.phase == 1:
return self.exception_visitors[self.function.start].visit(expr)
elif self.phase > 1:
if expr.src.operation == MediumLevelILOperation.MLIL_VAR and expr.dest == expr.src.src:
self.convert_to_nop(expr.address)
return self.queue_prev_block(expr)
llil_instr = expr.llil
if llil_instr.operation == LowLevelILOperation.LLIL_SET_REG_SSA:
if not expr.function.llil.get_ssa_reg_uses(llil_instr.dest):
if (
llil_instr.non_ssa_form.operation
== LowLevelILOperation.LLIL_SET_REG
):
if (
llil_instr.non_ssa_form.src.operation
!= LowLevelILOperation.LLIL_POP
):
self.convert_to_nop(expr.address)
return self.queue_prev_block(expr)
elif expr.src.operation == MediumLevelILOperation.MLIL_VAR:
pop_var = expr.src.ssa_form.src
push_var_def = expr.ssa_form.function.get_ssa_var_definition(pop_var)
if expr.function.get_ssa_var_uses(push_var_def.dest) == [
expr
]:
self.convert_to_nop(expr.address)
self.convert_to_nop(push_var_def.address)
return self.queue_prev_block(expr)
return self.visit(expr.src)
def visit_LLIL_REG_SSA(self, expr):
log_debug("visit_LLIL_REG_SSA")
if expr.value.type in (
RegisterValueType.ConstantPointerValue,
RegisterValueType.ConstantValue,
):
return self.analyze_constant_folding(expr)
def visit_MLIL_SET_VAR_FIELD(self, expr):
if self.phase > 1:
llil_instr = expr.llil
if llil_instr.operation == LowLevelILOperation.LLIL_SET_REG_SSA_PARTIAL:
if not expr.function.llil.get_ssa_reg_uses(llil_instr.full_reg):
if (
llil_instr.non_ssa_form.operation
== LowLevelILOperation.LLIL_SET_REG
):
if (
llil_instr.non_ssa_form.src.operation
!= LowLevelILOperation.LLIL_POP
):
self.convert_to_nop(expr.address)
return self.queue_prev_block(expr)
return self.visit(expr.src)
def visit_MLIL_IF(self, expr):
log_debug("visit_MLIL_IF")
# is this a stosb or something similar? If so,
# find the largest exit index and start there.
exits = self.function.get_low_level_il_exits_at(expr.address)
if len(exits) > 1:
return max(exits) + 1
return self.analyze_unconditional_jump(expr)
def visit_MLIL_UNDEF(self, expr):
log_debug("visit_MLIL_UNDEF")
# Nothing to do down this path; just get something
# else from the target queue.
return False
def visit_LLIL_LOAD_SSA(self, expr):
return self.visit(expr.src)
def visit_LLIL_ADD(self, expr):
log_debug("visit_LLIL_ADD")
add_value = expr.value
if add_value.type in (
RegisterValueType.ConstantPointerValue,
RegisterValueType.ConstantValue,
):
log_debug(f"add value is {add_value.value:x}")
return self.analyze_constant_folding(expr.left)
else:
log_debug(f"add value is not constant ptr")
return
def visit_LLIL_SUB(self, expr):
log_debug("visit_LLIL_SUB")
sub_value = expr.value
if sub_value.type in (
RegisterValueType.ConstantPointerValue,
RegisterValueType.ConstantValue,
):
log_debug(f"sub value is {sub_value.value:x}")
return self.analyze_constant_folding(expr.left)
else:
log_debug(f"sub value is not constant ptr")
return
def visit_MLIL_SUB(self, expr):
log_debug("visit_MLIL_SUB")
# This is a top level MLIL_SUB, which means it's probably a cmp instruction
if expr.function[expr.instr_index].operation == MediumLevelILOperation.MLIL_SUB:
self.convert_to_nop(expr.address)
return self.queue_prev_block(expr)
if expr.left.value.type in (
RegisterValueType.UndeterminedValue,
RegisterValueType.EntryValue,
):
# Make sure we're not accidentally NOPing a push/pop
# due to the stack being in a bad state due to a weird
# loop
if (
expr.left.operation != MediumLevelILOperation.MLIL_VAR
and expr.left.src.index != self.view.arch.get_reg_index("esp")
):
self.convert_to_nop(expr.address)
return self.queue_prev_block(expr)
sub_value = expr.value
if sub_value.type in (
RegisterValueType.ConstantPointerValue,
RegisterValueType.ConstantValue,
):
log_debug(f"sub value is {sub_value.value:x}")
return self.analyze_constant_folding(expr.left)
else:
log_debug("sub value is not a constant ptr")
return
def visit_MLIL_ADD(self, expr):
log_debug("visit_MLIL_ADD")
if expr.left.value.type in (
RegisterValueType.UndeterminedValue,
RegisterValueType.EntryValue,
):
self.convert_to_nop(expr.address)
return self.queue_prev_block(expr)
add_value = expr.value
if add_value.type in (
RegisterValueType.ConstantPointerValue,
RegisterValueType.ConstantValue,
):
log_debug(f"add value is {add_value.value:x}")
return self.analyze_constant_folding(expr.left)
else:
log_debug("add value is not a constant ptr")
return
def visit_MLIL_CONST(self, expr):
log_debug("visit_MLIL_CONST")
if expr.llil.operation != LowLevelILOperation.LLIL_CONST:
return self.visit(expr.llil)
def visit_MLIL_XOR(self, expr):
log_debug("visit_MLIL_XOR")
# If it's something like `ecx ^ const` and ecx isn't a known
# value, then just erase it. It's not needed at all.
if expr.left.value.type in (
RegisterValueType.UndeterminedValue,
RegisterValueType.EntryValue,
):
self.convert_to_nop(expr.address)
return self.queue_prev_block(expr)
visit_MLIL_AND = visit_MLIL_XOR
def visit_MLIL_OR(self, expr):
log_debug("visit_MLIL_OR")
# If it's something like `ecx | 0` then we can NOP it
# and nothing of value is lost
if expr.right.value.type in (
RegisterValueType.ConstantPointerValue,
RegisterValueType.ConstantValue
) and expr.right.value.value == 0:
self.convert_to_nop(expr.address)
return self.queue_prev_block(expr)
def visit_MLIL_TAILCALL(self, expr):
log_debug("visit_MLIL_TAIL_CALL")
return self.visit(expr.dest.llil)
visit_MLIL_TAILCALL_UNTYPED = visit_MLIL_TAILCALL
analyze_unconditional_jump = analyze_unconditional_jump
analyze_indirect_jump = analyze_indirect_jump
analyze_goto_folding = analyze_goto_folding
analyze_constant_folding = analyze_constant_folding
analyze_possible_call = analyze_possible_call
def convert_to_nop(self, address):
log_debug(f"Nopping {address:x}")
self.view.convert_to_nop(address)
def queue_prev_block(self, expr):
log_debug("queue_prev_block")
if isinstance(expr, MediumLevelILInstruction):
ILBasicBlock = MediumLevelILBasicBlock
elif isinstance(expr, LowLevelILInstruction):
ILBasicBlock = LowLevelILBasicBlock
else:
return
current_bb: ILBasicBlock = next(
bb
for bb in expr.function.basic_blocks
if bb.start <= expr.instr_index < bb.end
)
log_debug(f"current_bb has {len(current_bb.incoming_edges)} incoming edges")
if len(current_bb.incoming_edges) != 1:
log_debug("Incoming Edges was not 1, just continuing")
self.target_queue.put(expr.address)
return True
prev_bb = current_bb.incoming_edges[0].source
while prev_bb[0].operation in (
LowLevelILOperation.LLIL_JUMP_TO,
MediumLevelILOperation.MLIL_JUMP_TO,
MediumLevelILOperation.MLIL_GOTO,
LowLevelILOperation.LLIL_GOTO,
):
if len(prev_bb.incoming_edges) != 1:
log_debug("Incoming edges was not 1, stopping here")
break
log_debug(f"{prev_bb.incoming_edges}")
if prev_bb not in prev_bb.incoming_edges[0].source.dominators:
prev_bb = prev_bb.incoming_edges[0].source
else:
break
self.target_queue.put(prev_bb.il_function[prev_bb.start].address)
return True
``` |
{
"source": "joshwatson/reverse_engineers_toolkit",
"score": 2
} |
#### File: reverse_engineers_toolkit/functions/callgraph.py
```python
from binaryninja import FlowGraph, BinaryDataNotification
from binaryninja.binaryview import BinaryView
from binaryninja.enums import BranchType, InstructionTextTokenType, SymbolType
from binaryninja.flowgraph import FlowGraphNode
from binaryninja.function import DisassemblyTextLine, Function, InstructionTextToken
from binaryninjaui import FlowGraphWidget, ViewType
class CallGraph(FlowGraph):
def __init__(self, function: Function):
FlowGraph.__init__(self)
self.function = function
self.view = function.view
def populate_nodes(self):
func = self.function
view = self.view
nodes = {f: FlowGraphNode(self) for f in view.functions}
for function, node in nodes.items():
if function.symbol.type == SymbolType.ImportedFunctionSymbol:
token_type = InstructionTextTokenType.ImportToken
else:
token_type = InstructionTextTokenType.CodeSymbolToken
node.lines = [
DisassemblyTextLine(
[InstructionTextToken(token_type, function.name, function.start)],
function.start,
)
]
self.append(node)
for function in view.functions:
node = nodes[function]
for callee in set(function.callees):
callee_node = nodes[callee]
node.add_outgoing_edge(BranchType.IndirectBranch, callee_node)
def update(self):
return CallGraph(self.function)
class CallGraphWidget(FlowGraphWidget, BinaryDataNotification):
def __init__(self, parent, view: BinaryView):
self.view = view
if view.entry_function:
self.graph = CallGraph(view.entry_function)
elif view.functions:
self.graph = CallGraph(view.functions[0])
else:
self.graph = None
FlowGraphWidget.__init__(self, parent, view, self.graph)
BinaryDataNotification.__init__(self)
view.register_notification(self)
def navigate(self, address):
self.showAddress(address, True)
return True
def navigateToFunction(self, function, address):
self.showAddress(address, True)
return True
def function_added(self, view, func):
self.graph = self.graph.update()
self.setGraph(self.graph)
def function_removed(self, view, func):
self.graph = self.graph.update()
self.setGraph(self.graph)
def function_updated(self, view, func):
self.graph = self.graph.update()
self.setGraph(self.graph)
class CallGraphViewType(ViewType):
def __init__(self):
ViewType.__init__(self, "Call Graph", "Call Graph View")
def getPriority(self, data, filename):
if data.functions:
return 1
return 0
def create(self, data, view_frame):
return CallGraphWidget(view_frame, data)
``` |
{
"source": "joshwcomeau/knausj_talon",
"score": 3
} |
#### File: knausj_talon/code/switcher.py
```python
from talon import app, Module, Context, actions, ui, imgui
from talon.voice import Capture
import re
import time
import os
# Construct at startup a list of overrides for application names (similar to how the homophone list is managed),
# e.g. for the talon recognition word set `one note`, these switcher functions recognize it as `ONENOTE`.
# The list is a comma separated `<Recognized Words>,<Override>`.
# TODO: Consider putting the list CSVs (homophones.csv, app_name_overrides.csv) together in a separate directory, `knausj_talon/lists`
cwd = os.path.dirname(os.path.realpath(__file__))
overrides_file = os.path.join(cwd, "app_names", f"app_name_overrides.{app.platform}.csv")
overrides = {}
with open(overrides_file, "r") as f:
for line in f:
line = line.rstrip()
line = line.split(",")
overrides[line[0].lower()] = line[1].strip()
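# Illustrative only - a hypothetical app_name_overrides.<platform>.csv could contain lines like:
#   one note,ONENOTE
#   term,Terminal
# which the loop above turns into {"one note": "ONENOTE", "term": "Terminal"}.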
print(f'knausj_talon.switcher------------ app name overrides:{overrides}')
app_cache = {}
mod = Module()
mod.list('running', desc='all running applications')
mod.list('launch', desc='all launchable applications')
@mod.capture
def running_applications(m) -> str:
"Returns a single application name"
@mod.capture
def launch_applications(m) -> Capture:
"Returns a single application name"
ctx = Context()
@ctx.capture(rule='{self.running}')
def running_applications(m):
return m.running
@ctx.capture(rule='{self.launch}')
def launch_applications(m):
return m.launch
def split_camel(word):
return re.findall(r'[0-9A-Z]*[a-z]+(?=[A-Z]|$)', word)
def get_words(name):
words = re.findall(r'[0-9A-Za-z]+', name)
out = []
for word in words:
out += split_camel(word)
return out
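    # Illustrative examples, checked against the regexes above:
    #   get_words("OneNote")  -> ["One", "Note"]
    #   get_words("code.exe") -> ["code", "exe"]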
@mod.action_class
class Actions:
def switcher_focus(name: str):
"""Focus a new application by name"""
for app in ui.apps():
# print(f"--------- app.name:{app.name} app.bundler:{app.bundle}")
if name in app.name and not app.background:
app.focus()
break
def switcher_launch(path: str):
"""Launch a new application by path"""
ui.launch(path=path)
def switcher_list_running():
"""Lists all running applications"""
gui.show()
def switcher_hide_running():
"""Hides list of running applications"""
gui.hide()
@imgui.open(software=False)
def gui(gui: imgui.GUI):
gui.text("Names of running applications")
gui.line()
for line in ctx.lists['self.running']:
gui.text(line)
def update_lists():
running = {}
launch = {}
for cur_app in ui.apps(background=False):
name = cur_app.name
if name.endswith('.exe'):
name = name.rsplit('.', 1)[0]
words = get_words(name)
for word in words:
            if word and word not in running:
running[word.lower()] = cur_app.name
running[name.lower()] = cur_app.name
for override in overrides:
running[override] = overrides[override]
if app.platform == "mac":
for base in '/Applications', '/Applications/Utilities':
for name in os.listdir(base):
path = os.path.join(base, name)
name = name.rsplit('.', 1)[0].lower()
launch[name] = path
words = name.split(' ')
for word in words:
if word and word not in launch:
if len(name) > 6 and len(word) < 3:
continue
launch[word] = path
lists = {
'self.running': running,
'self.launch': launch,
}
#batch update lists
ctx.lists.update(lists)
def ui_event(event, arg):
if event in ('app_activate', 'app_launch', 'app_close', 'win_open', 'win_close'):
# print(f'------------------ event:{event} arg:{arg}')
update_lists()
ui.register('', ui_event)
update_lists()
``` |
{
"source": "joshwearssocks/midnite_conext_monitor",
"score": 2
} |
#### File: joshwearssocks/midnite_conext_monitor/main.py
```python
from modbus_control import ModbusControl
from conext_regmap import Conext, BinaryState
from midnite_classic_regmap import MidniteClassic
from system_manager import SystemManager, DeviceInfo, DATA_FIELDS
from influxdb import InfluxDBClient
import dataclasses
import signal
import time
from typing import Dict, Union, Callable, Optional
from enum import Enum, auto
import logging
CLASSIC_MODBUS_ADDR = 1
CLASSIC_IP = '192.168.1.10'
CLASSIC_PORT = 502
CLASSIC_NAME = 'Midnite Classic'
CONEXT_MODBUS_ADDR = 10
CONEXT_GW_IP = '192.168.2.227'
CONEXT_GW_PORT = 503
CONEXT_NAME = 'Conext XW6848'
INFLUXDB_IP = '192.168.1.2'
INFLUXDB_PORT = 8086
INFLUXDB_DB = 'energy'
logging.basicConfig(level=logging.INFO)
classic = ModbusControl(CLASSIC_MODBUS_ADDR, CLASSIC_IP, CLASSIC_PORT)
conext = ModbusControl(CONEXT_MODBUS_ADDR, CONEXT_GW_IP, CONEXT_GW_PORT)
influx_client = InfluxDBClient(host=INFLUXDB_IP, port=INFLUXDB_PORT, database=INFLUXDB_DB)
class SystemState(Enum):
Waiting_For_Charge = auto()
Invert = auto()
Invert_Sell = auto()
Unknown = auto()
class InverterStateMachine:
system_state = SystemState.Unknown
state_change_time = time.time()
def __init__(self, influx_client: Optional[InfluxDBClient]) -> None:
self.influx_client = influx_client
self.logger = logging.getLogger(self.__class__.__name__)
def update_state(self, state: SystemState) -> None:
"""Writes system state to influxdb."""
self.logger.info(f"Changing system state to {state._name_}")
self.system_state = state
self.state_change_time = time.time()
if self.influx_client:
json_body = [
{
"measurement": "System",
"fields": {
"state": self.system_state._name_
}
}
]
self.influx_client.write_points(json_body)
def detect_initial_state(self, grid_support: str, maximum_sell_amps: float) -> SystemState:
"""Tries to determine what the current system state is."""
if grid_support == 'Disable':
return SystemState.Waiting_For_Charge
elif maximum_sell_amps == 0:
return SystemState.Invert
elif maximum_sell_amps > 0:
return SystemState.Invert_Sell
return SystemState.Unknown
def control_inverter(self, data_dict: Dict[str,DATA_FIELDS]) -> None:
"""Adjusts inverter settings to optimize solar and battery consumption.
This may be better suited for home automation software such as HomeAssistant,
but control of such critical components seems logical to keep in a more
standalone script.
"""
# Only run if both devices are available
if data_dict[CLASSIC_NAME] is None or data_dict[CONEXT_NAME] is None:
return
# Recall some key parameters
soc = data_dict[CLASSIC_NAME]['battery_soc']
watts = data_dict[CLASSIC_NAME]['watts']
maximum_sell_amps = data_dict[CONEXT_NAME]['maximum_sell_amps']
grid_support = data_dict[CONEXT_NAME]['grid_support']
grid_support_voltage = data_dict[CONEXT_NAME]['grid_support_voltage']
v_batt = data_dict[CLASSIC_NAME]['v_batt']
inverter_status = data_dict[CONEXT_NAME]['inverter_status']
combo_charge_stage = data_dict[CLASSIC_NAME]['combo_charge_stage']
# If state is unknown, figure out what the active state is
if self.system_state == SystemState.Unknown:
self.system_state = self.detect_initial_state(grid_support, maximum_sell_amps)
self.logger.info(f"Initial state appears to be {self.system_state._name_}")
# Manage state transitions
try:
            # Start selling if it's sunny and it has been at least a minute since the last state transition
if self.system_state == SystemState.Invert and v_batt > 56 and (time.time() - self.state_change_time > 60):
conext.connect()
conext.set_register(Conext.grid_support_voltage, 55.6)
conext.set_register(Conext.maximum_sell_amps, 21)
self.update_state(SystemState.Invert_Sell)
# Stop selling if we don't have excess power
elif self.system_state == SystemState.Invert_Sell and (watts < 1000 or inverter_status == 'AC_Pass_Through'):
conext.connect()
conext.set_register(Conext.grid_support_voltage, 47)
conext.set_register(Conext.maximum_sell_amps, 0)
self.update_state(SystemState.Invert)
# Stop inverting if battery SOC is too low
elif grid_support == 'Enable' and soc < 60:
conext.connect()
conext.set_register(Conext.grid_support, BinaryState.Disable)
self.update_state(SystemState.Waiting_For_Charge)
# Start inverting again if the charge controller is in absorb state
elif grid_support == 'Disable' and combo_charge_stage == 'Absorb':
conext.connect()
conext.set_register(Conext.grid_support, BinaryState.Enable)
conext.set_register(Conext.grid_support_voltage, 47)
conext.set_register(Conext.maximum_sell_amps, 0)
self.update_state(SystemState.Invert)
# Never fail
except (ValueError, ConnectionError) as e:
print(f"Failed to perform state transition: {e}")
finally:
conext.disconnect()
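# Summary of the transitions implemented in control_inverter, derived from the branches
# above and listed here for reference only:
#   Invert             -> Invert_Sell        when v_batt > 56 V and more than 60 s since the last change
#   Invert_Sell        -> Invert             when watts < 1000 or the inverter reports AC_Pass_Through
#   (grid support on)  -> Waiting_For_Charge when battery SOC drops below 60 %
#   (grid support off) -> Invert             when the charge controller reaches the Absorb stage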
if __name__ == '__main__':
devices = [
DeviceInfo(name=CLASSIC_NAME, control=classic, regmap=MidniteClassic),
DeviceInfo(name=CONEXT_NAME, control=conext, regmap=Conext)
]
manager = SystemManager(devices, influx_client)
state_machine = InverterStateMachine(influx_client)
manager.add_callback(state_machine.control_inverter)
# Start a timer with a 10 second period for monitoring the system
manager.start()
# Idle forever
while True:
time.sleep(1)
``` |
{
"source": "joshwearssocks/rplidar_area_scanner",
"score": 2
} |
#### File: joshwearssocks/rplidar_area_scanner/file_pc_viewer.py
```python
import numpy as np
import argparse
import math
import pathlib
import dataclasses
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from pyqtgraph import functions as fn
from pyqtgraph.Qt import QtCore, QtWidgets
from typing import List
REPORTED_TICKS_PER_REV = 20390
@dataclasses.dataclass
class ScanParams:
ticks_per_rev: int = 20400
up_pitch: float = 90.65
z_tilt: float = 0.54 #0.48
forward_offset: float = 14.5
sideways_offset: float = -1.0
end_point_trim: int = 20
class PointCloudParser:
def __init__(self, txt_path: pathlib.Path):
self.txt_path = txt_path
# Parse the file into lists
self.pitches: List[float] = []
self.distances: List[float] = []
self.qualities: List[int] = []
self.ticks: List[int] = []
self.read_txt_file(self.txt_path)
self.points = np.zeros((len(self.pitches),3))
self.normals = np.zeros((len(self.pitches),3))
self.colors = np.zeros((len(self.pitches),4))
self.prev_ticks = 0
self.tick_rollovers = 0
self.params = ScanParams()
self.parse_all_points()
def read_txt_file(self, txt_path: pathlib.Path):
with open(txt_path, 'r') as txt_file:
lines = txt_file.readlines()
for line in lines:
pitch_str, dist_str, quality_str, ticks_str = line.split(',')
self.pitches.append(float(pitch_str))
self.distances.append(float(dist_str))
self.qualities.append(int(quality_str))
self.ticks.append(int(ticks_str))
def parse_point(self, pitch, dist, ticks):
"""Convert a line to a 3D cartesian point.
__
( --)- <-- FORWARD_OFFSET
|----------------|
"""
if dist == 0:
return None
pitch_corr = math.radians(pitch) + math.radians(self.params.up_pitch)
if abs(dist * math.sin(pitch_corr)) < 30:
return None
if ticks < self.prev_ticks:
self.tick_rollovers += 1
self.prev_ticks = ticks
total_ticks = REPORTED_TICKS_PER_REV * self.tick_rollovers + ticks
theta_pre_tilt = math.radians(360 * (total_ticks % self.params.ticks_per_rev) / self.params.ticks_per_rev)
phi_pre_tilt = pitch_corr
dtheta = math.atan(math.sin(math.radians(self.params.z_tilt))*math.cos(phi_pre_tilt) / math.sin(phi_pre_tilt))
theta = theta_pre_tilt + dtheta
#phi = math.atan(math.sin(phi_pre_tilt)/(math.cos(math.radians(self.params.z_tilt))*math.cos(phi_pre_tilt)*math.cos(dtheta)))
phi = phi_pre_tilt
z = dist * math.cos(phi)
x = (dist * math.sin(phi) + self.params.forward_offset) * math.cos(theta) + (math.sin(theta) * self.params.sideways_offset)
y = (dist * math.sin(phi) + self.params.forward_offset) * math.sin(theta) - (math.cos(theta) * self.params.sideways_offset)
hemisphere = 0
if total_ticks % self.params.ticks_per_rev > self.params.ticks_per_rev / 2:
hemisphere = 1
nz = -1 * math.cos(phi)
nx = -1 * math.sin(phi) * math.cos(theta)
ny = -1 * math.sin(phi) * math.sin(theta)
return ((x, y, z), (nx, ny, nz), hemisphere)
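        # Geometry note: theta (azimuth) comes from the encoder tick count and phi (polar
        # angle) from the lidar pitch plus the fixed up_pitch correction; dtheta folds in
        # the small z_tilt of the spin axis, while the forward/sideways offsets account for
        # the lidar head being mounted off the rotation axis. `hemisphere` records which
        # half revolution the sample came from and is only used later for coloring.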
def parse_all_points(self):
self.prev_ticks = 0
self.tick_rollovers = 0
for i in range(len(self.pitches)):
# Filter out the first and last 20 points
if i < self.params.end_point_trim or len(self.pitches) - i < self.params.end_point_trim:
self.points[i,:] = (0,0,0)
self.normals[i,:] = (0,0,0)
self.colors[i,:] = (0,0,0,0)
continue
tup = self.parse_point(self.pitches[i], self.distances[i], self.ticks[i])
if not tup:
continue
self.points[i,:] = tup[0]
self.normals[i,:] = tup[1]
col = int(150 * i / len(self.pitches)) + 100
self.colors[i,:] = ((355-col)/256, (tup[2]*255)/256, col/256, 0.8)
def write_ply_file(self) -> None:
with open(f"{self.txt_path.stem}.ply", 'w') as f:
f.write('ply\n')
f.write('format ascii 1.0\n')
f.write(f'element vertex {len(self.pitches)}\n')
f.write('property float x\n')
f.write('property float y\n')
f.write('property float z\n')
f.write('property float nx\n')
f.write('property float ny\n')
f.write('property float nz\n')
f.write('property uchar red\n')
f.write('property uchar green\n')
f.write('property uchar blue\n')
f.write('end_header\n')
for i in range(len(self.pitches)):
                # colors are stored as floats in [0, 1]; the header declares uchar,
                # so scale to 0-255 and terminate each vertex record with a newline
                f.write(f'{self.points[i,0]:.3f} {self.points[i,1]:.3f} {self.points[i,2]:.3f} '
                        f'{self.normals[i,0]:.3f} {self.normals[i,1]:.3f} {self.normals[i,2]:.3f} '
                        f'{int(self.colors[i,0]*255)} {int(self.colors[i,1]*255)} {int(self.colors[i,2]*255)}\n')
class ScanVisualizer(QtWidgets.QWidget):
def __init__(self, pcp: PointCloudParser):
super().__init__()
"""
ticks_per_rev: int = 20400
up_pitch: float = 90.65
z_tilt: float = 0.48
forward_offset: float = 14
"""
self.gl = gl.GLViewWidget()
self.pcp = pcp
self.spin_ticks_per_rev = QtWidgets.QSpinBox()
self.spin_ticks_per_rev.setMaximum(30000)
self.spin_ticks_per_rev.setValue(self.pcp.params.ticks_per_rev)
self.spin_up_pitch = QtWidgets.QDoubleSpinBox()
self.spin_up_pitch.setValue(self.pcp.params.up_pitch)
self.spin_up_pitch.setSingleStep(0.05)
self.spin_z_tilt = QtWidgets.QDoubleSpinBox()
self.spin_z_tilt.setValue(self.pcp.params.z_tilt)
self.spin_z_tilt.setSingleStep(0.02)
self.spin_forward_offset = QtWidgets.QDoubleSpinBox()
self.spin_forward_offset.setSingleStep(0.1)
self.spin_forward_offset.setValue(self.pcp.params.forward_offset)
self.spin_sideways_offset = QtWidgets.QDoubleSpinBox()
self.spin_sideways_offset.setMinimum(-10.)
self.spin_sideways_offset.setSingleStep(0.1)
self.spin_sideways_offset.setValue(self.pcp.params.sideways_offset)
self.spin_end_point_trim = QtWidgets.QSpinBox()
self.spin_end_point_trim.setMaximum(100)
self.spin_end_point_trim.setValue(self.pcp.params.end_point_trim)
self.form_layout = QtWidgets.QFormLayout()
self.form_layout.addRow(("Ticks per Rev"), self.spin_ticks_per_rev)
self.form_layout.addRow(("Up Pitch"), self.spin_up_pitch)
self.form_layout.addRow(("Z Tilt"), self.spin_z_tilt)
self.form_layout.addRow(("Forward Offset"), self.spin_forward_offset)
self.form_layout.addRow(("Sideways Offset"), self.spin_sideways_offset)
self.form_layout.addRow(("Endpoint Trim"), self.spin_end_point_trim)
self.button_open = QtWidgets.QPushButton("Open")
self.button_export = QtWidgets.QPushButton("Export")
self.sidebar_layout = QtWidgets.QVBoxLayout()
self.sidebar_layout.addLayout(self.form_layout)
self.sidebar_layout.addWidget(self.button_open)
self.sidebar_layout.addWidget(self.button_export)
self.layout = QtWidgets.QHBoxLayout(self)
self.layout.addWidget(self.gl, 5)
self.layout.addLayout(self.sidebar_layout, 1)
self.spin_ticks_per_rev.valueChanged.connect(self.update)
self.spin_up_pitch.valueChanged.connect(self.update)
self.spin_z_tilt.valueChanged.connect(self.update)
self.spin_forward_offset.valueChanged.connect(self.update)
self.spin_sideways_offset.valueChanged.connect(self.update)
self.spin_end_point_trim.valueChanged.connect(self.update)
self.button_open.released.connect(self.open)
self.button_export.released.connect(self.save)
self.scatter = gl.GLScatterPlotItem(pos=self.pcp.points, color=self.pcp.colors, size=0.01, pxMode=False)
self.gl.addItem(self.scatter)
@QtCore.Slot()
def update(self):
self.pcp.params.ticks_per_rev = self.spin_ticks_per_rev.value()
self.pcp.params.up_pitch = self.spin_up_pitch.value()
self.pcp.params.z_tilt = self.spin_z_tilt.value()
self.pcp.params.forward_offset = self.spin_forward_offset.value()
self.pcp.params.sideways_offset = self.spin_sideways_offset.value()
self.pcp.params.end_point_trim = self.spin_end_point_trim.value()
self.pcp.parse_all_points()
self.scatter.setData(pos=self.pcp.points, color=self.pcp.colors)
@QtCore.Slot()
def open(self):
dlg = QtWidgets.QFileDialog()
dlg.setNameFilter("Text files (*.txt)")
dlg.setFileMode(QtWidgets.QFileDialog.ExistingFile)
if dlg.exec():
file_name = dlg.selectedFiles()[0]
            # re-parse the newly selected file; PointCloudParser expects a pathlib.Path
            self.pcp.__init__(pathlib.Path(file_name))
self.update()
@QtCore.Slot()
def save(self):
self.pcp.write_ply_file()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('text_file', type=str, help="Text file containing point strings")
p = parser.parse_args()
pcp = PointCloudParser(pathlib.Path(p.text_file))
app = pg.mkQApp("3D Scanner Parameter Visualizer")
widget = ScanVisualizer(pcp)
widget.resize(800, 600)
widget.show()
pg.exec()
``` |
{
"source": "joshweir/bert-extractive-summarizer",
"score": 3
} |
#### File: bert-extractive-summarizer/summarizer/model_processors.py
```python
from summarizer.BertParent import BertParent
from typing import List
from summarizer.ClusterFeatures import ClusterFeatures
from abc import abstractmethod
import neuralcoref
from spacy.lang.en import English
import json
class ModelProcessor(object):
def __init__(self,
model='bert-large-uncased',
hidden: int = -2,
reduce_option: str = 'mean',
greedyness: float = 0.45):
self.model = BertParent(model)
self.hidden = hidden
self.reduce_option = reduce_option
self.nlp = English()
self.nlp.add_pipe(self.nlp.create_pipe('sentencizer'))
neuralcoref.add_to_pipe(self.nlp, greedyness=greedyness)
def process_content_sentences(self, body: str, min_length=40,
max_length=600) -> List[str]:
doc = self.nlp(body)._.coref_resolved
doc = self.nlp(doc)
return [
c.string.strip()
for c in doc.sents
if len(c.string.strip()) > min_length and
len(c.string.strip()) < max_length
]
@abstractmethod
def run_clusters(self,
content: List[str],
ratio=0.2,
algorithm='kmeans',
use_first: bool = True) -> List[str]:
raise NotImplementedError("Must Implement run_clusters")
def calculate_ratio_from_num_sentences(self, total_sentences: int,
num_sentences: int) -> float:
if total_sentences <= 0:
return 1.0
if total_sentences > num_sentences * 2:
num_sentences = num_sentences - 1
ratio = num_sentences / total_sentences
if ratio > 1:
ratio = 1.0
return ratio
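        # Worked example: total_sentences=20, num_sentences=5 -> since 20 > 2*5 the
        # requested count is reduced to 4, giving ratio 4/20 = 0.2.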
def run(self,
body: str,
result_format: str = 'text',
num_sentences: int = 0,
ratio: float = 0.2,
min_length: int = 40,
max_length: int = 600,
use_first: bool = True,
algorithm='kmeans') -> str:
sentences = self.process_content_sentences(body, min_length, max_length)
if sentences:
if num_sentences > 0:
ratio = self.calculate_ratio_from_num_sentences(
len(sentences), num_sentences)
sentences = self.run_clusters(sentences, ratio, algorithm, use_first)
if result_format == 'array':
return json.dumps(sentences)
return ' '.join(sentences)
def __call__(self,
body: str,
result_format: str = 'text',
num_sentences: int = 0,
ratio: float = 0.2,
min_length: int = 40,
max_length: int = 600,
use_first: bool = True,
algorithm='kmeans') -> str:
        # forward all arguments so that use_first and algorithm are not silently dropped
        return self.run(body, result_format, num_sentences, ratio, min_length,
                        max_length, use_first, algorithm)
class SingleModel(ModelProcessor):
"""
Deprecated for naming sake.
"""
def __init__(self,
model='bert-large-uncased',
hidden: int = -2,
reduce_option: str = 'mean',
greedyness: float = 0.45):
super(SingleModel, self).__init__(model, hidden, reduce_option, greedyness)
def run_clusters(self,
content: List[str],
ratio=0.2,
algorithm='kmeans',
use_first: bool = True) -> List[str]:
hidden = self.model(content, self.hidden, self.reduce_option)
hidden_args = ClusterFeatures(hidden, algorithm).cluster(ratio)
if use_first:
if hidden_args[0] != 0:
hidden_args.insert(0, 0)
return [content[j] for j in hidden_args]
class Summarizer(SingleModel):
def __init__(self,
model='bert-large-uncased',
hidden: int = -2,
reduce_option: str = 'mean',
greedyness: float = 0.45):
super(Summarizer, self).__init__(model, hidden, reduce_option, greedyness)
``` |
{
"source": "joshweir/fx-economic-data-scraper",
"score": 4
} |
#### File: fx-economic-data-scraper/fxdatascraper/utils.py
```python
import logging
log = logging.getLogger(__name__)
def feet_to_meters(feet):
"""Convert feet to meters."""
try:
value = float(feet)
except ValueError:
log.error("Unable to convert to float: %s", feet)
else:
        return int(0.3048 * value * 10000.0 + 0.5) / 10000.0
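        # Examples (with the rounding fix above):
        #   feet_to_meters("1")    -> 0.3048
        #   feet_to_meters("10.5") -> 3.2004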
``` |
{
"source": "joshweir/sense2vec-rest",
"score": 3
} |
#### File: joshweir/sense2vec-rest/s2v_synonyms.py
```python
import re
import os
from functools import cmp_to_key
MAX_CACHED_KEYS=15
class S2vSynonyms:
# allow_non_cached_keys when set to True will pass the list of keys in the call through to s2v.most_similar
# disregarding most_similar cache. s2v most_similar cache is single key based, multiple keys will be ignored
# except for the last key in the list.
# allow_non_cached_keys when set to False will pass through single key list entries to most_similar,
# multiple keys will be first joined and asserted as existing before passing to most_similar
#
# allow_non_cached_keys defaults to False and will currently throw exception if set to True because:
# * s2v 1.0.2 most_similar will use cache when available, but will always use the cache and if multiple keys
# are sent to most_similar it will just use the last key to collect most_similar entries
# * even if i fix this above (by first checking if the key is in cache and process accordingly (in s2v.most_similar)),
# the most_similar cosine similarity is very slow, would need to run on a gpu
def __init__(self, s2v_util, s2v_key_variations, s2v_key_commonizer, allow_non_cached_keys=False):
self.s2v_util = s2v_util
self.s2v_key_variations = s2v_key_variations
self.s2v_key_commonizer = s2v_key_commonizer
self.allow_non_cached_keys = allow_non_cached_keys
if self.allow_non_cached_keys:
raise ValueError('allow_non_cached_keys cannot currently be truthy, see comment in S2vSynonyms class for more info')
def call(self, d, req_args={}):
return self.most_similar_wrapper(
self.commonize_input(d),
req_args,
)
def commonize_input(self, d):
d_list = None
if isinstance(d, str):
d_list = [d]
elif isinstance(d, list):
d_list = d
elif isinstance(d, dict):
d_list = [d['phrase']] if isinstance(d['phrase'], str) else d['phrase']
else:
raise ValueError("dont recognize type of input: {0} {1}".format(type(d), d))
d_common_input = self.s2v_key_commonizer.call(d_list)
is_proper = d['is_proper'] if 'is_proper' in d else self.s2v_util.phrase_is_proper(list(map(lambda x: self.s2v_util.s2v.split_key(x['wordsense'])[0], d_common_input)))
return { 'phrase': d_common_input, 'is_proper': is_proper }
def most_similar_wrapper(self, d, req_args):
result = []
k_len = len(d['phrase'])
d_keys = list(map(lambda x: x['wordsense'], d['phrase']))
n_results = req_args.get('n') and int(req_args.get('n')) or 10
attempt_phrase_join_for_compound_phrases = req_args.get('attempt-phrase-join-for-compound-phrases')
d_variations = self.s2v_key_variations.call(
d['phrase'],
must_only_phrase_join_for_compound_phrases = attempt_phrase_join_for_compound_phrases,
flag_joined_phrase_variations = True,
phrase_is_proper = d['is_proper'],
limit = 25,
)
current_priority = 1
current_priority_group = []
for d_variation in d_variations:
priority = d_variation['priority']
if priority != current_priority:
result, reached_limit = self.merge_current_priority_group_with_result(
current_priority_group,
result,
n_results,
req_args,
d_keys,
)
current_priority_group = []
if reached_limit:
break
current_priority = priority
d_variation_keys = list(map(lambda x: x['wordsense'], d_variation['key']))
d_variation_keys_words = self.s2v_util.words_only(d_variation['key'])
if os.getenv('S2V_VERBOSE'):
print()
print('k', d_variation_keys, ':')
print()
if len(d_variation_keys) <= 1 or self.allow_non_cached_keys:
for r in self.s2v_util.s2v.most_similar(d_variation_keys, n=min([MAX_CACHED_KEYS, max([n_results * 2, 10])])):
value, score = r
if os.getenv('S2V_VERBOSE'):
print(value, score)
word, sense = self.s2v_util.s2v.split_key(value)
if self.matches_required_properness(word, d['is_proper']):
current_priority_group = self.merge_synonym_result_with_list(current_priority_group, word, sense, score)
result, reached_limit = self.merge_current_priority_group_with_result(
current_priority_group,
result,
n_results,
req_args,
d_keys,
)
return result
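        # The returned value is a list of dicts of the form
        # {'word': <str>, 'sense': <str>, 'score': <float rounded to 3 places>},
        # ordered by descending score within each key-variation priority group.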
def merge_synonym_result_with_list(self, result, word, sense, score):
new_result = []
score = round(float(score), 3)
found = False
for r in result:
if r['word'] == word:
found = True
if score > r['score']:
new_result.append({
'word': word,
'sense': sense,
'score': score,
})
else:
new_result.append(r)
else:
new_result.append(r)
if not found:
new_result.append({
'word': word,
'sense': sense,
'score': score,
})
return new_result
def merge_current_priority_group_with_result(
self,
current_priority_group,
result,
n_results,
req_args,
d_keys,
):
current_priority_group = self.reduce_results_based_on_req_args(current_priority_group, d_keys, req_args)
current_priority_group.sort(key=cmp_to_key(self.sort_by_score))
result_len = len(result)
count_remaining = n_results - result_len
if count_remaining > 0:
result += current_priority_group[:count_remaining]
reached_limit = True if count_remaining <= 0 or len(current_priority_group) >= count_remaining else False
return (result, reached_limit)
def sort_by_score(self, a, b):
if b['score'] > a['score']:
return 1
else:
return -1
def reduce_results_based_on_req_args(self, results, d, req_args):
if req_args.get('reduce-multicase'):
results = self.filter_reduce_multicase(results, d)
# if req_args.get('reduce-multi-wordform'):
# results = self.filter_reduce_multi_wordform(results, d)
if req_args.get('match-input-sense'):
results = self.filter_match_input_sense(results, d)
if req_args.get('reduce-compound-nouns'):
results = self.filter_reduce_compound_nouns(results, d)
if req_args.get('min-word-len'):
results = self.filter_min_word_len(results, int(req_args.get('min-word-len')))
if req_args.get('min-score'):
results = self.filter_min_score(results, float(req_args.get('min-score')))
return results
# remove synonyms that match input sense first word or last word
# like input: foo then remove synonyms like: foo_bar or baz_foo
def filter_reduce_compound_nouns(self, data, d):
result = []
input_list = list(map(self.s2v_util.s2v.split_key, [d] if isinstance(d, str) else d))
if len(input_list) > 1 or not self.s2v_util.is_single_word(input_list[0][0]):
return data
input_value = input_list[0][0]
for item in data:
value_word = item.get('word')
value_sense = item.get('sense')
compound_prefix_pattern = r".\s" + re.escape(input_value) + r"$"
compound_suffix_pattern = r"^" + re.escape(input_value) + r"\s."
if not re.search(
compound_prefix_pattern,
value_word,
re.IGNORECASE,
) and not re.search(
compound_suffix_pattern,
value_word,
re.IGNORECASE,
):
result.append(item)
return result
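        # Illustrative example: for the single input key "war|NOUN", compound results
        # such as "world war" or "war crimes" are dropped by the two patterns above,
        # while single-word synonyms like "conflict" are kept.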
def filter_reduce_multicase(self, data, d):
seen, result = set(), []
input_list = [d] if isinstance(d, str) else d
input_lower = [self.s2v_util.s2v.split_key(d)[0].lower()] if isinstance(d, str) else list(map(lambda x: self.s2v_util.s2v.split_key(x)[0].lower(), d))
for item in data:
value_lower = item.get('word').lower()
if value_lower not in seen and value_lower not in input_lower:
seen.add(value_lower)
result.append(item)
return result
def filter_match_input_sense(self, results, d):
input_list = [d] if isinstance(d, str) else d
if len(input_list) == 1:
term, sense = self.s2v_util.s2v.split_key(input_list[0])
generic_sense = self.s2v_util.get_generic_sense(sense)
if generic_sense == 'unknown':
return results
return list(filter(self.sense_matches_result(generic_sense), results))
# only if all input term senses map to the same sense
# filter on this sense, otherwise return all results
distinct_input_senses = self.s2v_util.uniq(map(self.extract_sense_from_s2v_tuple, d))
if len(distinct_input_senses) > 1:
return results
generic_sense = self.s2v_util.get_generic_sense(distinct_input_senses[0])
if generic_sense == 'unknown':
return results
return list(filter(self.sense_matches_result(generic_sense), results))
def filter_min_score(self, results, min_score):
return list(filter(lambda x: x['score'] >= min_score, results))
def filter_min_word_len(self, results, min_word_len):
return list(filter(lambda x: len(x['word']) >= min_word_len, results))
def sense_matches_result(self, input_sense):
def h(r):
return input_sense == self.s2v_util.get_generic_sense(self.extract_sense_from_result(r))
return h
def extract_sense_from_s2v_tuple(self, d):
return self.s2v_util.s2v.split_key(d)[1]
def extract_sense_from_result(self, d):
return d.get('sense')
def matches_required_properness(self, phrase, is_proper):
if is_proper is None:
return True
phrase_properness = self.s2v_util.phrase_is_proper([phrase])
return phrase_properness == is_proper
# def filter_reduce_multi_wordform(self, data, d):
# seen, result = set(), []
# input_list = list(
# map(s2v.split_key, [d] if isinstance(d, str) else d))
# input_list_reduced_to_lemma = list(
# map(lambda x: [get_lemma(x[0], x[1]), x[1]], input_list))
# for item in data:
# value = item.get('value')
# value_word, value_sense = s2v.split_key(value)
# value_word_lemma = get_lemma(value_word, value_sense)
# value_word_lemma_sense_joined = join_word_and_sense(value_word_lemma, value_sense)
# if value_word_lemma_sense_joined not in seen and [
# value_word_lemma, value_sense
# ] not in input_list_reduced_to_lemma:
# seen.add(value_word_lemma_sense_joined)
# result.append(item)
# return result
if __name__ == '__main__':
from sense2vec import Sense2Vec
from s2v_util import S2vUtil
from s2v_senses import S2vSenses
from s2v_key_case_and_sense_variations import S2vKeyCaseAndSenseVariations
from s2v_key_commonizer import S2vKeyCommonizer
S2V_MODEL_PATH = os.getenv('S2V_MODEL_PATH')
print("loading model from disk..", S2V_MODEL_PATH)
s2v = Sense2Vec().from_disk(S2V_MODEL_PATH)
print("model loaded.")
s2v_util = S2vUtil(s2v)
s2v_senses = S2vSenses(s2v_util)
s2v_key_variations = S2vKeyCaseAndSenseVariations(s2v_util, s2v_senses)
s2v_key_commonizer = S2vKeyCommonizer()
syn_service = S2vSynonyms(s2v_util, s2v_key_variations, s2v_key_commonizer)
req_args = {
'attempt-phrase-join-for-compound-phrases': 1,
'min-score': 0.5,
'n': 10,
'match-input-sense': 1,
'reduce-multicase': 1,
'reduce-compound-nouns': 1,
'min-word-len': 2,
}
k = { 'phrase': ["war|NOUN"], 'is_proper': None }
result = syn_service.call(k, req_args)
print(result)
print()
k = { 'phrase': ["war|NOUN"], 'is_proper': True }
result = syn_service.call(k, req_args)
print(result)
print()
k = ["black|NOUN"]
result = syn_service.call(k, req_args)
print(result)
print()
k = { 'phrase': ["New_York|LOC"], 'is_proper': True }
result = syn_service.call(k, req_args)
print(result)
print()
k = ["big|ADJ", "apple|NOUN"]
result = syn_service.call(k, req_args)
print('should return no results because input phrase is not proper')
print(result)
print()
``` |
{
"source": "joshwestbury/weather-app",
"score": 3
} |
#### File: joshwestbury/weather-app/models.py
```python
import datetime
import os
import peewee
from playhouse.db_url import connect
from playhouse.postgres_ext import JSONField
DB = connect(
os.environ.get('DATABASE_URL', 'postgres://localhost:5432/weather'))
class BaseModel(peewee.Model):
class Meta:
database = DB
class Weather(BaseModel):
city = peewee.CharField(max_length=60)
weather_data = JSONField()
created = peewee.DateTimeField(default=datetime.datetime.utcnow)
def __str__(self):
return self.city
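# A minimal usage sketch (assumes the Postgres database from DATABASE_URL is
# reachable and the table has been created; the payload below is illustrative):
#   DB.create_tables([Weather])
#   Weather.create(city='Boston', weather_data={'temp_f': 54, 'summary': 'cloudy'})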
``` |
{
"source": "joshwfoster/RadioAxionSearch",
"score": 2
} |
#### File: RadioAxionSearch/python/load.py
```python
import sys, os, h5py, corner
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import stats, signal, ndimage, interpolate
import astropy
from astropy.io import fits
###################################################################
### Get the Expected Flux Density from a Calibration Source ###
###################################################################
def get_expected(freq, source):
'''
Calculate the frequency-dependent expected flux
density for a calibration source
:param freq: the radio frequency [GHz]
:param source: the name of the calibration source ['3C286' or '3C48']
:returns: the expected flux density [Jy]
'''
params_3C286 = np.array([1.2481, -0.4507, -0.1798, 0.0357])
params_3C48 = np.array([1.3253, -0.7553, -0.1914, 0.0498])
if source == '3C48':
a0 = params_3C48[0]
a1 = params_3C48[1]
a2 = params_3C48[2]
a3 = params_3C48[3]
elif source == '3C286':
a0 = params_3C286[0]
a1 = params_3C286[1]
a2 = params_3C286[2]
a3 = params_3C286[3]
else:
print('Invalid Source')
return None
return 10**(a0 + a1*np.log10(freq) + a2 * np.log10(freq)**2 + a3 * np.log10(freq)**3)
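# A small usage sketch of the calibration polynomial above; the frequency grid is
# invented for illustration, and only '3C286' and '3C48' are supported sources.
def _example_expected_flux():
    freqs = np.array([0.8, 1.0, 1.4])  # GHz
    return get_expected(freqs, '3C286')  # expected flux densities in Jy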
##########################################################################
### Load the Data From the Data Dictionary and Make the Data Stack ###
##########################################################################
def get_stack(data_dir, data_tag):
data_stack = np.zeros((4, 0))
freqs = np.zeros((0))
xx_sig_accepted = np.zeros((0))
xx_sig_on = np.zeros((0))
xx_sig_off = np.zeros((0))
xx_ref_accepted = np.zeros((0))
xx_ref_on = np.zeros((0))
xx_ref_off = np.zeros((0))
yy_sig_accepted = np.zeros((0))
yy_sig_on = np.zeros((0))
yy_sig_off = np.zeros((0))
yy_ref_accepted = np.zeros((0))
yy_ref_on = np.zeros((0))
yy_ref_off = np.zeros((0))
for filename in os.listdir(data_dir):
if data_tag in filename:
print(filename)
data = np.load(data_dir + filename)['arr_0'].item()
XX = data['XX_Out']
YY = data['YY_Out']
xx_tcal = data['XX_TCal']
yy_tcal = data['YY_TCal']
freqs = np.append(freqs, data['freqs'])
xx_sig_accepted = np.append(xx_sig_accepted, XX[0])
xx_sig_on = np.append(xx_sig_on, XX[1])
xx_sig_off = np.append(xx_sig_off, XX[2])
xx_ref_accepted = np.append(xx_ref_accepted, XX[3])
xx_ref_on = np.append(xx_ref_on, XX[4])
xx_ref_off = np.append(xx_ref_off, XX[5])
yy_sig_accepted = np.append(yy_sig_accepted, YY[0])
yy_sig_on = np.append(yy_sig_on, YY[1])
yy_sig_off = np.append(yy_sig_off, YY[2])
yy_ref_accepted = np.append(yy_ref_accepted, YY[3])
yy_ref_on = np.append(yy_ref_on, YY[4])
yy_ref_off = np.append(yy_ref_off, YY[5])
sort_order = np.argsort(freqs)
freqs = freqs[sort_order]
xx_sig_accepted = xx_sig_accepted[sort_order]
xx_ref_accepted = xx_ref_accepted[sort_order]
yy_sig_accepted = yy_sig_accepted[sort_order]
yy_ref_accepted = yy_ref_accepted[sort_order]
xx_sig_on = xx_sig_on[sort_order]
xx_sig_off = xx_sig_off[sort_order]
xx_sig = (xx_sig_on + xx_sig_off) /2.
xx_ref_on = xx_ref_on[sort_order]
xx_ref_off = xx_ref_off[sort_order]
xx_ref = (xx_ref_on + xx_ref_off) / 2.
yy_sig_on = yy_sig_on[sort_order]
yy_sig_off = yy_sig_off[sort_order]
yy_sig = (yy_sig_on + yy_sig_off) /2.
yy_ref_on = yy_ref_on[sort_order]
yy_ref_off = yy_ref_off[sort_order]
yy_ref = (yy_ref_on + yy_ref_off) /2.
xx_ref_tsys = xx_tcal * ( xx_ref_off / (xx_ref_on - xx_ref_off) + .5)
yy_ref_tsys = yy_tcal * ( yy_ref_off / (yy_ref_on - yy_ref_off) + .5)
ref_tsys = (xx_ref_tsys + yy_ref_tsys) / 2
xx_sig_tsys = xx_tcal * ( xx_sig_off / (xx_sig_on - xx_sig_off) + .5)
yy_sig_tsys = yy_tcal * ( yy_sig_off / (yy_sig_on - yy_sig_off) + .5)
sig_tsys = (xx_sig_tsys + yy_sig_tsys) / 2
xx_ta = (xx_sig - xx_ref) / xx_ref * ndimage.median_filter(xx_ref_tsys, size = 31)
yy_ta = (yy_sig - yy_ref) / yy_ref * ndimage.median_filter(yy_ref_tsys, size = 31)
ta = (xx_ta + yy_ta) / 2
total_temp = ref_tsys + sig_tsys + ta
sig = (xx_sig + yy_sig)/2
ref = (xx_ref + yy_ref)/2
data_stack = np.vstack((freqs, ta, sig, ref, xx_sig_accepted, xx_ref_accepted, yy_sig_accepted, yy_ref_accepted))
return data_stack
##################################################
### Load the Data at Specified Downbinning ###
##################################################
def downsample_stack(stack, downsample, shift = 0):
freqs = stack[0][shift:]
ta = stack[1][shift:]
sig = stack[2][shift:]
ref = stack[3][shift:]
xx_sig_accepted = stack[4][shift:]
xx_ref_accepted = stack[5][shift:]
yy_sig_accepted = stack[6][shift:]
yy_ref_accepted = stack[7][shift:]
sig_accepted = (xx_sig_accepted + yy_sig_accepted) / 2
ref_accepted = (xx_ref_accepted + yy_ref_accepted) / 2
max_index = len(freqs) // downsample * downsample
num_intervals = len(freqs) // downsample
out = np.zeros((6, num_intervals))
for i, item in enumerate([freqs, ta, sig, ref, sig_accepted, ref_accepted]):
item = np.mean(item[:max_index].reshape(num_intervals, downsample), axis = 1)
out[i] = np.copy(item)
return out
def load(data_dir, data_tag, downsample = 1, do_shift = False):
if do_shift:
return downsample_stack(get_stack(data_dir, data_tag), downsample, shift = downsample // 2)
else:
return downsample_stack(get_stack(data_dir, data_tag), downsample)
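# Usage sketch (the path and tag are illustrative): build a stack from every saved
# NumPy archive in data_dir whose filename contains data_tag, averaged down by 64 channels.
def _example_load():
    return load('./data/', 'session_a_', downsample=64)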
``` |
{
"source": "JoshWidrick/RPi_LED",
"score": 3
} |
#### File: RPi_LED/web/panel.py
```python
from flask import Flask, request, render_template, redirect, flash
import requests, json
import sys
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secr3t'
STATUS_KEY = ['mode', '<KEY>', 'sb', 'brightness', 'power', 'speed', 'wait_time', 'percentage', 'spercentage']
CONTROLLER_KEY = ['name', 'ip', 'port']
def import_controllers():
retval = {}
for i in get_controllers():
retval[i[0]] = [i[1], i[2]]
return retval
def get_controllers():
with open("./file/controllers.txt", "r") as f:
try:
if f is None:
return []
return [l.strip().split(',') for l in f.readlines()]
except:
return 'failed'
def add_controller(name, ip, port):
with open("./file/controllers.txt", "a") as f:
try:
f.write(f'{name},{ip},{port}\n')
print(f'{name},{ip},{port}')
return 'Success'
except Exception as e:
return 'Failed' + str(e)
def import_status(resp_status):
listx = resp_status.split(',')
retval = {'list': listx}
count = 0
for item in listx:
retval[STATUS_KEY[count]] = item
count = count + 1
return retval
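# Illustration of the status protocol (values invented): a reply such as
# "solid,ff0000,00ff00,200,on,5,0.02,75,50" is split on commas and zipped with
# STATUS_KEY, giving e.g. retval['mode'] == 'solid' and retval['brightness'] == '200',
# with the raw fields also kept under retval['list'].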
def get_controller_status(ip, port):
resp = requests.get(f'http://{ip}:{port}/status')
resp = resp.json()
return import_status(resp['status'])
def get_all_controller_status(controllers):
retval = {}
for i in controllers:
ip = i[1]
port = i[2]
retval[i[0]] = get_controller_status(ip, port)
return retval
@app.route('/panel', methods=['GET', 'POST'])
def panel():
# controllers = get_controllers()
controllers = import_controllers()
return redirect(f'/panel/{list(controllers.keys())[0]}')
# cstatuses = get_all_controller_status(controllers)
# return render_template('panel.html', controller=controllers[list(controllers.keys())[0]], controllers=list(controllers.keys()))
@app.route('/panel/<controller>', methods=['GET', 'POST'])
def panel_c(controller):
controllers = import_controllers()
print(controllers)
x = controllers[controller]
print(x)
status = get_controller_status(x[0], x[1])
# controllers = get_controllers()
return render_template('panel.html', controller=controller, controllers=list(controllers.keys()), status=status)
@app.route('/add', methods=['GET', 'POST'])
def add():
if request.method == 'POST':
retval = add_controller(request.form['name'], request.form['ip_address'], request.form['port'])
flash(retval)
return redirect('/panel')
return render_template('add.html')
@app.route('/power/<toggle>/<controller>')
def power(toggle, controller):
print(toggle)
return redirect(f'/panel/{controller}')
sys.stdout.write('starting flask app. \n')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=420, debug=False)
``` |
{
"source": "josh/wikidatabots",
"score": 3
} |
#### File: josh/wikidatabots/P4985.py
```python
import logging
from tqdm import tqdm
import tmdb
from page import blocked_qids
from sparql import sparql
def main():
"""
Find Wikidata items that are missing a TMDb person ID (P4985) but have an
IMDb ID (P345). Attempt to look up the person by IMDb ID via the TMDb API.
If there's a match, create a new statement.
Outputs QuickStatements CSV commands.
"""
query = """
SELECT DISTINCT ?item ?imdb ?random WHERE {
?item wdt:P345 ?imdb.
VALUES ?classes {
wd:Q5
wd:Q16334295
wd:Q95074
wd:Q14514600
wd:Q431289
wd:Q59755569
}
# ?item (wdt:P31/(wdt:P279*)) ?classes.
?item wdt:P31 ?classes.
OPTIONAL { ?item wdt:P4985 ?tmdb. }
FILTER(!(BOUND(?tmdb)))
BIND(MD5(CONCAT(STR(?item), STR(RAND()))) AS ?random)
}
ORDER BY ?random
LIMIT 5000
"""
results = sparql(query)
print("qid,P4985")
for result in tqdm(results):
if result["item"] in blocked_qids():
logging.debug("{} is blocked".format(result["item"]))
continue
person = tmdb.find(id=result["imdb"], source="imdb_id", type="person")
if not person:
continue
print('{},"""{}"""'.format(result["item"], person["id"]))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
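# For reference, each emitted row pairs an item QID with a triple-quoted TMDb
# person id, e.g. (values invented): Q902894,"""12345"""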
```
#### File: josh/wikidatabots/P6398.py
```python
import logging
from tqdm import tqdm
import appletv
from page import blocked_qids, page_qids
from sparql import fetch_statements, sample_items, type_constraints
def main():
"""
Find Wikidata items that are missing an iTunes movie ID (P6398) but have an
Apple TV movie ID (P9586).
Outputs QuickStatements CSV commands.
"""
qids = page_qids("User:Josh404Bot/Preliminarily matched/P6398")
qids |= sample_items("P9586", limit=1000)
allowed_classes = type_constraints("P6398")
results = fetch_statements(qids, ["P31", "P6398", "P9586"])
print("qid,P6398")
for qid in tqdm(results):
item = results[qid]
if qid in blocked_qids():
logging.debug("{} is blocked".format(qid))
continue
if not item.get("P31") or item.get("P6398"):
continue
instance_of = set([v for (_, v) in item["P31"]])
if instance_of.isdisjoint(allowed_classes):
continue
for (statement, value) in item.get("P9586", []):
movie = appletv.movie(value)
if movie and movie["itunes_id"]:
print('{},"""{}"""'.format(qid, movie["itunes_id"]))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
```
#### File: josh/wikidatabots/P9586.py
```python
import html
import itertools
import json
import re
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import appletv
from page import page_statements
from sparql import sparql
from utils import shuffled
def parseurl(url):
m = re.match(
r"https://tv.apple.com/us/(movie)/([^/]+/)?(umc.cmc.[0-9a-z]+)",
url,
)
if m:
return (m.group(1), m.group(3))
return ("unknown", None)
def fetch_movie(url):
r = requests.get(url, headers=appletv.request_headers)
r.raise_for_status()
soup = BeautifulSoup(r.text, "html.parser")
ld = find_ld(soup)
if not ld:
return None
title = html.unescape(ld["name"])
try:
year = int(ld.get("datePublished", "")[0:4])
except ValueError:
return None
directors = set()
for director in ld.get("director", []):
directors.add(html.unescape(director["name"]))
if not directors:
return None
return (title, year, directors)
def find_ld(soup):
for script in soup.find_all("script", {"type": "application/ld+json"}):
ld = json.loads(script.string)
if ld["@type"] == "Movie":
return ld
return None
def wikidata_search(title, year, directors):
query = "SELECT DISTINCT ?item ?appletv WHERE {\n"
query += """
SERVICE wikibase:mwapi {
bd:serviceParam wikibase:endpoint "www.wikidata.org";
wikibase:api "EntitySearch";
mwapi:search "<<TITLE>>";
mwapi:language "en".
?item wikibase:apiOutputItem mwapi:item.
}
OPTIONAL { ?item rdfs:label ?titleLabel. }
OPTIONAL { ?item skos:altLabel ?titleAltLabel. }
FILTER(((LCASE(STR(?titleLabel))) = LCASE("<<TITLE>>")) ||
((LCASE(STR(?titleAltLabel))) = LCASE("<<TITLE>>")))
""".replace(
"<<TITLE>>", title.replace('"', '\\"')
)
years = [year, year - 1]
query += """
?item wdt:P577 ?date.
"""
query += (
"FILTER("
+ " || ".join(["((xsd:integer(YEAR(?date))) = {} )".format(y) for y in years])
+ ")"
)
query += """
?item wdt:P57 ?director.
?director rdfs:label ?directorLabel.
"""
query += (
"FILTER("
+ " || ".join(
[
'(STR(?directorLabel)) = "{}"'.format(d.replace('"', '\\"'))
for d in directors
]
)
+ ")"
)
query += """
VALUES ?classes {
wd:Q11424
wd:Q506240
}
?item (wdt:P31/(wdt:P279*)) ?classes.
OPTIONAL { ?item wdt:P9586 ?appletv }
"""
query += "\n} LIMIT 2"
results = sparql(query)
if len(results) == 1:
return results[0]
return None
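# Behaviour sketch (film details are only illustrative): wikidata_search("Heat",
# 1995, {"Michael Mann"}) returns the single {item, appletv} binding only when
# exactly one Wikidata film matches the title, the release year (or the year
# before) and one of the given directors; anything ambiguous yields None.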
def matched_appletv_ids():
query = "SELECT DISTINCT ?appletv WHERE { ?statement ps:P9586 ?appletv. }"
ids = set()
for result in sparql(query):
ids.add(result["appletv"])
return ids
def main():
limit = 500
skip_ids = matched_appletv_ids()
page_title = "User:Josh404Bot/Preliminarily matched/P9586"
def candiate_urls():
for (item, property, id) in page_statements(page_title):
if property != "P9586":
continue
if not id or id in skip_ids:
continue
url = "https://tv.apple.com/us/movie/{}".format(id)
yield (url, id)
for url in shuffled(appletv.fetch_new_sitemap_urls())[0:250]:
(type, id) = parseurl(url)
if type != "movie":
continue
if not id or id in skip_ids:
continue
yield (url, id)
for index_url in shuffled(appletv.fetch_sitemap_index_urls())[0:250]:
for url in shuffled(appletv.fetch_sitemap_index(index_url)):
(type, id) = parseurl(url)
if type != "movie":
continue
if not id or id in skip_ids:
continue
yield (url, id)
print("qid,P9586")
for (url, id) in tqdm(itertools.islice(candiate_urls(), limit), total=limit):
info = fetch_movie(url)
if not info:
continue
result = wikidata_search(*info)
if result and not result["appletv"]:
print('{},"""{}"""'.format(result["item"], id))
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.INFO)
main()
```
#### File: josh/wikidatabots/pwb.py
```python
import os
import tempfile
import pywikibot
def login(username, password):
"""
Log into Wikidata.
Writes an authenticated pywikibot.lwp to the current working directory.
"""
password_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
password_file.write('("{}", "{}")'.format(username, password))
password_file.close()
pywikibot.config.usernames["wikidata"]["wikidata"] = username
pywikibot.config.password_file = password_file.name
site = pywikibot.Site("wikidata", "wikidata")
site.login()
os.unlink(password_file.name)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Pywikibot wrapper script")
parser.add_argument("--username", action="store")
parser.add_argument("--password", action="store")
parser.add_argument("cmd", action="store")
args = parser.parse_args()
if args.cmd == "login":
login(
args.username or os.environ["WIKIDATA_USERNAME"],
args.password or os.environ["WIKIDATA_PASSWORD"],
)
else:
parser.print_usage()
```
#### File: josh/wikidatabots/redirect_P345.py
```python
import logging
import pywikibot
from tqdm import tqdm
import imdb
from sparql import sample_items
REASON_FOR_DEPRECATION = "P2241"
REDIRECT = "Q45403344"
def main():
pywikibot.config.usernames["wikidata"]["wikidata"] = "Josh404"
site = pywikibot.Site("wikidata", "wikidata")
repo = site.data_repository()
qids = sample_items("P345", limit=10)
redirect_page = pywikibot.ItemPage(repo, REDIRECT)
for qid in tqdm(qids):
item = pywikibot.ItemPage(repo, qid)
if item.isRedirectPage():
logging.debug("{} is a redirect".format(item))
continue
for claim in item.claims.get("P345", []):
id = claim.target
if claim.rank == "deprecated":
continue
if not imdb.is_valid_id(id):
logging.debug("{} is invalid format".format(id))
continue
new_id = imdb.canonical_id(id)
if not new_id:
logging.debug("{} not found".format(id))
continue
if id is not new_id:
claim.setRank("deprecated")
qualifier = pywikibot.Claim(repo, REASON_FOR_DEPRECATION)
qualifier.isQualifier = True
qualifier.setTarget(redirect_page)
claim.qualifiers[REASON_FOR_DEPRECATION] = [qualifier]
if claim_exists(item, "P345", new_id):
item.editEntity({"claims": [claim.toJSON()]})
else:
new_claim = pywikibot.Claim(repo, "P345")
new_claim.setTarget(new_id)
item.editEntity({"claims": [new_claim.toJSON(), claim.toJSON()]})
def claim_exists(page, property, value):
for claim in page.claims[property]:
if claim.target == value:
return True
return False
if __name__ == "__main__":
main()
```
#### File: josh/wikidatabots/sparql.py
```python
import json
import logging
import math
import os
import platform
import backoff
import requests
url = "https://query.wikidata.org/sparql"
session = requests.Session()
session.headers.update({"Accept": "application/sparql-results+json"})
USER_AGENT = []
if "WIKIDATA_USERNAME" in os.environ:
USER_AGENT.append(
"{username}/1.0 (User:{username})".format(
username=os.environ["WIKIDATA_USERNAME"]
)
)
else:
logging.warn("WARN: WIKIDATA_USERNAME unset")
USER_AGENT.append("requests/" + requests.__version__)
USER_AGENT.append("Python/" + platform.python_version())
session.headers.update({"User-Agent": " ".join(USER_AGENT)})
class TimeoutException(Exception):
pass
@backoff.on_exception(backoff.expo, TimeoutException, max_tries=14)
@backoff.on_exception(backoff.expo, json.decoder.JSONDecodeError, max_tries=3)
def sparql(query):
"""
Execute SPARQL query on Wikidata. Returns simplified results array.
"""
r = session.post(url, data={"query": query})
if r.status_code == 500 and "java.util.concurrent.TimeoutException" in r.text:
raise TimeoutException(query)
r.raise_for_status()
data = r.json()
vars = data["head"]["vars"]
bindings = data["results"]["bindings"]
logging.info(
"sparql: {} results in {} ms".format(
len(bindings), math.floor(r.elapsed.total_seconds() * 1000)
)
)
def results():
for binding in bindings:
yield {var: format_value(binding.get(var)) for var in vars}
def format_value(obj):
if obj is None:
return None
elif obj["type"] == "literal":
return obj["value"]
elif obj["type"] == "uri":
if obj["value"].startswith("http://www.wikidata.org/prop/"):
return obj["value"].replace("http://www.wikidata.org/prop/", "")
elif obj["value"] == "http://wikiba.se/ontology#DeprecatedRank":
return "deprecated"
elif obj["value"] == "http://wikiba.se/ontology#NormalRank":
return "normal"
elif obj["value"] == "http://wikiba.se/ontology#PreferredRank":
return "preferred"
elif obj["value"].startswith("http://www.wikidata.org/entity/"):
label = obj["value"].replace("http://www.wikidata.org/entity/", "")
if label.startswith("statement/"):
return "$".join(label.replace("statement/", "").split("-", 1))
else:
return label
else:
return obj["value"]
else:
return obj
return list(results())
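# A minimal usage sketch; the query is illustrative but uses only standard
# Wikidata prefixes, and format_value above shortens entity URIs to bare QIDs.
def _example_sparql():
    rows = sparql('SELECT ?item WHERE { ?item wdt:P345 "tt0111161". } LIMIT 1')
    return rows  # e.g. [{"item": "Q172241"}]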
def fetch_statements(qids, properties):
query = "SELECT ?statement ?item ?property ?value WHERE { "
query += values_query(qids)
query += """
OPTIONAL {
?item ?property ?statement.
?statement ?ps ?value.
?statement wikibase:rank ?rank.
FILTER(?rank != wikibase:DeprecatedRank)
}
"""
query += "FILTER(" + " || ".join(["(?ps = ps:" + p + ")" for p in properties]) + ")"
query += "}"
items = {}
for result in sparql(query):
statement = result["statement"]
qid = result["item"]
prop = result["property"]
value = result["value"]
item = items[qid] = items.get(qid, {})
properties = item[prop] = item.get(prop, [])
properties.append((statement, value))
return items
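# Shape of the returned mapping (identifiers invented): each item QID maps to a
# property -> [(statement id, value), ...] dictionary, e.g.
#   {"Q42": {"P345": [("Q42$8400b3f9-guid", "nm0010930")]}}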
def type_constraints(property):
query = """
SELECT DISTINCT ?subclass WHERE {
"""
query += " wd:" + property + " p:P2302 ?constraint."
query += """
?constraint ps:P2302 wd:Q21503250.
?constraint pq:P2308 ?class.
?subclass wdt:P279* ?class.
}
"""
return set([r["subclass"] for r in sparql(query)])
def sample_items(property, limit, type=None):
if type is None:
items = set()
items |= sample_items(property, type="created", limit=math.floor(limit / 3))
items |= sample_items(property, type="updated", limit=math.floor(limit / 3))
items |= sample_items(property, type="random", limit=limit - len(items))
return items
elif type == "random":
query = """
SELECT ?item WHERE {
SERVICE bd:sample {
?item wdt:?property [].
bd:serviceParam bd:sample.limit ?limit ;
bd:sample.sampleType "RANDOM".
}
}
"""
elif type == "created":
query = """
SELECT ?item {
SERVICE wikibase:mwapi {
bd:serviceParam wikibase:endpoint "www.wikidata.org";
wikibase:api "Generator" ;
wikibase:limit "once" ;
mwapi:generator "search";
mwapi:gsrsearch "haswbstatement:?property" ;
mwapi:gsrsort "create_timestamp_desc" ;
mwapi:gsrlimit "?limit".
?item wikibase:apiOutputItem mwapi:title.
}
}
"""
elif type == "updated":
query = """
SELECT ?item {
SERVICE wikibase:mwapi {
bd:serviceParam wikibase:endpoint "www.wikidata.org";
wikibase:api "Generator" ;
wikibase:limit "once" ;
mwapi:generator "search";
mwapi:gsrsearch "haswbstatement:?property" ;
mwapi:gsrsort "last_edit_desc" ;
mwapi:gsrlimit "?limit".
?item wikibase:apiOutputItem mwapi:title.
}
}
"""
else:
assert False, "unknown type"
query = query.replace("?property", property)
query = query.replace("?limit", str(limit))
items = set()
for result in sparql(query):
assert result["item"]
items.add(result["item"])
return items
def values_query(qids, binding="item"):
values = " ".join("wd:{}".format(qid) for qid in qids)
return "VALUES ?" + binding + " { " + values + " }"
if __name__ == "__main__":
import json
import sys
logging.basicConfig(level=logging.INFO)
query = sys.stdin.readlines()
result = sparql(query)
json.dump(result, sys.stdout, indent=2)
```
#### File: josh/wikidatabots/test_itunes.py
```python
import itunes
def test_batch_lookup_one():
results = itunes.batch_lookup([285494571])
(id, result) = list(results)[0]
assert id == 285494571
assert result
assert result["trackName"] == "<NAME>"
def test_batch_lookup_miss():
results = itunes.batch_lookup([200000])
(id, result) = list(results)[0]
assert id == 200000
assert not result
```
#### File: josh/wikidatabots/tmdb.py
```python
import os
import backoff
import requests
TMDB_API_KEY = os.environ.get("TMDB_API_KEY")
class UnauthorizedException(Exception):
pass
@backoff.on_exception(backoff.expo, requests.exceptions.ConnectionError, max_tries=3)
def api_request(path, params={}, version=3, api_key=TMDB_API_KEY):
url = "https://api.themoviedb.org/{}{}".format(str(version), path)
post_params = {}
if api_key:
post_params["api_key"] = api_key
post_params.update(params)
r = requests.get(url, params=post_params)
if r.headers["Content-Type"].startswith("application/json"):
data = r.json()
if r.status_code == 401:
raise UnauthorizedException(data["status_message"])
return data
else:
r.raise_for_status()
return {}
object_types = set(["movie", "tv", "person"])
def object(id, type, append=[], api_key=TMDB_API_KEY):
assert type in object_types
params = {}
if append:
params["append_to_response"] = ",".join(append)
resp = api_request(
"/{}/{}".format(type, id),
params=params,
api_key=api_key,
)
if resp.get("success") is False:
return None
return resp
find_sources = set(
[
"imdb_id",
"freebase_mid",
"freebase_id",
"tvdb_id",
"tvrage_id",
"facebook_id",
"twitter_id",
"instagram_id",
]
)
find_types = set(["movie", "person", "tv", "tv_episode", "tv_season"])
def find(id, source, type, api_key=TMDB_API_KEY):
assert source in find_sources
assert type in find_types
resp = api_request(
"/find/{}".format(id),
params={"external_source": source},
api_key=api_key,
)
results = resp.get("{}_results".format(type))
if len(results) == 1:
return results[0]
else:
return None
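# Usage sketch (the external id is illustrative): look up a person record by an
# IMDb id; the helper returns None when the result is missing or ambiguous.
def _example_find():
    return find("nm0000206", source="imdb_id", type="person")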
```
#### File: josh/wikidatabots/utils.py
```python
import random
def batches(iterable, size):
batch = []
for element in iterable:
batch.append(element)
if len(batch) == size:
yield batch
batch = []
if batch:
yield batch
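# Quick check of the batching helper above:
def _example_batches():
    return list(batches(range(5), 2))  # -> [[0, 1], [2, 3], [4]]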
def shuffled(seq):
lst = list(seq)
random.shuffle(lst)
return lst
def uniq(*lists):
seen = []
for lst in lists:
for el in lst:
if el not in seen:
yield el
seen.append(el)
```
#### File: josh/wikidatabots/wikitext.py
```python
def link(title, url):
return "[{url} {title}]".format(url=url, title=title)
def item(qid):
return "{{Q|" + qid.replace("Q", "") + "}}"
def statement(statement):
statement = statement.replace("$", "-")
qid, guid = statement.split("-", 1)
return (
item(qid)
+ " "
+ link(
guid,
"http://www.wikidata.org/entity/statement/{}".format(statement),
)
)
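# Illustration (guid invented): statement("Q42$ABC-123") renders as
# {{Q|42}} [http://www.wikidata.org/entity/statement/Q42-ABC-123 ABC-123]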
``` |
{
"source": "josh-wilde/rl-compare",
"score": 3
} |
#### File: josh-wilde/rl-compare/EpisodicNStepSarsa.py
```python
import click
import gym
import numpy as np
from math import log, ceil, exp
from tiles3 import IHT
from QFunctions import LinearTilingQApproximator
# python EpisodicNStepSarsa.py --episodes 1 --alpha 0.03 --init_epsilon 0.1 --eps_decay_factor 0.0 --n 1 --gamma 0.9 --tile_resolution 8 --n_tilings 8 --d 3 --render
@click.command()
@click.option('--episodes', default=10)
@click.option('--alpha', default=0.1)
@click.option('--init_epsilon', default=0.1)
@click.option('--eps_decay_factor', default=0.1) # epsilon_t = init_epsilon*exp(-eps_decay_factor*t) so decay = 0 is constant
@click.option('--n', default=1)
@click.option('--gamma', default=0.5) # reward discounting factor
@click.option('--tile_resolution', default=8) # number of tiles in each dimension of feature space
@click.option('--n_tilings', default=8) # number of overlapping tilings
@click.option('--d', default=3) # either 2 if (position,velocity) and 3 if (position,velocity,acceleration)
@click.option('--render/--no-render', default=True)
def main(episodes, alpha, init_epsilon, eps_decay_factor,
n, gamma, tile_resolution, n_tilings, d, render):
# Instantiate the environment
env = gym.make('MountainCar-v0')
n_actions = 3 # Action space is 0,1,2 for mountain car
# Initialize the hash table to store the tiling
n_tiles = tile_resolution ** d * n_tilings * n_actions
iht = IHT(2**(1+ceil(log(n_tiles, 2)))) # should be double the hash table size that we need
# Initialize the Q function
q_hat = LinearTilingQApproximator(iht, n_tilings, tile_resolution)
# Initialize arrays to store actions, states and rewards
# n-step SARSA means storing current info AND info for n more steps
A = np.zeros(n+1, dtype=int)
S = np.zeros((n+1,d)) # each row is either [position, velocity] or [position, velocity, acceleration]
R = np.zeros(n+1)
# Loop over episodes
for episode in range(episodes):
# Initial observation
# For MountainCar, always starts with 0 velocity and append 0 acceleration
observation = list(env.reset())
if d == 2:
S[0] = observation
else:
S[0] = observation + [0]
# epsilon-greedy action based on initial state
if np.random.uniform() <= init_epsilon:
A[0] = env.action_space.sample()
else:
A[0] = np.argmax([q_hat(S[0], a) for a in range(n_actions)])
# Set termination time to infinity to start
# Initialize time counter
t = 0
T = np.inf
# render
if render: env.render()
### Print initial state
print('t = {:d}'.format(t))
print('S[t] = ' + np.array2string(S[0], precision=2))
print('A[0] = {:d}'.format(A[0]))
# Loop over time periods within an episode
while True:
# If we haven't terminated, then take an action
# Store the next state and reward
if t < T:
observation, reward, done, info = env.step(A[t % (n+1)])
if render: env.render()
R[(t+1) % (n+1)] = reward
if d == 2:
S[(t+1) % (n+1)] = list(observation)
else:
S[(t+1) % (n+1)] = list(observation) + [observation[1] - S[t % (n+1), 1]]
if done:
T = t + 1
else:
epsilon = init_epsilon*exp(-eps_decay_factor*t)
if np.random.uniform() <= epsilon:
A[(t+1) % (n+1)] = env.action_space.sample()
else:
A[(t+1) % (n+1)] = np.argmax([q_hat(S[(t+1) % (n+1)], a) for a in range(n_actions)])
### Print state for t + 1
print('After taking A[{:d}], the info for t+1 = {:d} is:'.format(t, t+1))
print('t + 1 = {:d}'.format(t+1))
print('S[t+1] = ' + np.array2string(S[(t+1) % (n+1)], precision=2))
print('R[t+1] = {:f}'.format(R[(t+1) % (n+1)]))
print('A[t+1] chosen based on S[t+1] and R[t+1].')
print('A[t+1] = {:d}'.format(A[(t+1) % (n+1)]))
# Set the period for which we are updating the weights
# E.g. if n = 1, then can start updating at t = 0 because we have stored S_1 and R_1
tau = t - n + 1
# If we are ready to update the first state, then go ahead
if tau >= 0:
# discounted n-step return
G = sum([gamma**(i-tau-1)*R[(i % (n+1))] for i in range(tau+1, min(tau+n, T) + 1)])
# if you haven't terminated within n steps, then add the estimated return to go
if tau + n < T:
G = G + gamma**n * q_hat(S[(tau+n) % (n+1)], A[(tau+n) % (n+1)])
# Adjust the weights based on gradient of the error
# The update function takes the state and the action to find the active tiles
# Then updates each tile by alpha * error
q_hat.update(S[tau % (n+1)],
A[tau % (n+1)],
alpha,
G - q_hat(S[tau % (n+1)], A[tau % (n+1)]))
if tau == T - 1:
print('Exiting at tau = {:d}'.format(tau))
break
t += 1
if __name__ == '__main__':
main()
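# Worked indexing example for the circular buffers above: with n = 4 the arrays
# S, A, R have length n + 1 = 5 and step t lands in slot t % 5, so for tau = 3
# the n-step return G reads R[4 % 5], R[5 % 5], R[6 % 5], R[7 % 5], i.e. slots
# 4, 0, 1, 2, before those slots are overwritten by later steps.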
```
#### File: josh-wilde/rl-compare/test-env.py
```python
import gym
import click
from RandomAgent import RandomAgent
@click.command()
@click.option('--env_name', default='MountainCar-v0')
@click.option('--episodes', default=1)
@click.option('--max_t', default=100)
@click.option('--render/--no-render', default=True)
def main(env_name, episodes, max_t, render):
# Instantiate the environment
env = gym.make(env_name)
# Instantiate agent
random_mountain_agent = RandomAgent(env.action_space)
# Loop through the episodes
for episode in range(episodes):
# Reset to obtain initial observation
observation = env.reset()
# Run the episode
for t in range(max_t):
if render: env.render()
print(observation)
# Agents act here based on current observation
action = random_mountain_agent.act(observation)
observation, reward, done, info = env.step(action) # step based on action
if done:
print('Episode finished after {} time steps'.format(t+1))
break
env.close()
if __name__ == '__main__':
main()
``` |
{
"source": "JoshWilde/test-python-action",
"score": 3
} |
#### File: JoshWilde/test-python-action/Paper_Vectors.py
```python
import numpy as np
import glob
import sklearn
import pdfminer
import pdfplumber
import PyPDF2
import nltk
from nltk import word_tokenize
from nltk import download
from nltk.corpus import stopwords
from pdfminer.high_level import extract_text
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem import WordNetLemmatizer
from nltk.probability import FreqDist
from sklearn.metrics.pairwise import cosine_similarity
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
import spacy
import os
os.system(f"echo '🎉 All imports OK'")
paper_path = os.environ['PAPER_PATH']
os.system(f"echo '📄 PDF file located here: {paper_path}'")
os.system('python -m spacy download en_core_web_lg')
model = spacy.load('en_core_web_lg')
paper_path = 'paper.pdf'
#POI_PDF = [extract_text(paper_path)] # Extracts text from the PDF file
def Get_Lemma_Words(POI_PDF):
'''
Parameters
----------
POI_PDF : list
A list containing a single string which is the contents of the paper
Returns
----------
words : array_like
An array where each element is a processed word from the text
'''
text = str(POI_PDF)
text2 = text.split() # splits the text into words
words_no_punc = [] # defines an empty list
for w in text2: # For each word in the text
if w.isalpha(): # If the token is purely alphabetic
words_no_punc.append(w.lower()) # appends a lowercase version of the word to the no-punctuation list
from nltk.corpus import stopwords # Import stop words
stopwords = stopwords.words('english') # Defines english stop words
clean_words = [] # define clean word list
for w in words_no_punc: # for each word in the no-punctuation list
if w not in stopwords: # if the word is not a stopword
clean_words.append(w) # if the word is not a stopword it is appended to the clean word list
clean_words_arr = '' # Defines an empty string
for i in range(len(clean_words)): # For each word in clean words
clean_words_arr = clean_words_arr + ' ' + str(clean_words[i]) # Appends the clean words to a string
string_for_lemmatizing = clean_words_arr
lemmatizer = WordNetLemmatizer()
words_2 = word_tokenize(string_for_lemmatizing)
lemmatized_words = [lemmatizer.lemmatize(word) for word in words_2]
lemmatized_words_arr = '' # Defines an empty string
for i in range(len(lemmatized_words)): # For each word in the lemmatized words
lemmatized_words_arr = lemmatized_words_arr + ' ' + str(lemmatized_words[i]) # Appends the lemmatized words to a string
words = word_tokenize(lemmatized_words_arr) # Tokenises each word in the text
return words
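# Rough illustration of the pipeline above: the extracted text is split into
# tokens, anything failing isalpha() (digits, punctuation, bracketed fragments)
# is dropped, the rest is lowercased, English stop words are removed, and the
# survivors are lemmatized (e.g. 'cats' -> 'cat') before word_tokenize returns
# the final word list.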
def Get_Top_Words_tf(Paper_interest, num_top20=20):
'''
Parameters
----------
Paper_interest : string
File path to the location of the PDF
num_top20 : Int
Number of most frequent words that are used for calculating the vector of the paper
Returns
----------
top20_tf : array_like
Array of the most frequent words from the paper in order
'''
POI_PDF = [extract_text(Paper_interest)] # Extracts text from the PDF file
#print('Extracted text')
#text = str(POI_PDF)
words = Get_Lemma_Words(POI_PDF) # Lemmatizes words from the extracted text
#print('Get Lemma Words')
top20_tf = -2 # If there are no lemmatized words, this function will output this value
#print('Top20 TF')
if len(words) > 0: # If there are lemmatized words
fdist = FreqDist(words) # Calculates the frequency for each lemmatized word in the text
#print('Freq Dist')
X = np.array(fdist.most_common()) # Sorts the words in order of frequency
#print('X')
top20_tf = X[:num_top20,0] # Saves the top N words as a list
#print('Top20 TF')
return top20_tf
#top20_tf = Get_Top_Words_tf(paper_path)
#words = Get_Lemma_Words(POI_PDF) # Lemmanises words from the extracted text
#top20_tf = -2 # If there are no lemmanised words, this function will output this value
#if len(words) > 0: # If there are lemmanised words
# fdist = FreqDist(words) # Calculates the frequency for each lemmanised word in the text
# X = np.array(fdist.most_common()) # Sorts the words in order of frequency
# top20_tf = X[:num_top20,0] # Saves the top N words as a list
#print(top20_tf)
def Generate_Paper_Vector(Paper_interest, model, num_top20=20):
'''
Parameters
----------
Paper_interest : string
File path to the location of the PDF
model :
A loaded spaCy language model used to embed the paper's top words
num_top20 : int
Number of most frequent words used to build the paper vector
Returns
----------
pap_vector : array_like
An array of shape (300) representing where the given paper lies in the
model vector space.
doc_top20 : string
A string containing the 20 words that were
'''
#average_vector = np.zeros((300)) # Creates an array for 300 zeros
#print('Starting Top Words TF')
top20_tf = Get_Top_Words_tf(Paper_interest) # Gets the top N Words
#print(top20_tf)
#print(top20_tf)
doc_top20= '' # Creates empty string
if top20_tf != -2: # If the paper has lemmanised words
for i in top20_tf: # For each word in the top N
doc_top20 = doc_top20 + i +' ' # Appends each top N word to list
pap_vector = model(doc_top20).vector # generates a vector for the paper
#average_vector = average_vector + pap_vector
return pap_vector, doc_top20
# Generate TF Vectors Paper
def Paper_vectors_TF(paper_list, model,num_top20=20):
'''
Parameters
----------
paper_list : array_like
Array of file paths to PDF files
gen_pap_vec : function
A function to generate the vectors for paper that we are trying to find a reviewer for
Returns
----------
Paper_Dict : Dictionary
All the keys should be the DOI numbers for each paper taken from the file name. The items are vectors
of shape (300) which is the vector for where this paper lies in the model vector space.
Paper_20_Dict : Dictionary
All the keys should be the DOI numbers for each paper taken from the file name. The items are the
top 20 words from the paper that have been used to generate the vector representation.
'''
Paper_Dict = {} # Defines an empty dictionary
Paper_20_Dict = {} # Defines an empty dictionary
#for k in range(len(paper_list)): # For each paper
#print(paper_list[k]+ ' - ' +str(k))
#print('Starting Generate Paper Vectors')
paper_vector, doc_top20 = Generate_Paper_Vector(paper_list, model) # Generates paper vector and shows the top N words
#print(paper_vector)
# print(doc_top20)
Paper_Dict[paper_list] = paper_vector # Adds this vector to the dictionary
Paper_20_Dict[paper_list] = doc_top20 # Adds the top N words to the dictionary
# print(Paper_Dict)
#print(Paper_20_Dict)
return Paper_Dict, Paper_20_Dict
# Paper Cosine
def Paper_cosine(author_keys, author_vectors, paper_vec, N=5, printer=True):
'''
Parameters
----------
author_keys : Dictionary Keys
Keys of the author vector dictionary, one per candidate reviewer
author_vectors : Dictionary
Maps each author key to a vector of shape (300) in the model space
paper_vec : array-like
Vector of shape (300) for the paper being matched
N : int
Number of reviewers suggested
printer : Boolean
If True, print each suggested author key with its cosine similarity
Returns
----------
cos_sim_dict : dictionary
Mapping of the N closest author keys to their cosine similarities (only printed here, since the return statement is commented out)
'''
cos_sim_list = [] # Creates an empty list
for i in range(len(author_keys)): # For each author key
idx = list(author_keys)[i] # Creates an index
author_vec = author_vectors[idx] # Loads the vector for the given author key
#print('paper vec')
#print(np.array([paper_vec]))
#print('author vec')
#print(np.array([author_vec]))
cos_sim = cosine_similarity(np.array([paper_vec]), np.array([author_vec]))[0,0] # Calculates the cosine similarity
# of the paper and the author of the index
cos_sim_list.append(cos_sim) # appends cosine similarity to a list of all cosine similarities for each author for
# this one paper
cos_sim_list = np.array(cos_sim_list) # Converts list to numpy array
cos_sim_dict = {} # Creates an empty dictionary
sorted_idx = np.argsort(cos_sim_list)[-N:] # Sorts list and selects the top N highest scoring authors
for i in range(N): # for each of the top N authors
idx = sorted_idx[-i-1] # Creates an index
doi = list(author_vectors)[idx] #Finds the author key for the high scoring cosine similarties
if printer == True:
print(doi + ' - ' + str(cos_sim_list[idx])[:6]) # Prints author key & cosine similarity for that author to the given paper
cos_sim_dict[doi] = cos_sim_list[idx] # Adds the author key & cosine similarity to a dictionary
#return cos_sim_dict
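# Illustration (author keys and scores invented): with printer=True and N=5 this
# prints five lines such as "author_folders/jdoe - 0.8123", i.e. the author
# vectors closest to the paper by cosine similarity.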
#paper_vector, doc_top20 = Generate_Paper_Vector(paper_path , model)
#print(paper_vector)
#print(doc_top20)
#paper_path = ['paper.pdf', 'paper.pdf', 'paper.pdf']
#print('Starting Paper Vectors TF')
Paper_Dict, Paper_20_Dict = Paper_vectors_TF(paper_path, model)
author_Dict = np.load('Author_Dict_generated.npy', allow_pickle=True).item()
author_keys = author_Dict.keys()
author_vectors = author_Dict
paper_vec = Paper_Dict[list(Paper_Dict)[0]]
print('Suggested Reviewers')
Paper_cosine(author_keys, author_vectors, paper_vec)
#print('SUCCESS!!!')
```
#### File: JoshWilde/test-python-action/test.py
```python
import numpy as np
import glob
import spacy
import re
import os
os.system(f"echo '🎉 All imports OK'")
glow = os.environ['GLOB_FOLDERS']
print(glow)
print('/n')
pdfs = os.environ['GLOB_PDFS']
print(pdfs)
pdfs = pdfs.split(' ')
def Make_Folder_dict(pdfs):
Master_dict = {}
for i in range(len(pdfs)):
print(pdfs[i])
J = re.search('/', pdfs[i])
K = re.search('/',pdfs[i][J.end():])
Folder_name = pdfs[i][J.end():J.end()+K.start()]
pdf_name = pdfs[i]#[J.end()+K.end():]
if Folder_name not in Master_dict:
Master_dict[Folder_name] = [pdf_name]
else:
Master_dict[Folder_name].append(pdf_name)
return Master_dict
def Author_vectors_TF(folder_names, num_top20=20):
Author_Dict = {} # Defines an empty dictionary
for i in range(len(list(folder_names))): # For each author
paper_list = folder_names[list(folder_names)[i]]
#average_vector = Reviewer_Paper_Vector(paper_list, model, num_top20) # Generates the average vector for all the papers in this folder
#Author_Dict[folder_names[k][directory_offset:]] = average_vector # Adds this average vector to the dictionary
#return Author_Dict
print('success minor!')
Master_dict = Make_Folder_dict(pdfs)
Author_vectors_TF(Master_dict)
#fold_loc = 'https://github.com/JoshWilde/test-python-action/tree/main/Author_Folders/ctb'
#print(fold_loc)
#folds = glob.glob(fold_loc)
#print(folds)
#folds = glob.glob(fold_loc+'/*')
#print(folds)
#paper_path = os.environ['FOLDER_PATH']
#print(paper_path)
#glo = os.environ[glob.glob(paper_path+'/*')]
#print(glo)
#paper_path = os.environ['PAPER_PATH']
#os.system(f"echo '📄 PDF file located here: {paper_path}'")
#os.system('python -m spacy download en_core_web_lg')
#model = en_core_web_lg.load()
#model = spacy.load('en_core_web_lg')
print('SUCCESS!')
#!python -m spacy download en_core_web_lg
#model = spacy.load('en_core_web_lg')
``` |
{
"source": "JoshWilkins2013/PriceBot",
"score": 3
} |
#### File: PriceBot/pricebot/Analyzer.py
```python
import os
import glob
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
class Analyzer(object):
def __init__(self, file_path=".\\Data\\", file_name=None, file_type="Automobile", last_update=None):
self.file_path = file_path
self.file_name = file_name
self.file_type = file_type
self.last_update = last_update
if file_name:
if file_type == 'Automobile':
self.car = self.file_name[:self.file_name.find('_')]
self.data = pd.read_csv(self.file_path + self.file_name)
self._clean_results()
else:
self.data = None
def _clean_results(self):
""" Clean up some of the columns to be consistent """
if self.file_type == "Automobile":
cols = ["Year", "Mileage", "Price"]
self.data.Mileage.replace([',', 'mi.', 'nan', ' '], '', regex=True, inplace=True) # Fix mileage column
self.data.Price.replace([',', '\$'], '', regex=True, inplace=True) # Always fix price column (, and $ removed)
self.data[cols] = self.data[cols].apply(pd.to_numeric, errors='coerce') # Coerces errors into NaN values
self.data.drop(self.data[self.data.Year < 2000].index, inplace=True) # Remove cars made before 2000
self.data.drop(self.data[self.data.Price > 30000].index, inplace=True) # Remove cars over $30,000
self.data.drop(self.data[(self.data.Mileage < 1000) | (self.data.Mileage > 300000)].index, inplace=True) # Remove cars with over 300,000 miles
self.data['Age'] = 2018 - self.data['Year'] # Change years to Age
elif self.file_type == "Apartment":
self.data.Area.replace(['ft2'], '', regex=True, inplace=True) # Remove ft2 from square footage column
self.data.Price.replace([',', '\$'], '', regex=True, inplace=True) # Always fix price column (, and $ removed)
else:
self.data['Street'], self.data['City'], self.data['State'] = self.data['Address'].str.split(',', 2).str
del self.data.Address
self.data.drop(self.data[self.data.Price > 1000000].index, inplace=True) # Remove houses worth more than $1 million
self.data.replace('^\s*$', np.nan, regex=True, inplace=True) # Replace all empty values with np.NaN
self.data = self.data.dropna(axis=1, how='all') # Remove Null Columns
self.data = self.data.apply(pd.to_numeric, errors='ignore') # Coerces errors into NaN values
def merge_results(self, car):
self.car = car
data_files = glob.glob(self.file_path + self.car + '*.csv')
item_data = pd.DataFrame()
for data_file in data_files:
df = pd.read_csv(data_file)
item_data = item_data.append(df)
self.file_name = self.car + "_Merged.csv"
item_data.to_csv(self.file_path + self.file_name, index=False)
self.data = item_data
self._clean_results()
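# Illustration (file names invented): merge_results('Civic') concatenates every
# .\Data\Civic*.csv scrape, writes .\Data\Civic_Merged.csv, and keeps the
# cleaned frame as self.data.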
def plot_results(self):
fig = plt.figure(figsize=(13, 6))
if self.file_type == "Automobile":
cols = ['Mileage', 'Price', 'Age', 'Link']
plt.xlabel('Mileage')
plt.title('Mileage vs Cost (Age in Color)')
elif self.file_type == 'Apartment':
cols = ['Area', 'Price', 'Link']
plt.xlabel('Square Feet')
plt.title('SqFt vs Cost (Number of Bedrooms in Color)')
else:
return
new_df = self.data[cols]
new_df = new_df.dropna(axis=0, how='any') # Remove rows with missing values
for col in cols[:-1]:
new_df[col] = new_df[col].astype(int)
s = plt.scatter(x=new_df[cols[0]], y=new_df[cols[1]])
s.set_urls(new_df['Link'].values)
plt.grid(which='both')
plt.ylabel('Cost')
plt.show()
fig.canvas.print_figure('MergedData.svg')
def get_best_cars(self, filter_by='Mileage', n_iter=4, last_update=None, filtered_data=None, plot_show=False):
for i in range(n_iter):
# Remove things above Age line of best fit
coeffs = self.best_fit(col=filter_by)
p = np.poly1d(coeffs)
plt.clf()
plt.scatter(x=self.data[filter_by], y=self.data['Price'], cmap='plasma_r')
self.data = self.data[self.data['Price'] < p(self.data[filter_by])] # Remove points above average price trendline
plt.plot(self.data[filter_by], p(self.data[filter_by]), 'ro') # Average price trendline by age/mileage
if filtered_data is not None:
plt.scatter(x=filtered_data[filter_by], y=filtered_data['Price'], cmap='plasma_r')
plt.xlabel('filter_by')
plt.ylabel('Price')
if plot_show:
plt.show()
plt.close()
fig = plt.figure(figsize=(13, 6))
s = plt.scatter(self.data[filter_by], y=self.data['Price'], color='blue')
s.set_urls(self.data['Link'].values)
# Color cars that meet filter green
if filtered_data is not None:
filtered_merged = pd.merge(filtered_data, self.data, how='inner') # Not sure why I have to do this
s = plt.scatter(x=filtered_merged[filter_by], y=filtered_merged['Price'], color='green')
s.set_urls(filtered_merged['Link'].values)
# Color cars that are new red
if self.last_update is not None:
self.data['Date'] = pd.to_datetime(self.data['Date'], format='%Y-%m-%d %H:%M')
recent_data = self.data[self.data['Date'] > self.last_update]
recent_merged = pd.merge(recent_data, self.data, how='inner') # Not sure why I have to do this
s = plt.scatter(x=recent_merged[filter_by], y=recent_merged['Price'], color='red')
s.set_urls(recent_merged['Link'].values)
if plot_show:
plt.show()
if not os.path.exists(".\\Best" + filter_by):
os.makedirs(".\\Best" + filter_by)
fig.canvas.print_figure(".\\Best" + filter_by + "\\" + self.file_name[:-4] + '.svg')
def best_fit(self, col='Age', verbose=False):
param_grid = {'polynomialfeatures__degree': np.arange(2, 3, 1),
'linearregression__fit_intercept': [True, False],
'linearregression__normalize': [True, False]}
plt.subplot(1, 2, 1)
if col == 'Age':
group = self.data.groupby(self.data['Age'])[['Price']].mean().reset_index()['Age'].values
mean_costs = self.data.groupby(self.data['Age'])[['Price']].mean().reset_index()['Price'].values
median_costs = self.data.groupby(self.data['Age'])[['Price']].median().reset_index()['Price'].values
metric = ((mean_costs + median_costs)/2)
X_test = np.linspace(group[0], group[-1], len(group)+2)[:, None]
X = group.reshape(len(group), 1)
y = metric
plt.xlim(xmin=0, xmax=18)
else:
temp_df = self.data[[col, "Price"]].dropna(axis=0, how='any') # Remove rows with missing values
X = temp_df[col].values.reshape(len(temp_df), 1)
y = temp_df["Price"].values
X_test = np.linspace(min(temp_df[col]), max(temp_df[col]), 1000)[:, None]
plt.xlim(xmin=0, xmax=250000)
grid = GridSearchCV(make_pipeline(PolynomialFeatures(2), LinearRegression()), param_grid)
grid.fit(X, y)
model = grid.best_estimator_
if verbose:
plt.scatter(X.ravel(), y)
y_test = model.fit(X, y).predict(X_test)
plt.plot(X_test.ravel(), y_test, label=self.car)
plt.ylim(ymin=0, ymax=25000)
plt.title(col + ' vs Cost')
plt.grid(which='both')
plt.legend()
# Plot its derivative too - Shows depreciation rate better
plt.subplot(1, 2, 2)
best_order = grid.best_params_["polynomialfeatures__degree"]
coeffs = np.polyfit(X.ravel(), y, best_order)
p = np.poly1d(np.negative(coeffs))
if col == 'Mileage':
p *= 10000 # Convert derivative to $ per 10,000 miles instead of $ per mile
plt.xlim(xmin=0, xmax=300000)
else:
plt.xlim(xmin=0, xmax=18)
p2 = np.polyder(p)
plt.plot(X_test.ravel(), p2(X_test.ravel()), label=self.car)
plt.ylim(ymin=0, ymax=3000)
plt.title(col + ' vs Cost')
plt.grid(which='both')
plt.legend()
# Add predicted value of data to csv file
p = np.poly1d(coeffs)
p2 = np.polyder(p)
if col == 'Age':
self.data['Price Diff Age'] = self.data['Age'] - p(self.data['Age'])
self.data['Depreciate Age'] = p2(self.data['Age'])
if col == 'Mileage':
self.data['Price Diff Mileage'] = self.data['Mileage'] - p(self.data['Mileage'])
self.data['Depreciate Mileage'] = p2(self.data['Mileage'])*10000
return coeffs
```
#### File: pricebot/Pages/Craigslist.py
```python
import pandas as pd
from ..Item import *
from ..Browser import Browser
from datetime import datetime as dt
from craigslist import CraigslistForSale
from craigslist import CraigslistHousing
class Craigslist(object):
def __init__(self, site="Boston"):
self.site = site
self.bro = None
self.last_update = None
def get_car_results(self, make=None, model=None, zip_code='01923', radius=50, overwrite=False):
if not make:
make = raw_input("Make: ").capitalize()
if not model:
model = raw_input("Model: ").capitalize()
if not zip_code:
zip_code = raw_input("Zip: ")
if not radius:
radius = raw_input("Radius: ")
cl = CraigslistForSale(site=self.site.lower(), category='cta', filters={'zip_code': zip_code, 'search_distance': radius, 'query': make + ' ' + model, 'make': make, 'model': model, 'min_price': 500})
ads = cl.get_results(sort_by='newest')
fname = make + model + '_' + self.site + 'Craigslist.csv'
# If data file already exists, only update it with new data (by grabbing latest date)
if os.path.isfile(".\\Data\\" + fname):
df = pd.read_csv(".\\Data\\" + fname, usecols=['Date'])
df['Date'] = pd.to_datetime(df['Date'])
self.last_update = str(max(df['Date']))
print("Grabbing data after " + self.last_update)
self.bro = Browser("https://" + self.site + ".craigslist.org/")
ads_info = []
for ad in ads:
print(len(ads_info)) # Some indication of progress
ad_info = {}
ad_info['Title'] = ad['name']
ad_info['Link'] = ad['url']
ad_info['Price'] = ad['price'][1:]
ad_info['Date'] = ad['datetime']
ad_info['Year'] = get_year(ad_info['Title']) # Get year from title text
self.bro.driver.get(ad_info['Link']) # Go to page link
if self.last_update:
if dt.strptime(ad_info['Date'], "%Y-%m-%d %H:%M") <= dt.strptime(self.last_update, "%Y-%m-%d %H:%M:%S"):
break # If we already have the data, dont grab it again - stop the process, since its sorted by date
attrs = self.bro.driver.find_elements_by_xpath("//p[@class='attrgroup']//span")
attr_texts = [attr.text for attr in attrs]
attrs_to_keep = ["condition", "odometer", "color", "transmission", "type"]
for attr in attr_texts:
key = next((attr.split(':')[0].strip() for x in attr.split(':')[0].strip().split() if x in attrs_to_keep), '')
if key:
try:
value = next((attr.split(':')[1].strip() for x in attr.split(':')[0].strip().split() if x in attrs_to_keep), '')
if key == "odometer": # ToDo: Probably a better spot to put this
key = "Mileage"
ad_info[key] = value
except:
pass
ads_info.append(ad_info)
self.bro.driver.close()
# Save data to csv file
if len(ads_info) > 0:
if os.path.isfile(".\\Data\\" + fname) and not overwrite:
temp_df = pd.read_csv(".\\Data\\" + fname)
temp_df = temp_df.append(ads_info)
write_to_csv(temp_df, fname)
else:
write_to_csv(ads_info, fname)
return self.last_update
def get_apt_results(self, zip_code='01923', radius=20, max_price=1600, sub_category=None, overwrite=False):
cl = CraigslistHousing(site=self.site.lower(), category=sub_category + '/aap', filters={'zip_code': zip_code, 'search_distance': radius, 'min_price': 500, 'max_price': max_price})
results = cl.get_results()
# If data file already exists, only update it with new data (by grabbing latest date)
fname = 'Apartments_' + self.site + 'Craigslist.csv'
if not overwrite and os.path.isfile(".\\Data\\" + fname):
with open(".\\Data\\" + fname) as f:
self.last_update = f.readlines()[1].split(',')[2]
print("Grabbing data after " + self.last_update)
ads_info = []
for result in results:
print(len(ads_info)) # Some indication of progress
ad_info = {}
def get_attr(ad, attr):
try: return ad[attr]
except: return ''
ad_info['Title'] = get_attr(result, 'name')
ad_info['Area'] = get_attr(result, 'area')
ad_info['Bedrooms'] = get_attr(result, 'bedrooms')
ad_info['Link'] = get_attr(result, 'url')
ad_info['Price'] = get_attr(result, 'price')
ad_info['Location'] = get_attr(result, 'geotag')
ad_info['Date'] = get_attr(result, 'datetime')
if self.last_update:
if dt.strptime(ad_info['Date'], "%Y-%m-%d %H:%M") <= dt.strptime(self.last_update, "%Y-%m-%d %H:%M:%S"):
break # If we already have the data, dont grab it again - stop the process, since its sorted by date
ads_info.append(ad_info)
# Save data to csv file
if len(ads_info) > 0:
if os.path.isfile(".\\Data\\" + fname) and not overwrite:
temp_df = pd.read_csv(".\\Data\\" + fname)
temp_df = temp_df.append(ads_info)
write_to_csv(temp_df, fname)
else:
write_to_csv(ads_info, fname)
``` |
{
"source": "joshwilsonnc/youtube-shuffle",
"score": 2
} |
#### File: joshwilsonnc/youtube-shuffle/api.py
```python
import datetime
import json
import os
import cherrypy
import googleapiclient.discovery
import googleapiclient.errors
import requests
client = None
def isPlaylistItemAVideo(item):
return item['snippet']['resourceId']['kind'] == 'youtube#video'
def mapPlaylistItem(item):
return {
'id': item['snippet']['resourceId']['videoId'],
'title': item['snippet']['title'],
'channel': item['snippet']['channelTitle'],
'position': item['snippet']['position']
}
def extractPlaylistInformation(response):
if len(response['items']) > 0:
return {
'title': response['items'][0]['snippet']['title'],
'channel': response['items'][0]['snippet']['channelTitle'],
}
return {}
def memoizeWeekly(f):
memo = {}
def helper(x=None):
if x not in memo or memo[x]['time'] + datetime.timedelta(weeks=1) < datetime.datetime.now():
memoized = {
'item': f(x),
'time': datetime.datetime.now()
}
memo[x] = memoized
return memo[x]['item']
return helper
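# Usage sketch (hypothetical fetcher): wrapping a one-argument function caches
# its result per argument and recomputes any entry older than a week, e.g.
#   @memoizeWeekly
#   def fetch_title(playlist_id):
#       ...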
def get_youtube_client():
api_service_name = "youtube"
api_version = "v3"
api_key = os.environ.get('YOUTUBE_API_KEY', None)
if not api_key:
print("'YOUTUBE_API_KEY' environment variable is required but not set")
return None
return googleapiclient.discovery.build(
api_service_name, api_version, developerKey=api_key)
@memoizeWeekly
def get_playlist_information(playlistId):
part = 'id,snippet'
youtube = get_youtube_client()
request = youtube.playlists().list(
part=part, id=playlistId, maxResults='50')
response = request.execute()
if response:
return extractPlaylistInformation(response)
return {}
@memoizeWeekly
def get_playlist_items(playlistId):
part = 'id,snippet'
youtube = get_youtube_client()
request = youtube.playlistItems().list(
part=part, playlistId=playlistId, maxResults='50')
response = request.execute()
items = []
if response and 'items' in response:
items.extend(response['items'])
while response and 'nextPageToken' in response:
request = youtube.playlistItems().list(part=part, playlistId=playlistId,
maxResults='50', pageToken=response['nextPageToken'])
response = request.execute()
if response and 'items' in response:
items.extend(response['items'])
return [mapPlaylistItem(item) for item in items if isPlaylistItemAVideo(item)]
class StaticWebsiteService(object):
pass
class YoutubePlaylistService(object):
@cherrypy.expose
def items(self, playlistId):
return json.dumps(get_playlist_items(playlistId))
@cherrypy.expose
def info(self, playlistId):
return json.dumps(get_playlist_information(playlistId))
if __name__ == '__main__':
project_root = os.environ.get('YOUTUBE_SHUFFLE_ROOT', None)
if not project_root:
print("'YOUTUBE_SHUFFLE_ROOT' environment variable is required but not set")
exit
client = get_youtube_client()
if not client:
print('Failed to initialize Google API client.')
        exit(1)
log_dir = os.environ.get('YOUTUBE_SHUFFLE_LOG_DIR', '.')
cherrypy.config.update({
'log.screen': False,
'log.access_file': os.path.join(log_dir, 'access.log'),
'log.error_file': os.path.join(log_dir, 'error.log'),
'tools.staticdir.root': project_root
})
cherrypy.server.socket_host = '0.0.0.0'
    cherrypy.tree.mount(StaticWebsiteService(), '/', 'config/static.cfg')
    cherrypy.quickstart(YoutubePlaylistService(), '/api')
``` |
{
"source": "joshwkearney/lofreq",
"score": 3
} |
#### File: tools/lofreq_star/multiple_testing.py
```python
__author__ = "<NAME>, <NAME>, <NAME>"
__email__ = "<EMAIL>"
#__copyright__ = ""
__license__ = "BSD"
from itertools import groupby
class AbstractCorrection(object):
def __init__(self, pvals, a=.05, n=None):
self.pvals = self.corrected_pvals = list(pvals)
# number of multiple tests
if n:
assert n>len(pvals)
self.n = n
else:
self.n = len(self.pvals)
# type-1 error cutoff for each test
self.a = a
self.set_correction()
def set_correction(self):
# the purpose of multiple correction is to lower the alpha
# instead of the canonical value (like .05)
pass
class Bonferroni(AbstractCorrection):
"""http://en.wikipedia.org/wiki/Bonferroni_correction
>>> ["%.4f" % v for v in Bonferroni([0.01, 0.01, 0.03, 0.05, 0.005], a=0.05).corrected_pvals]
['0.0500', '0.0500', '0.1500', '0.2500', '0.0250']
"""
def set_correction(self):
self.corrected_pvals = [pv * self.n
for pv in self.corrected_pvals]
class Sidak(AbstractCorrection):
"""http://en.wikipedia.org/wiki/Bonferroni_correction
>>> ["%.8f" % v for v in Sidak([0.01, 0.01, 0.03, 0.05, 0.005], a=0.05).corrected_pvals]
['0.04898974', '0.04898974', '0.14696923', '0.24494871', '0.02449487']
"""
def set_correction(self):
if self.n != 0:
correction = self.a * 1. / (1 - (1 - self.a) ** (1. / self.n))
else:
correction = 1
self.corrected_pvals = [pv * correction
for pv in self.corrected_pvals]
class HolmBonferroni(AbstractCorrection):
"""http://en.wikipedia.org/wiki/Holm-Bonferroni_method
given a list of pvals, perform the Holm-Bonferroni correction
and return the indexes from original list that are significant.
    (can't use p-value as that may be repeated.)
>>> ["%.4f" % v for v in HolmBonferroni([0.01, 0.01, 0.03, 0.05, 0.005], a=0.05).corrected_pvals]
['0.0400', '0.0400', '0.0600', '0.0500', '0.0250']
"""
def set_correction(self):
if len(self.pvals):
for (i, c) in self.generate_significant():
self.corrected_pvals[i] *= c
def generate_significant(self):
pvals = self.pvals
pvals_idxs = zip(pvals, range(len(pvals)))
pvals_idxs = sorted(pvals_idxs)
#lp = len(self.pvals)
lp = self.n
for pval, idxs in groupby(pvals_idxs, lambda x: x[0]):
idxs = list(idxs)
for p, i in idxs:
if p * 1. / lp < self.a:
yield (i, lp)
lp -= len(idxs)
# also in the original file, but removed here:
#class FDR
#def calc_qval
if __name__ == '__main__':
import doctest
doctest.testmod()
```
#### File: tools/scripts/lofreq2_add_fake_gt.py
```python
import sys
import os
import argparse
import logging
import csv
import gzip
#--- third-party imports
#
#/
#--- project specific imports
#
# /
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "2016 Genome Institute of Singapore"
__license__ = "The MIT License"
# global logger
#
LOG = logging.getLogger("")
logging.basicConfig(level=logging.WARN,
format='%(levelname)s [%(asctime)s]: %(message)s')
FORMAT_HEADER = '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'
def cmdline_parser():
"""
    creates an ArgumentParser instance
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--verbose",
action="store_true",
dest="verbose",
help="be verbose")
parser.add_argument("--debug",
action="store_true",
dest="debug",
help="enable debugging")
parser.add_argument("-i", "--vcf-in",
dest="vcf_in",
required=True,
help="Input vcf file listing somatic variants"
" (gzip supported; - for stdin).")
default = "-"
parser.add_argument("-o", "--vcf-out",
dest="vcf_out",
default=default,
help="Output vcf file (gzip supported; - for stdout;"
" default: %s)." % default)
parser.add_argument("-s", "--samples",
required=True, nargs='+',
help="Sample name/s")
return parser
def add_fake_gt(vcf_in, vcf_out, sample_names):
"""Add fake genotype to header and variants"""
assert len(set(sample_names)) == len(sample_names), ("Duplicate sample names found")
# set up vcf_reader
#
if vcf_in == '-':
fh_in = sys.stdin
else:
assert os.path.exists(vcf_in)
if vcf_in[-3:] == ".gz":
fh_in = gzip.open(vcf_in, 'rt')
else:
fh_in = open(vcf_in, 'rt')
vcf_reader = csv.reader(fh_in, delimiter='\t')
# set up vcf_writer/fh_out
#
if vcf_out == '-':
fh_out = sys.stdout
else:
assert not os.path.exists(vcf_out)
        if vcf_out[-3:] == ".gz":
            fh_out = gzip.open(vcf_out, 'wt')
        else:
            fh_out = open(vcf_out, 'wt')
vcf_writer = csv.writer(fh_out, delimiter='\t',
quotechar='', quoting=csv.QUOTE_NONE,
lineterminator=os.linesep)
has_our_format_in_header = False
for row in vcf_reader:
# modify row if needed and finally print
if row[0].startswith('##'):
if row[0].startswith('##FORMAT'):
if row[0] == FORMAT_HEADER:
has_our_format_in_header = True
else:
LOG.fatal("Incompatible, pre-existing format definition found. Exiting")
raise ValueError(row)
# don't touch header
elif row[0].startswith('#CHROM'):
# insert genotype format line
if not has_our_format_in_header:
extrarow = ['##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">']
vcf_writer.writerow(extrarow)
if not "FORMAT" in row:
row.append("FORMAT")
for name in sample_names:
row.append(name)
else:
# add format and sample columns
assert len(row) >= 8, (
"variant incomplete or FORMAT column already exists")
# Add GT column if not present
if len(row) > 8:
assert row[8] == 'GT'
else:
row.append("GT")
# Add fake GT
for _ in sample_names:
row.append(".")
vcf_writer.writerow(row)
if fh_in != sys.stdin:
fh_in.close()
if fh_out != sys.stdout:
fh_out.close()
def main():
"""main function
"""
parser = cmdline_parser()
args = parser.parse_args()
if args.verbose:
LOG.setLevel(logging.INFO)
if args.debug:
LOG.setLevel(logging.DEBUG)
for (in_file, descr) in [#(args.bam, "BAM"),
(args.vcf_in, "VCF input")]:
if not in_file:
parser.error("%s file argument missing." % descr)
sys.exit(1)
if not os.path.exists(in_file) and in_file != "-":
LOG.fatal("file '%s' does not exist.\n", in_file)
sys.exit(1)
for (out_file, descr) in [(args.vcf_out, "VCF output")]:
if not out_file:
parser.error("%s output file argument missing." % descr)
sys.exit(1)
if os.path.exists(out_file) and out_file != "-":
LOG.fatal("Cowardly refusing to overwrite existing"
" output file '%s'.\n", out_file)
sys.exit(1)
add_fake_gt(args.vcf_in, args.vcf_out, args.samples)
if __name__ == "__main__":
main()
LOG.info("Successful program exit")
```
#### File: tools/scripts/lofreq2_add_sample.py
```python
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "2014 Genome Institute of Singapore"
__license__ = "The MIT License"
# --- standard library imports
#
import sys
import os
import argparse
import logging
from collections import OrderedDict, namedtuple
import csv
import gzip
#--- third-party imports
#
import pysam
assert [int(x) for x in pysam.__version__.split('.')] >= [0, 8, 2]
#--- project specific imports
#
# /
# global logger
#
LOG = logging.getLogger("")
logging.basicConfig(level=logging.WARN,
format='%(levelname)s [%(asctime)s]: %(message)s')
Variant = namedtuple('Variant',
['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO'])
# all fields except POS (int) are strings and values are preserved as-is
Format = namedtuple('Format',
['id', 'num', 'type', 'descr'])
def median(data):
"""compute median of provided list"""
if not len(data):
return None
# http://stackoverflow.com/questions/10482339/how-to-find-median/10482422#10482422 answer by user3100512
return sorted(data)[len(data)//2]
def cmdline_parser():
"""
    creates an ArgumentParser instance
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--verbose",
action="store_true",
dest="verbose",
help="be verbose")
parser.add_argument("--debug",
action="store_true",
dest="debug",
help="enable debugging")
parser.add_argument("-i", "--vcf-in",
dest="vcf_in",
required=True,
help="Input vcf file listing somatic variants"
" (gzip supported; - for stdin).")
default = "-"
parser.add_argument("-o", "--vcf-out",
dest="vcf_out",
default=default,
help="Output vcf file (gzip supported; - for stdout;"
" default: %s)." % default)
parser.add_argument("-b", "--bam",
dest="bams", nargs="*",
required=True,
help="BAM files, e.g. normal and tumor bam")
return parser
def fmt_to_line(fmt):
"""convert format class to vcf line"""
return "##FORMAT=<ID=%s,Number=%s,Type=%s,Description=\"%s\">" % (
fmt.id, fmt.num, fmt.type, fmt.descr)
def gen_formats():
"""Must be in sync with gen_plp_data
"""
formats = OrderedDict()
for (fid, num_str, type_str, descr) in [
('DP', '1', 'Integer', 'Read depth at this position for this sample'),# standard
('NR', '1', 'Integer', 'Number of reference bases'),
('NA', '1', 'Integer', 'Number of alternate bases'),
('OR', '1', 'Integer', 'Number of orphan reads supporting reference bases'),
('OA', '1', 'Integer', 'Number of orphan reads supporting alternate bases'),
('BR', '3', 'Integer', 'Minimum, median and maximum base-qualities for reference bases'),
('BA', '3', 'Integer', 'Minimum, median and maximum base-qualities for alternate bases'),
('MR', '3', 'Integer', 'Minimum, median and maximum mapping-qualities for reference bases'),
('MA', '3', 'Integer', 'Minimum, median and maximum mapping-qualities for alternate bases')]:
formats[fid] = Format(id=fid, num=num_str, type=type_str, descr=descr)
return formats
def gen_plp_data(sam_fh, var):
"""generate data must be in sync with gen_formats()
"""
for plp_col in sam_fh.pileup(var.CHROM, var.POS-1, var.POS):
# pileup() extracts all reads overlapping that region.
# only look at the one of interest
if plp_col.pos != var.POS-1:
continue
cov = plp_col.n
bqs = {'ref': [], 'alt': []}
mqs = {'ref': [], 'alt': []}
num_orphans = {'ref': 0, 'alt': 0}
for plp_read in plp_col.pileups:
aln_read = plp_read.alignment
# most minimal filtering
if aln_read.is_unmapped or aln_read.is_secondary or \
aln_read.is_qcfail or aln_read.is_duplicate:
continue
if aln_read.is_paired and aln_read.mate_is_unmapped:
assert not aln_read.is_unmapped
is_orphan = True
else:
is_orphan = False
base = aln_read.seq[plp_read.query_position]
mq = aln_read.mapq
bq = ord(aln_read.qual[plp_read.query_position])-33
if base == var.REF:
k = 'ref'
elif base == var.ALT[0]:
k = 'alt'
else:
continue
bqs[k].append(bq)
mqs[k].append(mq)
if is_orphan:
num_orphans[k] += 1
(min_bqs, median_bqs, max_bqs) = (
{'ref': -1, 'alt': -1},
{'ref': -1, 'alt': -1},
{'ref': -1, 'alt': -1})
(min_mqs, median_mqs, max_mqs) = (
{'ref': -1, 'alt': -1},
{'ref': -1, 'alt': -1},
{'ref': -1, 'alt': -1})
for k in ['ref', 'alt']:
if len(bqs[k]):
(min_bqs[k], median_bqs[k], max_bqs[k]) = (
min(bqs[k]), median(bqs[k]), max(bqs[k]))
if len(mqs[k]):
(min_mqs[k], median_mqs[k], max_mqs[k]) = (
min(mqs[k]), median(mqs[k]), max(mqs[k]))
sample_data = OrderedDict()
for (fmt_key, val) in [
('DP', "%d" % cov),
('NR', "%d" % len(bqs['ref'])),
('NA', "%d" % len(bqs['alt'])),
('OR', "%d" % num_orphans['ref']),
('OA', "%d" % num_orphans['alt']),
('BR', "%d,%d,%d" % (min_bqs['ref'], median_bqs['ref'], max_bqs['ref'])),
('BA', "%d,%d,%d" % (min_bqs['alt'], median_bqs['alt'], max_bqs['alt'])),
('MR', "%d,%d,%d" % (min_mqs['ref'], median_mqs['ref'], max_mqs['ref'])),
('MA', "%d,%d,%d" % (min_mqs['alt'], median_mqs['alt'], max_mqs['alt']))]:
sample_data[fmt_key] = val
return sample_data
def add_plp_to_vcf(vcf_in, vcf_out, bam_files):
"""process each var in vcf_in and add plp info from sam_fh,
    writing to vcf_out. There is no way to edit/add format fields in current
    versions of pyvcf (as of 2014-06-30). See discussion here
    https://github.com/jamescasbon/PyVCF/issues/82 for patches and
    workarounds. We chose to use the csv module instead for simplicity
"""
assert all([os.path.exists(b) for b in bam_files])
# set up vcf_reader
#
if vcf_in == '-':
fh_in = sys.stdin
else:
assert os.path.exists(vcf_in)
if vcf_in[-3:] == ".gz":
fh_in = gzip.open(vcf_in, 'rb')
else:
fh_in = open(vcf_in, 'rb')
vcf_reader = csv.reader(fh_in, delimiter='\t')
# set up vcf_writer/fh_out
#
if vcf_out == '-':
fh_out = sys.stdout
else:
assert not os.path.exists(vcf_out)
if vcf_out[-3:] == ".gz":
fh_out = gzip.open(vcf_out, 'wb')
else:
fh_out = open(vcf_out, 'wb')
vcf_writer = csv.writer(fh_out, delimiter='\t',
quotechar='', quoting=csv.QUOTE_NONE,
lineterminator=os.linesep)
formats = gen_formats()
for row in vcf_reader:
if row[0].startswith('#'):
if row[0] == "#CHROM":
assert len(row) == 8, (
"variant incomplete or FORMAT column already exists")
# before writing header, add our format description.
for fmt in formats.values():
vcf_writer.writerow([fmt_to_line(fmt)])
row.append("FORMAT")
for bam in bam_files:
row.append(os.path.basename(bam))
vcf_writer.writerow(row)
else:
assert len(row) == 8, (
"variant incomplete or FORMAT column already exists")
var = Variant._make([row[0], int(row[1]), row[2], row[3],
row[4], row[5], row[6], row[7]])
# no support for indels
if 'INDEL' in var.INFO.split(';') or len(var.REF) > 1 or len(var.ALT) > 1:
LOG.warn("Skipping unsupported variant) %s:%d:%s" % (
var.CHROM, var.POS, var.REF))
continue
row.append(':'.join(formats.keys()))
for bam in bam_files:
assert os.path.exists(bam)
sam_fh = pysam.AlignmentFile(bam)
sample_data = gen_plp_data(sam_fh, var)
assert sample_data.keys() == formats.keys(), (
"sample keys (%s) != format keys (%s)" % (sample_data.keys(), formats.keys()))
row.append(':'.join(sample_data.values()))
vcf_writer.writerow(row)
if fh_in != sys.stdin:
fh_in.close()
if fh_out != sys.stdout:
fh_out.close()
def main():
"""main function
"""
parser = cmdline_parser()
args = parser.parse_args()
if args.verbose:
LOG.setLevel(logging.INFO)
if args.debug:
LOG.setLevel(logging.DEBUG)
for (in_file, descr) in [#(args.bam, "BAM"),
(args.vcf_in, "VCF input")]:
if not in_file:
parser.error("%s file argument missing." % descr)
sys.exit(1)
if not os.path.exists(in_file) and in_file != "-":
LOG.fatal("file '%s' does not exist.\n" % in_file)
sys.exit(1)
for (out_file, descr) in [(args.vcf_out, "VCF output")]:
if not out_file:
parser.error("%s output file argument missing." % descr)
sys.exit(1)
if os.path.exists(out_file) and out_file != "-":
LOG.fatal("Cowardly refusing to overwrite existing"
" output file '%s'.\n" % out_file)
sys.exit(1)
add_plp_to_vcf(args.vcf_in, args.vcf_out, args.bams)
if __name__ == "__main__":
main()
LOG.info("Successful program exit")
``` |
{
"source": "Joshwlks/RL-SLT",
"score": 3
} |
#### File: env/reward/bleu.py
```python
from __future__ import division
import numpy
import math
import fractions
from nltk.util import ngrams
from collections import Counter
from fractions import Fraction
def sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None):
"""
:param references: reference sentences
:type references: list(list(str))
:param hypothesis: a hypothesis sentence
:type hypothesis: list(str)
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:return: The sentence-level BLEU score.
:rtype: float
"""
return corpus_bleu([references], [hypothesis], weights, smoothing_function)
def corpus_bleu(list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None):
"""
:param references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:return: The corpus-level BLEU score.
:rtype: float
"""
# Before proceeding to compute BLEU, perform sanity checks.
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
    assert len(list_of_references) == len(hypotheses), "The number of hypotheses and their reference(s) should be the same"
# Iterate through each hypothesis and their corresponding references.
for references, hypothesis in zip(list_of_references, hypotheses):
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i = modified_precision(references, hypothesis, i)
p_numerators[i] += p_i.numerator
p_denominators[i] += p_i.denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(references, hyp_len)
# Calculate corpus-level brevity penalty.
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Collects the various precision values for the different ngram orders.
p_n = [Fraction(p_numerators[i], p_denominators[i], _normalize=False)
for i, _ in enumerate(weights, start=1)]
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0, 0
# Smoothen the modified precision.
# Note: smooth_precision() converts values into float.
if not smoothing_function:
smoothing_function = SmoothingFunction().method0
p_n = smoothing_function(p_n, references=references,
hypothesis=hypothesis, hyp_len=hyp_len)
# Calculates the overall modified precision for all ngrams.
# By sum of the product of the weights and the respective *p_n*
s = (w * math.log(p_i) for w, p_i in zip(weights, p_n)
if p_i.numerator != 0)
# return bp * math.exp(math.fsum(s))
bleu = math.exp(math.fsum(s))
bleup = bleu * bp
return bleu, bleup
def modified_precision(references, hypothesis, n):
"""
:param references: A list of reference translations.
:type references: list(list(str))
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: BLEU's modified precision for the nth order ngram.
:rtype: Fraction
"""
# Extracts all ngrams in hypothesis.
counts = Counter(ngrams(hypothesis, n))
# Extract a union of references' counts.
## max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
max_counts = {}
for reference in references:
reference_counts = Counter(ngrams(reference, n))
for ngram in counts:
max_counts[ngram] = max(max_counts.get(ngram, 0),
reference_counts[ngram])
# Assigns the intersection between hypothesis and references' counts.
clipped_counts = {ngram: min(count, max_counts[ngram])
for ngram, count in counts.items()}
numerator = sum(clipped_counts.values())
# Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
# Usually this happens when the ngram order is > len(reference).
denominator = max(1, sum(counts.values()))
return Fraction(numerator, denominator, _normalize=False)
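# Worked example for modified_precision (added here purely as an illustration,
# not part of the original module):
#   hypothesis = ['the', 'the', 'the'], references = [['the', 'cat']], n = 1
#   counts         = {('the',): 3}
#   max_counts     = {('the',): 1}   (hypothesis counts clipped by the reference)
#   clipped_counts = {('the',): 1}
#   => modified precision = Fraction(1, 3)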
def closest_ref_length(references, hyp_len):
'''
This function finds the reference length that is closest in length to the
hypothesis. The closest reference length is referred to as *r* variable
from the brevity penalty formula in Papineni et. al. (2002)
:param references: A list of reference translations.
:type references: list(list(str))
:param hypothesis: The length of the hypothesis.
:type hypothesis: int
:return: The length of the reference that's closest to the hypothesis.
:rtype: int
'''
ref_lens = (len(reference) for reference in references)
closest_ref_len = min(ref_lens, key=lambda ref_len:
(abs(ref_len - hyp_len), ref_len))
return closest_ref_len
def brevity_penalty(closest_ref_len, hyp_len):
"""
:param hyp_len: The length of the hypothesis for a single sentence OR the
sum of all the hypotheses' lengths for a corpus
:type hyp_len: int
:param closest_ref_len: The length of the closest reference for a single
hypothesis OR the sum of all the closest references for every hypotheses.
:type closest_reference_len: int
:return: BLEU's brevity penalty.
:rtype: float
"""
if hyp_len > closest_ref_len:
return 1
# If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
elif hyp_len == 0:
return 0
else:
return math.exp(1 - closest_ref_len / hyp_len)
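# Quick numeric check for brevity_penalty (added for illustration only):
#   hyp_len=12, closest_ref_len=10 -> hypothesis longer than reference -> BP = 1
#   hyp_len=10, closest_ref_len=12 -> BP = exp(1 - 12/10) = exp(-0.2) ~= 0.8187
#   hyp_len=0                      -> BP = 0, which forces the BLEU score to 0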
class SmoothingFunction:
"""
This is an implementation of the smoothing techniques
for segment-level BLEU scores that was presented in
<NAME> and <NAME> (2014) A Systematic Comparison of
Smoothing Techniques for Sentence-Level BLEU. In WMT14.
http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
"""
def __init__(self, epsilon=0.1, alpha=5, k=5):
"""
:param epsilon: the epsilon value use in method 1
:type epsilon: float
:param alpha: the alpha value use in method 6
:type alpha: int
:param k: the k value use in method 4
:type k: int
"""
self.epsilon = epsilon
self.alpha = alpha
self.k = k
def method0(self, p_n, *args, **kwargs):
""" No smoothing. """
return p_n
def method5(self, p_n, references, hypothesis, hyp_len):
"""
Smoothing method 5:
        The matched counts for similar values of n should be similar. To
        calculate the n-gram matched count, it averages the n−1, n and n+1 gram
matched counts.
"""
m = {}
        # Requires a precision value for an additional ngram order.
p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)]
m[-1] = p_n[0] + 1
for i, p_i in enumerate(p_n):
p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3
m[i] = p_n[i]
return p_n
### for the reward function ###
def BLEUwithForget(beta=None, discount=1., return_quality=False, **_k):
# init
words = _k['words'].split() # end-of-sentence is treated as a word
ref = _k['reference']
q0 = numpy.zeros((_k['steps'],))
# check 0, 1
maps = [(it, a) for it, a in enumerate(_k['act']) if a < 2]
kmap = len(maps)
lb = numpy.zeros((kmap,))
ts = numpy.zeros((kmap,))
q = numpy.zeros((kmap,))
if not beta:
beta = kmap
beta = 1. / float(beta)
chencherry = SmoothingFunction()
# compute BLEU for each Yt
Y = []
bleus = []
truebleus = []
if len(words) == 0:
bleus = [0]
truebleus = [0]
    for t in range(len(words)):
if len(Y) > 0:
_temp = Y[-1] + ' ' + words[t]
_temp = _temp.replace('@@ ', '')
Y = Y[:-1] + _temp.split()
else:
Y = [words[t]]
bb = sentence_bleu(ref, Y, smoothing_function=chencherry.method5)
bleus.append(bb[1]) # try true BLEU
truebleus.append(bb[1])
# print 'Latency BLEU', lbn
bleus = [0] + bleus # use TRUE BLEU
bleus = numpy.array(bleus)
temp = bleus[1:] - bleus[:-1]
tpos = 0
for pos, (it, a) in enumerate(maps):
if (a == 1) and (tpos < len(words)):
q[pos] = temp[tpos]
q0[it] = q[pos]
tpos += 1
# add the whole sentence balance on it
q0[-1] = truebleus[-1] # the last BLEU we use the real BLEU score.
return q0
if __name__ == "__main__":
hyps="S'il vous plaît, rendez-vous à l'aise."
hyps=hyps.split()
refs=[hyps]
print(f"hyps: {refs}")
print(f"The BLEU score for the sentence: {sentence_bleu(refs,hyps, smoothing_function=SmoothingFunction().method0)}")
``` |
{
"source": "JoshWorld/FastCampus-Python-Django",
"score": 4
} |
#### File: codefights/0518/task_0518.py
```python
def incrementalBackups(lastBackupTime, changes):
if changes == []:
return []
sorted_list = sorted(changes, key=lambda data:data[1])
result_list = []
for data in sorted_list:
if lastBackupTime < data[0]:
if data[1] not in result_list:
result_list.append(data[1])
return result_list
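# Illustrative calls (not part of the original kata; values chosen by hand).
# Each change is a (changeTime, fileId) pair; only files changed strictly after
# lastBackupTime are returned, de-duplicated and ordered by fileId:
#   incrementalBackups(3, [[5, 2], [5, 3], [2, 1]])  # -> [2, 3]
#   incrementalBackups(3, [])                        # -> []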
```
#### File: FastCampus-Python-Django/Week-2/06_lambda_study.py
```python
(lambda x,y: x + y)(2, 3)
(lambda x, y: x ** y)(2, 3)
# [1, 2, 3] => [1, 4, 9]
# map(func, seq)
def square(x):
return x ** 2
a = list(map(square, [1, 2, 3]))
# Let's refactor the one above a bit more
# Let's rewrite it as a lambda
list(map(lambda x: x ** 2, [1, 2, 3]))
# 1 + 2 => 3
# 2 + 3 => 5
# 4 + 3 => 7
list(map(lambda x,y:x + y, [1, 2, 3], [2, 3, 4]))
list(map(lambda x,y:x + y, (1, 2, 3), (2, 3, 4)))
# Adds only up to the length of the shorter sequence
# 2 + 2 = 4
# 3 + 3 = 6
# 4 + 4 = 8
list(map(lambda x,y:x + y, [2, 3, 4, 5], [2, 3, 4]))
# filter
# filter(func, seq)
# Used when you want to apply a condition
number_list = list(range(1, 101))
# Get the even numbers
list(filter(lambda x: x % 2 == 0, number_list))
list(filter(lambda x: x > 5, number_list))
# Reduce
# The one that reduces
# The one that leaves only a single value
# Python 3 => moved into functools, so an import is required
# Sum of the elements of the list
# Calculation steps
'''
10 + 20 = 30
30 + 30 = 60
60 + 40 = 100
'''
import functools
functools.reduce(lambda x,y: x + y, [10, 20, 30, 40])
# Find the largest value in the list
# [value when true] if [condition] else [value when false]
# For the maximum: comparing x and y, 10>20 is false, so 20 remains.
functools.reduce(lambda x,y: x if x>y else y, [10, 20, 30, 40])
# For the minimum: comparing x and y, 10<20 is true, so 10 remains.
functools.reduce(lambda x,y: x if x<y else y, [10, 20, 30, 40])
# Build a new list by squaring only the numeric elements
# Apply an operation to every element with no condition => map
# Filter values out with a condition => filter
# Boil everything down to a single value => reduce
awesome_list = [1, 2, "안수찬", {}, 4, 5]
list(map(
lambda x: x ** 2,
filter(
lambda x: isinstance(x, int),
awesome_list
)
))
# Compute the sum from 1 to 100
functools.reduce(
lambda x,y: x + y,
[i for i in range(1, 101)]
)
functools.reduce(
lambda x,y: x + y,
list(range(1, 101))
)
# Sum of the even numbers from 1 to 100
functools.reduce(
lambda x, y: x+y,
filter(
lambda x: x%2 == 0,
list(range(1, 101))
)
)
# FizzBuzz
# Multiples of 3 => Fizz
# Multiples of 5 => Buzz
# Multiples of 15 => FizzBuzz
# 3, 6, 9, 12, 15
# 5, 10, 15, 20, 25
# 15, 30, 45, 60
# Divisible by both 3 and 5 => a multiple of 15
'''
list(
filter(
lambda x: x%3 == 0 or x%5 == 0,
)
)
'''
# List Comprehension
# Multiples of 3
[i for i in range(1, 101) if i % 3 == 0]
# List of the squares of the even numbers from 1 to 100
[i ** 2 for i in range(1, 101) if i % 2 ==0]
# palindrome
# 기러기 => 기러기 (Korean palindrome examples)
# 소주만병만주소 => 소주만병만주소
# Let's start with a definition
# Definition => take a string; return True if it reads the same reversed, otherwise False
word = "기러기"
len(word)
# 가장 기초적인 방법
def reverse(word):
reversed_word = ""
for i in range(len(word)):
reversed_word += word[len(word)-1-i]
return reversed_word
# python slice
# [start:end:step]
"기러기는기러기다"[::-1]
def is_palindrome(word):
return word == word[::-1]
is_palindrome("소주만병만주소")
# It's a one-liner, so let's try it with a lambda!!
(lambda x: x == x[::-1])("가나")
``` |
{
"source": "JoshWorld/RT-OCF",
"score": 2
} |
#### File: tools/internal/common.py
```python
class Result:
def __init__(self, exitcode=0, message='', data={}):
self.exitcode = exitcode
self.message = message
self.data = data
```
#### File: tools/internal/linter.py
```python
import os
import subprocess
from internal.config import RT_OCF_ROOT_TOOLS_INTERNAL
from internal.common import Result
from internal.utils import execute_return_output
from internal.utils import print_fail, print_green
TIZEN_LINT_TOOL = os.path.join(
RT_OCF_ROOT_TOOLS_INTERNAL,
'checkpatch_tizen.pl')
class Linter:
def __init__(self):
pass
def execute(self, file_list, show_warning):
warning_count = {'error': 0, 'warning': 0}
for filepath in file_list:
if not filepath.endswith('.c') and not filepath.endswith('.h'):
continue
each_result = self.execute_lint(filepath, show_warning)
warning_count['error'] += each_result['error']
warning_count['warning'] += each_result['warning']
report = 'error: {}, warning: {}'.format(
warning_count['error'], warning_count['warning'])
exitcode = 0
if warning_count['error'] > 0:
exitcode = 1
print_fail('Catch you!!! You did not take code style seriously.')
return Result(message=report, exitcode=exitcode)
def execute_lint(self, fullpath, show_warning):
cmd = '{} {} 2> /dev/null'.format(TIZEN_LINT_TOOL, fullpath)
lint_result = {'error': 0, 'warning': 0}
print_green('$ ' + fullpath)
result = execute_return_output(cmd, is_print_cmd=False)
for line in result.data.split('\n'):
if 'ERROR: ' in line:
print(line.rstrip())
if show_warning and 'WARNING: ' in line:
print(line.rstrip())
if line.startswith('total: '):
splited = line.split(' ')
lint_result['error'] = int(splited[1])
lint_result['warning'] = int(splited[3])
                if lint_result['error'] != 0 or lint_result['warning'] != 0:
                    print('{}, {}'.format(line.rstrip(), fullpath))
return lint_result
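# Note (added for clarity): the parsing above assumes checkpatch-style summary
# lines such as
#   total: 2 errors, 5 warnings, 312 lines checked
# i.e. the error count sits at index 1 and the warning count at index 3 after
# splitting the line on single spaces.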
```
#### File: tools/internal/peak_calculator.py
```python
import os
import glob
import re
from internal.utils import execute
from internal.utils import execute_return_output
from internal.common import Result
class Peak:
def __init__(self):
self.total = 0
self.useful_heap = 0
self.extra_heap = 0
self.stacks = 0
def __str__(self):
output = 'total:{}, '.format(self.total)
output += 'useful_heap:{}, '.format(self.useful_heap)
output += 'extra_heap:{}, '.format(self.extra_heap)
output += 'stacks:{}'.format(self.stacks)
return output
def __repr__(self):
return self.__str__()
def __lt__(self, other):
return self.total < other.total
def __gt__(self, other):
return other.__lt__(self)
def __eq__(self, other):
return self.total == other.total
def __ne__(self, other):
return not self.__eq__(other)
class PeakCalculator:
def __init__(self):
pass
def calculate(self, data):
return self.parse_massif(data)
def parse_massif(self, massif_data):
target_snapshot = self.find_peak_memory_snap_no(massif_data)
return self.find_memory_info_by_snap_no(massif_data, target_snapshot)
def find_peak_memory_snap_no(self, massif_data):
        peak_re = re.compile(r'(?P<snap_no>\d+) \(peak\)')
        result = peak_re.findall(massif_data)
        if len(result) == 0:
            return None
        return result[0]
def find_memory_info_by_snap_no(self, massif_data, snap_no):
memory_info_re = r"^\s+{}\s+([0-9,]+)\s+(?P<total>[0-9,]+)\s+(?P<useful_heap>[0-9,]+)\s+(?P<extra_heap>[0-9,]+)\s+(?P<stacks>[0-9,]+)".format(snap_no)
compiled_re = re.compile(memory_info_re, re.MULTILINE)
m = compiled_re.search(massif_data)
if m is None:
return None
dic = m.groupdict()
peak = Peak()
peak.total = int(dic['total'].replace(',', ''))
peak.useful_heap = int(dic['useful_heap'].replace(',', ''))
peak.extra_heap = int(dic['extra_heap'].replace(',', ''))
peak.stacks = int(dic['stacks'].replace(',', ''))
return peak
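# Note (added for clarity): the regex above targets snapshot rows as printed by
# `ms_print`, which look roughly like (column widths vary between runs):
#     n        time(i)         total(B)   useful-heap(B) extra-heap(B)    stacks(B)
#    12      1,234,567          123,456          100,000         20,000        3,456
# The thousands separators are stripped before converting each field to int.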
```
#### File: tools/internal/tizenrt_testresult_collector.py
```python
import glob
import serial
import sys
from internal.common import Result
import time
WIFI_SSID = 'ZEROROOT'
WIFI_PASSWORD = '<PASSWORD>'
class TestResultCollector:
def __init__(self, usb_device=None):
if usb_device is None:
usb_device = self.get_usb_tty_number()
self.serial = self.create_serial(usb_device)
def get_usb_tty_number(self):
ttyUSBs = glob.glob('/sys/class/tty/ttyUSB*')
if len(ttyUSBs) == 0:
print('TizenRT is not connected')
exit(1)
return '/dev/{}'.format(ttyUSBs[0].split('/')[-1])
def create_serial(self, usb_device):
return serial.Serial(usb_device, 115200, timeout=70)
def collect(self, options=''):
time.sleep(2)
self.write_connecting_wifi_command()
command = 'iot_rt_unittest ' + options + '\n'
self.serial.write(command)
return self.read_serial_output()
def write_connecting_wifi_command(self):
self.serial.write('wifi startsta\n')
time.sleep(2)
self.serial.write('wifi join {} {} wpa2_aes\n'.format(WIFI_SSID, WIFI_PASSWORD))
time.sleep(2)
self.serial.write('ifconfig wl1 dhcp\n')
time.sleep(2)
def read_serial_output(self):
while True:
line = self.serial.readline()
if line == '':
print('Timeout')
return Result(exitcode=1,
message='timeout: Core Dump may occur')
sys.stdout.write(line)
if self.is_test_result(line):
return Result(
exitcode=self.get_test_exitcode(line),
message=line)
if self.is_core_dump(line):
return Result(exitcode=1, message=line)
def get_test_exitcode(self, line):
arr = line.split(' ')
if arr[2] == '0':
return 0
return 1
def is_test_result(self, line):
return 'Tests' in line and 'Failure' in line and 'Ignored' in line
def is_core_dump(self, line):
return '(core dumped)' in line
def test_get_usb_tty_number():
assert '/dev/ttyUSB1' == TestResultCollector().get_usb_tty_number()
def test_create_serial():
assert None != TestResultCollector().create_serial('/dev/ttyUSB1')
def test_is_core_dump():
assert True == TestResultCollector().is_core_dump('Aborted (core dumped)')
```
#### File: RT-OCF/tools/memory.py
```python
import subprocess
import os
import sys
from internal.common import Result
from internal.utils import write_result
from internal.utils import execute
from internal.utils import execute_return_output
from internal.linux_adapter import LinuxAdapter
from internal.leak_calculator import LeakCalculator
from internal.peak_calculator import PeakCalculator
from internal.peak_calculator import Peak
from internal.RT_OCF_error import RT_OCFError
from internal.config import RT_OCF_FUNCTIONAL_TEST
from internal.config import CI_LINUX_LEAK_FILE_NAME
from internal.config import CI_LINUX_PEAK_FILE_NAME
import glob
FUNCTIONAL_TESTS = glob.glob(os.path.join(RT_OCF_FUNCTIONAL_TEST, 'test_*.py'))
PEAK_FAIL_MESSAGE = "Calculating memory peak is Failed T^T"
LEAK_FAIL_MESSAGE = "Calculating memory leak is Failed T^T"
def run(args):
adapter = LinuxAdapter()
if not args.skip_build:
adapter.distclean()
adapter.build()
result = Result()
leak_message = LEAK_FAIL_MESSAGE
peak_message = PEAK_FAIL_MESSAGE
try:
leak_dic = {}
peak_dic = {}
for binary in FUNCTIONAL_TESTS:
filename = os.path.basename(binary)
execute('rm -rf massif.out.*')
execute_output = execute_return_output('python {}'.format(binary))
leak_dic[filename] = LeakCalculator().calculate(execute_output.data)
massif_output = execute_return_output('ms_print massif.out.*')
peak_dic[filename] = PeakCalculator().calculate(massif_output.data)
leak_total = 0
peak_max = Peak()
for filename in leak_dic:
print('#############################################################')
print('-- {} --'.format(filename))
print(' Memory leak')
print(' {} bytes'.format(leak_dic[filename]))
print(' Memory peak')
print(' {}'.format(str(peak_dic[filename])))
print('#############################################################')
leak_total += leak_dic[filename]
if peak_dic[filename] > peak_max:
peak_max = peak_dic[filename]
leak_message = 'Memory Leak: {} bytes'.format(str(leak_total))
peak_message = 'Memory Peak: {}'.format(str(peak_max))
except RT_OCFError as e:
peak_message = PEAK_FAIL_MESSAGE
leak_message = LEAK_FAIL_MESSAGE
result = Result(exitcode=e.exitcode, message=e.message)
finally:
if args.is_ci:
write_result(CI_LINUX_LEAK_FILE_NAME, leak_message)
write_result(CI_LINUX_PEAK_FILE_NAME, peak_message)
print("Result::")
print(leak_message)
print(peak_message)
exit(result.exitcode)
def run_peak(is_ci):
try:
result = PeakCalculator().calculate(FUNCTIONAL_TESTS)
except RT_OCFError as e:
result = Result(exitcode=e.exitcode, message=e.message)
finally:
        if is_ci:
write_result(CI_LINUX_PEAK_FILE_NAME, result.message)
return result
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--skip-build',
dest="skip_build",
required=False,
action='store_true',
help="True, if you want to skip build")
parser.add_argument(
"--ci",
dest="is_ci",
required=False,
action='store_true',
help="True, if it is ci build.")
args = parser.parse_args()
run(args)
```
#### File: tools/test/test_memory_script.py
```python
import os
from subprocess import call
from tools.internal.config import RT_OCF_ROOT
from tools.internal.config import RT_OCF_ROOT_TOOLS
from tools.internal.config import CI_LINUX_LEAK_FILE_NAME
from tools.internal.config import CI_LINUX_PEAK_FILE_NAME
from tools.test.common import make_fail_file
from tools.test.common import remove_fail_file
class TestMemoryScript:
def setup_method(self, method):
call('rm -rf ci_*.txt', shell=True)
def teardown_method(self, method):
call('rm -rf ci_*.txt', shell=True)
def test_memory_leak(self):
command = '{}/memory.py leak'.format(RT_OCF_ROOT_TOOLS)
call(command, shell=True)
assert not os.path.isfile(CI_LINUX_LEAK_FILE_NAME)
def test_memory_leak_ci(self):
command = '{}/memory.py leak --ci'.format(RT_OCF_ROOT_TOOLS)
call(command, shell=True)
assert os.path.isfile(CI_LINUX_LEAK_FILE_NAME)
def test_memory_peak(self):
command = '{}/memory.py peak'.format(RT_OCF_ROOT_TOOLS)
call(command, shell=True)
assert not os.path.isfile(CI_LINUX_PEAK_FILE_NAME)
def test_memory_peak_ci(self):
command = '{}/memory.py peak --ci'.format(RT_OCF_ROOT_TOOLS)
call(command, shell=True)
assert os.path.isfile(CI_LINUX_PEAK_FILE_NAME)
``` |
{
"source": "JoshWorld/WhaleShark_IIoT",
"score": 2
} |
#### File: JoshWorld/WhaleShark_IIoT/mongo_manager.py
```python
from pymongo import MongoClient, ReturnDocument
import yaml
import logging
import sys
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', stream=sys.stdout, level=logging.DEBUG,
datefmt='%Y-%m-%d %H:%M:%S')
class MongoMgr:
def __init__(self):
with open('config/config_server_develop.yaml', 'r') as file:
config_obj = yaml.load(file, Loader=yaml.FullLoader)
self.mongo_host = config_obj['iiot_server']['mongodb']['ip_address']
self.mongo_port = config_obj['iiot_server']['mongodb']['port']
self.id = config_obj['iiot_server']['mongodb']['id']
self.pwd = config_obj['iiot_server']['mongodb']['pwd']
self.db = config_obj['iiot_server']['mongodb']['db']
        self.mongo_client = MongoClient(self.mongo_host, username = self.id, password = self.pwd, authSource = self.db, authMechanism = 'SCRAM-SHA-1')
def mongo_conn(self):
return self.mongo_client
def collections(self, database):
return self.mongo_client[database].collection_names()
def documents(self, database, collection):
documents_list = self.mongo_client[database].get_collection(collection).find({})
return documents_list
def document_bykey(self, database, collection, doc_key):
return self.mongo_client[database].get_collection(collection).find_one(doc_key)
def document_upsert(self, database, collection, doc_key, pub_time, status={'SENT':''}):
document = self.mongo_client[database].get_collection(collection).find_one({'DAY':doc_key})
if document is None:
"""
Daily append
"""
self.mongo_client[database].get_collection(collection).insert_one({'DAY':doc_key,"LOG":{pub_time:status}})
else:
"""
Secondary append
"""
            if pub_time in document['LOG'].keys():  # when receipt has been confirmed
doc_id = document['_id']
if 'SENT' in document['LOG'][pub_time]:
document['LOG'][pub_time]['SENT']='RECEIVED'
self.mongo_client[database].get_collection(collection).find_one_and_update({'_id': doc_id},
{'$set': document})
else:
logging.error('KEY:'+ pub_time + ' NO SENT')
else:
doc_id = document['_id']
document['LOG'][pub_time]={'SENT':''}
self.mongo_client[database].get_collection(collection).find_one_and_update({'_id': doc_id},
{'$set': document})
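# For reference (illustration only): document_upsert keeps one document per day
# that accumulates a status entry per publication time, roughly shaped like
#   {"DAY": "<doc_key>", "LOG": {"<pub_time>": {"SENT": "RECEIVED"}}}
# The exact key formats depend on the caller; the shape above only mirrors the
# reads and writes performed in this method.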
``` |
{
"source": "Josh-WOW/RexBot",
"score": 3
} |
#### File: RexBot/commands/ping.py
```python
import discord
from discord.ext import commands
import datetime
import time
from random import choice, randint
class ping:#This is like the section of each command, when you see the help menu for the bot you will understand
def __init__(self, bot):#This is the define so in this case its "self.bot", you can also add loop and extras to this
self.bot = bot
@commands.command(pass_context=True)#This is a command base
async def ping(self, ctx):
t1 = time.perf_counter()
await self.bot.send_typing(ctx.message.channel)
t2 = time.perf_counter()
ping = str(round((t2-t1)*1000))
author = ctx.message.author.mention
response = await self.bot.say("**{}, this took me {}ms to send**".format(author, ping))
issue = ctx.message
time.sleep(10)
await self.bot.delete_message(response)
time.sleep(0.5)
await self.bot.delete_message(issue)
def setup(bot):
n = ping(bot)
bot.add_cog(n)#This is bot setup, every new command file you make must have this
```
#### File: Josh-WOW/RexBot/rexbot.py
```python
import discord
from discord.ext import commands
import sys, traceback
from os import listdir
from os.path import isfile, join
def get_prefix(bot, message):
prefixes = ['$']
return commands.when_mentioned_or(*prefixes)(bot, message)
cmd_dir = "commands"
bot = commands.Bot(command_prefix=get_prefix, description='R3dfox')
if __name__ == "__main__":
for extension in [f.replace('.py', '') for f in listdir(cmd_dir) if isfile(join(cmd_dir, f))]:
try:
bot.load_extension(cmd_dir + "." + extension)
except Exception as e:
print(f'Failed to load file {extension}.')
traceback.print_exc()
@bot.event
async def on_ready():
print("Starting up")
print(f'Started!')
bot.run('paste token here', bot=True, reconnect=True)
``` |
{
"source": "joshxinjie/clean-code-ml",
"score": 3
} |
#### File: src/tests/test_workshop_metrics.py
```python
import unittest
from sklearn.metrics import precision_score, recall_score
from src.train import prepare_data_and_train_model
class TestModelMetrics(unittest.TestCase):
def test_model_precision_is_above_threshold(self):
model, X_test, Y_test = prepare_data_and_train_model()
Y_pred = model.predict(X_test)
precision = precision_score(Y_test, Y_pred)
self.assertGreaterEqual(precision, 0.6)
def test_model_recall_is_above_threshold(self):
model, X_test, Y_test = prepare_data_and_train_model()
Y_pred = model.predict(X_test)
precision = recall_score(Y_test, Y_pred)
self.assertGreaterEqual(precision, 0.6)
if __name__ == '__main__':
unittest.main()
```
#### File: src/tests/test_workshop.py
```python
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from src.workshop import add, multiply_by_10, impute_nans, add_is_alone_column
# add 1 + 1
class TestProcessing(unittest.TestCase):
def test_add_1_and_1_should_return_2(self):
self.assertEqual(add(1,1),2)
def test_multiply_by_10_should_return_a_df_with_values_multiplied_by_10(self):
        # arrange
df = pd.DataFrame({
'age': [1,2,3],
'name': ['Bob', 'Alice', 'John']
})
expected_df = pd.DataFrame({
'age': [10, 20, 30],
'name': ['Bob', 'Alice', 'John']
})
assert_frame_equal(multiply_by_10(df),expected_df)
def test_impute_nan_should_replace_nan_values_with_median_values(self):
df = pd.DataFrame({
'age': [1,np.nan,3],
'income': [100, 300, np.nan],
'name': ['Bob', 'Alice', np.nan]
})
expected_df = pd.DataFrame({
'age': [1, 2, 3],
'income': [100, 300, 200],
'name': ['Bob', 'Alice', np.nan]
})
assert_frame_equal(impute_nans(df, columns=['age', 'income']),expected_df, check_dtype=False)
def test_is_alone_should_create_an_isalone_column_with_value_1_if_familysize_is_1(self):
df = pd.DataFrame({
'FamilySize': [1,5,3],
'name': ['Bob', 'Alice', 'John']
})
expected_df = pd.DataFrame({
'FamilySize': [1,5,3],
'name': ['Bob', 'Alice', 'John'],
'IsAlone': [1,0,0]
})
assert_frame_equal(add_is_alone_column(df),expected_df)
if __name__ == '__main__':
unittest.main()
#python -m src.tests.test_workshop
``` |
{
"source": "josh-yang92/BitlyConverter",
"score": 3
} |
#### File: BitlyConverter/BitlyAffiliateConverter/authFunction.py
```python
import os
def getKeys(keyDir):
file = open(os.path.join(keyDir, 'keys.txt'), 'r')
fields = file.read()
fields = fields.split('\n')
keys = {}
for items in fields:
elements = items.split('=')
keys[elements[0].strip()] = elements[1].strip()
return keys
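# The expected layout of keys.txt is inferred from how the keys are consumed in
# MainDisplayClass (APIUser, APIKey, AffLink); treat this as an assumed example,
# not an official template -- one "name = value" pair per line:
#   APIUser = your_bitly_username
#   APIKey = your_bitly_api_key
#   AffLink = https://example.com/your-affiliate-prefix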
```
#### File: BitlyConverter/BitlyAffiliateConverter/MainDisplayClass.py
```python
import tkinter as tk
from .CustomDialogClass import CustomDialog
from .RequestingFunctions import getItemName
from .RequestingFunctions import getShortLink
class Run:
def __init__(self, root, keys):
self.root = root
self.keys = keys
self.affLink = None
self.longLink = None
# combine the shortened link and the product name, display it in a window
def showDialog(self, shortenedURL, itemName):
inputText = itemName + ': ' + shortenedURL
CustomDialog(self.root, title="Link", text=inputText)
# combine the affiliate link to the site url, get the item names and display it all
def convert(self):
combinedLink = self.affLink.get('1.0', tk.END).strip() + '/' + self.longLink.get('1.0', tk.END).strip()
itemName = getItemName(self.longLink.get('1.0', tk.END).strip())
self.showDialog(getShortLink(self.keys['APIUser'], self.keys['APIKey'], combinedLink), itemName)
def runApp(self):
canvas1 = tk.Canvas(self.root, width=300, height=150)
canvas1.grid()
tk.Label(self.root, text='Affiliate Link').grid(row=0)
tk.Label(self.root, text='Long URL').grid(row=1)
self.affLink = tk.Text(self.root, width=100, height=5)
self.longLink = tk.Text(self.root, width=100, height=5)
self.affLink.insert("end", self.keys['AffLink'])
self.affLink.grid(row=0, column=1)
self.longLink.grid(row=1, column=1)
        button1 = tk.Button(text='Convert!', command=self.convert, bg='brown', fg='white')
        button1.grid(row=3)
canvas1.create_window(150, 150, window=button1)
self.root.mainloop()
``` |
{
"source": "joshy/dcm",
"score": 3
} |
#### File: dcm/dcm/diff.py
```python
from copy import deepcopy
from hashlib import sha256
from typing import Optional, List
from pydicom import Dataset, DataElement
from pydicom.tag import BaseTag
def _shorten_bytes(val: bytes) -> bytes:
if len(val) > 16:
return b"*%d bytes, hash = %s*" % (len(val), sha256(val).hexdigest().encode())
return val
class DataDiff(object):
default_elem_fmt = "{elem.tag} {elem.name: <35} {elem.VR}: {value}"
def __init__(
self,
tag: BaseTag,
l_elem: Optional[DataElement],
r_elem: Optional[DataElement],
elem_fmt: str = default_elem_fmt,
):
self.tag = tag
self.l_elem = deepcopy(l_elem)
self.r_elem = deepcopy(r_elem)
self.elem_fmt = elem_fmt
def _format_elem(self, elem: DataElement) -> str:
value = elem.value
if isinstance(value, bytes):
value = _shorten_bytes(value)
return self.elem_fmt.format(elem=elem, value=value)
def __str__(self) -> str:
res = []
if self.l_elem is not None:
res.append("< %s" % self._format_elem(self.l_elem))
if self.r_elem is not None:
res.append("> %s" % self._format_elem(self.r_elem))
return "\n".join(res)
def diff_data_sets(left: Dataset, right: Dataset) -> List[DataDiff]:
"""Get list of all differences between `left` and `right` data sets"""
l_elems = iter(left)
r_elems = iter(right)
l_elem = r_elem = None
l_done = r_done = False
diffs = []
while True:
if l_elem is None and not l_done:
try:
l_elem = next(l_elems)
except StopIteration:
l_done = True
l_elem = None
if r_elem is None and not r_done:
try:
r_elem = next(r_elems)
except StopIteration:
r_done = True
r_elem = None
if l_elem is None and r_elem is None:
break
if l_elem is None:
assert r_elem is not None
diffs.append(DataDiff(r_elem.tag, l_elem, r_elem))
r_elem = None
elif r_elem is None:
assert l_elem is not None
diffs.append(DataDiff(l_elem.tag, l_elem, r_elem))
l_elem = None
elif l_elem.tag < r_elem.tag:
diffs.append(DataDiff(l_elem.tag, l_elem, None))
l_elem = None
elif r_elem.tag < l_elem.tag:
diffs.append(DataDiff(r_elem.tag, None, r_elem))
r_elem = None
else:
if l_elem.value != r_elem.value or l_elem.VR != r_elem.VR:
diffs.append(DataDiff(l_elem.tag, l_elem, r_elem))
l_elem = r_elem = None
return diffs
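# Minimal usage sketch (added for illustration; the file names are placeholders):
#   from pydicom import dcmread
#   left = dcmread("a.dcm")
#   right = dcmread("b.dcm")
#   for diff in diff_data_sets(left, right):
#       print(diff)  # "<" lines come from `left`, ">" lines from `right`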
```
#### File: dcm/dcm/route.py
```python
from __future__ import annotations
import asyncio, logging
from copy import copy, deepcopy
from datetime import datetime
from dataclasses import dataclass, field
from typing import (
Optional,
Tuple,
Callable,
Dict,
List,
Union,
Iterable,
AsyncIterator,
AsyncContextManager,
Any,
cast,
)
from contextlib import asynccontextmanager
import janus
from pydicom import Dataset
from .lazyset import LazySet, FrozenLazySet
from .query import (
QueryLevel,
QueryResult,
DataNode,
InconsistentDataError,
get_uid,
minimal_copy,
)
from .filt import Filter, DataTransform, get_transform, Selector
from .report import (
CountableReport,
MultiListReport,
MultiDictReport,
MultiKeyedError,
ProgressHookBase,
)
from .util import DuplicateDataError, TomlConfigurable
from .net import DicomOpReport, IncomingDataError, IncomingErrorType
from .store import DataBucket, DataRepo, TransferMethod, LocalWriteReport
log = logging.getLogger(__name__)
class NoValidTransferMethodError(Exception):
"""Error raised when we are unable to select a valid transfer method"""
def __init__(
self,
src_dest_pair: Optional[
Tuple[DataBucket[Any, Any], DataBucket[Any, Any]]
] = None,
):
self.src_dest_pair = src_dest_pair
def __str__(self) -> str:
if self.src_dest_pair is None:
return "No valid transfer method for one or more routes"
else:
return f"No valid transfer method between {self.src_dest_pair[0]} and {self.src_dest_pair[1]}"
# TODO: Have been working under the assumption the filter would be applied
# before resolving dynamic routes, but it is more likely and common
# that we would want to route on the original data, since we may have
# a rather broad filter (i.e. anonymization) that screws up the elements
# used for routing.
#
# Any logic that would go into a pre-filter could just be placed in the
# dynamic routing function. We might just need to duplicate that logic
# into a filter if we also want to persist the changes which is an okay
# trade-off compared to the complexity of allowing both pre/post filters
#
# We do lose the ability to specify which elements might be
# modified, how they might be modified, and what their dependencies are.
# Do we implicitly disallow uninvertible shenanigans in the dynamic routing
# function?
@dataclass(frozen=True)
class Route:
"""Abstract base class for all Routes
The main functionality of routes is to map datasets to destinations.
Routes can have a filter associated with them, which take a dataset as
input and return one as output. The dataset can be modified and None can be
returned to reject the dataset.
"""
filt: Optional[Filter] = None
"""Streaming data filter for editing and rejecting data sets"""
def get_dests(
self, data_set: Dataset
) -> Optional[Tuple[DataBucket[Any, Any], ...]]:
"""Return the destintations for the `data set`
Must be implemented by all subclasses."""
raise NotImplementedError
def get_filtered(self, data_set: Dataset) -> Optional[Dataset]:
if self.filt is None:
return data_set
return self.filt(data_set)
@dataclass(frozen=True)
class _StaticBase:
dests: Tuple[DataBucket[Any, Any], ...]
"""Static tuple of destinations"""
methods: Tuple[TransferMethod, ...] = (TransferMethod.PROXY,)
"""The transfer methods to use, in order of preference
This will automatically be paired down to the methods supported by all the
dests (or just allow PROXY if we have a filter). If no valid transfer
methods are given a `NoValidTransferMethodError` will be raised.
"""
@dataclass(frozen=True)
class StaticRoute(Route, _StaticBase, TomlConfigurable["StaticRoute"]):
"""Static route that sends all (unfiltered) data to same dests"""
def __post_init__(self) -> None:
if self.filt is not None:
if TransferMethod.PROXY not in self.methods:
raise NoValidTransferMethodError()
avail_methods = [TransferMethod.PROXY]
else:
avail_methods = []
for meth in self.methods:
if all(meth in d._supported_methods for d in self.dests):
avail_methods.append(meth)
if len(avail_methods) == 0:
raise NoValidTransferMethodError()
object.__setattr__(self, "dests", tuple(self.dests))
object.__setattr__(self, "methods", tuple(avail_methods))
@classmethod
def from_toml_dict(cls, toml_dict: Dict[str, Any]) -> StaticRoute:
kwargs = deepcopy(toml_dict)
methods = kwargs.get("methods")
if methods is not None:
kwargs["methods"] = tuple(TransferMethod[m.upper()] for m in methods)
return cls(**kwargs)
def get_dests(self, data_set: Dataset) -> Tuple[DataBucket[Any, Any], ...]:
return self.dests
def get_method(self, src: DataBucket[Any, Any]) -> TransferMethod:
for method in self.methods:
if method in src._supported_methods:
return method
raise NoValidTransferMethodError()
def __str__(self) -> str:
return "Static: %s" % ",".join(str(d) for d in self.dests)
@dataclass(frozen=True)
class _DynamicBase:
lookup: Callable[[Dataset], Optional[Tuple[DataBucket[Any, Any], ...]]]
"""Callable takes a dataset and returns destinations"""
route_level: QueryLevel = QueryLevel.STUDY
"""The level in the DICOM hierarchy we are making routing decisions at"""
required_elems: FrozenLazySet[str] = field(default_factory=FrozenLazySet)
"""DICOM elements that we require to make a routing decision"""
dest_methods: Optional[
Dict[Optional[DataBucket[Any, Any]], Tuple[TransferMethod, ...]]
] = None
"""Specify transfer methods for (some) dests
Use `None` as the key to specify the default transfer methods for all dests
not explicitly listed.
Only respected when pre-routing is used. Dynamic routing can only proxy.
"""
@dataclass(frozen=True)
class DynamicRoute(Route, _DynamicBase):
"""Dynamic route which determines destinations based on the data.
Routing decisions are made before applying the filter to the data.
"""
def __post_init__(self) -> None:
if self.dest_methods is not None:
avail_meths: Dict[
Optional[DataBucket[Any, Any]], Tuple[TransferMethod, ...]
] = {}
for dest, methods in self.dest_methods.items():
if self.filt is not None:
if TransferMethod.PROXY not in methods:
raise NoValidTransferMethodError()
avail_meths[dest] = (TransferMethod.PROXY,)
elif dest is None:
avail_meths[dest] = methods
else:
meths = tuple(m for m in methods if m in dest._supported_methods)
if len(meths) == 0:
raise NoValidTransferMethodError()
avail_meths[dest] = meths
object.__setattr__(self, "dest_methods", avail_meths)
if self.route_level not in QueryLevel:
raise ValueError("Invalid route_level: %s" % self.route_level)
if not isinstance(self.required_elems, FrozenLazySet):
object.__setattr__(
self, "required_elems", FrozenLazySet(self.required_elems)
)
def get_dests(
self, data_set: Dataset
) -> Optional[Tuple[DataBucket[Any, Any], ...]]:
dests = self.lookup(data_set)
if dests is None:
return None
return tuple(dests)
def get_static_routes(self, data_set: Dataset) -> Optional[Tuple[StaticRoute, ...]]:
"""Resolve this dynamic route into one or more static routes"""
dests = self.lookup(data_set)
if dests is None:
return dests
dests = tuple(dests)
if self.dest_methods is not None:
meths_dests_map: Dict[
Tuple[TransferMethod, ...], List[DataBucket[Any, Any]]
] = {}
default_methods = self.dest_methods.get(None)
if default_methods is None:
default_methods = (TransferMethod.PROXY,)
for dest in dests:
d_methods = self.dest_methods.get(dest)
if d_methods is None:
d_methods = default_methods
if d_methods not in meths_dests_map:
meths_dests_map[d_methods] = []
meths_dests_map[d_methods].append(dest)
return tuple(
StaticRoute(tuple(sub_dests), filt=deepcopy(self.filt), methods=meths)
for meths, sub_dests in meths_dests_map.items()
)
else:
return (StaticRoute(dests, filt=deepcopy(self.filt)),)
def __str__(self) -> str:
return "Dynamic on: %s" % self.required_elems
@dataclass(frozen=True)
class SelectorDestMap(TomlConfigurable["SelectorDestMap"]):
"""Allow construction of dynamic routes from static config"""
routing_map: Tuple[Tuple[Selector, Tuple[DataBucket[Any, Any], ...]], ...]
"""One or more tuples of (selector, dests) pairs"""
default_dests: Optional[Tuple[DataBucket[Any, Any], ...]] = None
"""The default destinations to use when no selectors match"""
exclude: Optional[Tuple[Selector, ...]] = None
"""Exclude data at routing step (versus `filt` which is applied to each image)"""
stop_on_first: bool = True
"""Just return dests associated with first selector that matches"""
route_level: QueryLevel = QueryLevel.STUDY
"""The level in the DICOM hierarchy we are making routing decisions at"""
dest_methods: Optional[
Dict[Optional[DataBucket[Any, Any]], Tuple[TransferMethod, ...]]
] = None
"""Specify transfer methods for (some) dests
Use `None` as the key to specify the default transfer methods for all dests
not explicitly listed.
Only respected when pre-routing is used. Dynamic routing can only proxy.
"""
required_elems: FrozenLazySet[str] = field(
default_factory=FrozenLazySet, init=False
)
"""DICOM elements that we require to make a routing decision"""
filt: Optional[Filter] = None
"""Steaming data filter for editing and rejecting data sets"""
def __post_init__(self) -> None:
req_elems: LazySet[str] = LazySet()
for sel, _ in self.routing_map:
req_elems |= sel.get_read_elems()
if self.exclude:
for sel in self.exclude:
req_elems |= sel.get_read_elems()
object.__setattr__(self, "required_elems", FrozenLazySet(req_elems))
@classmethod
def from_toml_dict(cls, toml_dict: Dict[str, Any]) -> SelectorDestMap:
kwargs = deepcopy(toml_dict)
route_level = kwargs.get("route_level")
if route_level is not None:
kwargs["route_level"] = QueryLevel[route_level.upper()]
return cls(**kwargs)
def get_dynamic_route(self) -> DynamicRoute:
"""Return equivalent DynamicRoute object"""
def lookup_func(ds: Dataset) -> Optional[Tuple[DataBucket[Any, Any], ...]]:
res: List[DataBucket[Any, Any]] = []
if self.exclude:
if any(sel.test_ds(ds) for sel in self.exclude):
return None
for sel, dests in self.routing_map:
if sel.test_ds(ds):
if self.stop_on_first:
return dests
else:
res += dests
if not res:
return self.default_dests
return tuple(res)
return DynamicRoute(
lookup_func,
route_level=self.route_level,
required_elems=self.required_elems,
dest_methods=self.dest_methods,
filt=self.filt,
)
class ProxyTransferError(Exception):
def __init__(
self,
store_errors: Optional[MultiKeyedError] = None,
inconsistent: Optional[Dict[StaticRoute, List[Tuple[Dataset, Dataset]]]] = None,
duplicate: Optional[Dict[StaticRoute, List[Tuple[Dataset, Dataset]]]] = None,
incoming_error: Optional[IncomingDataError] = None,
):
self.store_errors = store_errors
self.inconsistent = inconsistent
self.duplicate = duplicate
self.incoming_error = incoming_error
def __str__(self) -> str:
res = ["ProxyTransferError:"]
if self.inconsistent is not None:
res.append("%d inconsistent data sets" % len(self.inconsistent))
if self.duplicate is not None:
res.append("%d duplicate data sets" % len(self.duplicate))
if self.store_errors is not None:
for err in self.store_errors.errors:
res.append(str(err))
if self.incoming_error is not None:
res.append(str(self.incoming_error))
return "\n\t".join(res)
# TODO: Some annoying overlap with IncomingDataReport here, but not clear we
# can do much about it since we need a RetrieveReport when the src is
# remote, and we need the `sent` dict here to track data transforms.
#
# Can we make sure the same (minimized) data set is used in all report
# structures? Does that alleviate all concerns about duplication?
# TODO: Update keep_errors handling here. I guess the `add` method should
# return a bool like with the IncomingDataReports? Also, this means that
# we might end up sending erroneous data, which can't be captured in the
# DataTransforms under `sent` here. I guess this is okay and mimics what
# happens in a RetrieveReport
#
class ProxyReport(CountableReport):
"""Abstract base class for reports on proxy transfers"""
def __init__(
self,
description: Optional[str] = None,
meta_data: Optional[Dict[str, Any]] = None,
depth: int = 0,
prog_hook: Optional[ProgressHookBase[Any]] = None,
n_expected: Optional[int] = None,
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]] = False,
):
self.keep_errors = keep_errors # type: ignore
self.sent: Dict[StaticRoute, DataTransform] = {}
self.inconsistent: Dict[StaticRoute, List[Tuple[Dataset, Dataset]]] = {}
self.duplicate: Dict[StaticRoute, List[Tuple[Dataset, Dataset]]] = {}
self._n_success = 0
super().__init__(description, meta_data, depth, prog_hook, n_expected)
@property
def keep_errors(self) -> Tuple[IncomingErrorType, ...]:
"""Whether or not we are forwarding inconsistent/duplicate data"""
return self._keep_errors
@keep_errors.setter
def keep_errors(self, val: Union[bool, Tuple[IncomingErrorType, ...]]) -> None:
if val == True:
self._keep_errors = tuple(IncomingErrorType)
elif val == False:
self._keep_errors = tuple()
else:
val = cast(Tuple[IncomingErrorType, ...], val)
self._keep_errors = val
@property
def n_success(self) -> int:
return self._n_success
@property
def n_errors(self) -> int:
n_errors = 0
if not self.keep_errors:
n_errors += self.n_inconsistent + self.n_duplicate
return n_errors
@property
def n_warnings(self) -> int:
n_warn = 0
if self.keep_errors:
n_warn += self.n_inconsistent + self.n_duplicate
return n_warn
@property
def n_sent(self) -> int:
"""Number of times datasets were sent out"""
res = sum(len(trans.new) * len(sr.dests) for sr, trans in self.sent.items())
if self.keep_errors:
res += sum(len(x) * len(sr.dests) for sr, x in self.inconsistent.items())
res += sum(len(x) * len(sr.dests) for sr, x in self.duplicate.items())
return res
@property
def n_inconsistent(self) -> int:
return sum(len(x) for _, x in self.inconsistent.items())
@property
def n_duplicate(self) -> int:
return sum(len(x) for _, x in self.duplicate.items())
@property
def n_reported(self) -> int:
"""Number store results that have been reported so far"""
raise NotImplementedError
@property
def all_reported(self) -> bool:
"""True if all sent data sets have a reported result"""
assert self.n_reported <= self.n_sent
return self.n_sent == self.n_reported
def add(self, route: StaticRoute, old_ds: Dataset, new_ds: Dataset) -> bool:
"""Add the route with pre/post filtering dataset to the report"""
self.count_input()
if route not in self.sent:
self.sent[route] = get_transform(QueryResult(QueryLevel.IMAGE), route.filt)
try:
self.sent[route].add(old_ds, new_ds)
except InconsistentDataError:
if route not in self.inconsistent:
self.inconsistent[route] = []
self.inconsistent[route].append((old_ds, new_ds))
return IncomingErrorType.INCONSISTENT in self._keep_errors
except DuplicateDataError:
if route not in self.duplicate:
self.duplicate[route] = []
self.duplicate[route].append((old_ds, new_ds))
return IncomingErrorType.DUPLICATE in self._keep_errors
else:
self._n_success += 1
return True
def log_issues(self) -> None:
"""Produce log messages for any warning/error statuses"""
n_inconsist = self.n_inconsistent
if n_inconsist:
if self.keep_errors:
log.warning("Sent %d inconsistent data sets" % n_inconsist)
else:
log.error("Skipped %d inconsistent data sets" % n_inconsist)
n_duplicate = self.n_duplicate
if n_duplicate:
if self.keep_errors:
log.warning("Sent %d duplicate data sets" % n_duplicate)
else:
log.error("Skipped %d duplicate data sets" % n_duplicate)
def check_errors(self) -> None:
"""Raise an exception if any errors have occured so far"""
if self.n_errors:
inconsist = None
if self.inconsistent:
inconsist = self.inconsistent
dupes = None
if self.duplicate:
dupes = self.duplicate
raise ProxyTransferError(inconsistent=inconsist, duplicate=dupes)
def clear(self) -> None:
self.sent.clear()
self.inconsistent.clear()
self.duplicate.clear()
StoreReportType = Union[DicomOpReport, LocalWriteReport]
class DynamicTransferReport(ProxyReport):
"""Track what data is being routed where and any store results"""
def __init__(
self,
description: Optional[str] = None,
meta_data: Optional[Dict[str, Any]] = None,
depth: int = 0,
prog_hook: Optional[ProgressHookBase[Any]] = None,
n_expected: Optional[int] = None,
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]] = False,
):
self.store_reports: MultiDictReport[
DataBucket[Any, Any], MultiListReport[StoreReportType]
] = MultiDictReport(prog_hook=prog_hook)
super().__init__(
description, meta_data, depth, prog_hook, n_expected, keep_errors
)
@property
def n_success(self) -> int:
return super().n_success + self.store_reports.n_success
@property
def n_errors(self) -> int:
return super().n_errors + self.store_reports.n_errors
@property
def n_warnings(self) -> int:
return super().n_warnings + self.store_reports.n_warnings
@property
def n_reported(self) -> int:
return self.store_reports.n_input
def add_store_report(
self, dest: DataBucket[Any, Any], store_report: StoreReportType
) -> None:
"""Add a DicomOpReport to keep track of"""
if dest not in self.store_reports:
self.store_reports[dest] = MultiListReport(prog_hook=self._prog_hook)
self.store_reports[dest].append(store_report)
def log_issues(self) -> None:
"""Produce log messages for any warning/error statuses"""
super().log_issues()
self.store_reports.log_issues()
def check_errors(self) -> None:
"""Raise an exception if any errors have occured so far"""
if self.n_errors:
err = None
try:
super().check_errors()
except ProxyTransferError as e:
err = e
else:
err = ProxyTransferError()
try:
self.store_reports.check_errors()
except MultiKeyedError as e:
err.store_errors = e
raise err
def clear(self) -> None:
"""Clear current info about data sets we have results for"""
# TODO: If n_sent != n_reported here we will go out of sync. I guess
# this would need to be managed at a higher level if it is
# needed. Not clear if it makes sense to do anything about it
# here.
super().clear()
self.store_reports.clear()
@dataclass
class _CacheEntry:
"""Entry in a SendAssociationCache"""
ctx_mgr: AsyncContextManager["janus._AsyncQueueProxy[Dataset]"]
send_q: "janus._AsyncQueueProxy[Dataset]"
op_report: DicomOpReport
last_use: datetime
# TODO: Make generic association caching in `net` module supporting
# query/move/send. Could then use that everywhere, and use it to
# manage max association limits on any node.
class SendAssociationCache:
def __init__(self, timeout: float = 30.0):
"""Keeps cache of recent associations"""
self._timeout = timeout
self._cache: Dict[DataBucket[Any, Any], _CacheEntry] = {}
@property
def next_timeout(self) -> float:
"""Number of seconds until the next cache entry will timeout"""
next_timeout = self._timeout
now = datetime.now()
for cache_entry in self._cache.values():
td = now - cache_entry.last_use
timeout = max(self._timeout - td.total_seconds(), 0)
if timeout < next_timeout:
next_timeout = timeout
return next_timeout
async def send(
self, ds: Dataset, dest: DataBucket[Any, Any]
) -> Optional[DicomOpReport]:
"""Send a data set to dests, utilizing the cache of active associations"""
res = None
cache_entry = self._cache.get(dest, None)
if cache_entry is None:
op_report = dest.get_empty_send_report()
res = op_report
ctx_mgr = dest.send(op_report)
send_q = await ctx_mgr.__aenter__()
cache_entry = _CacheEntry(ctx_mgr, send_q, op_report, datetime.now())
self._cache[dest] = cache_entry
else:
cache_entry.last_use = datetime.now()
send_q = cache_entry.send_q
await send_q.put(ds)
return res
async def update_cache(self) -> Dict[DataBucket[Any, Any], DicomOpReport]:
"""Close associations that haven't been used in a while
Returns reports for all closed associations.
"""
curr_time = datetime.now()
reports = {}
for dest, cache_entry in self._cache.items():
age = curr_time - cache_entry.last_use
if age.total_seconds() > self._timeout:
await cache_entry.ctx_mgr.__aexit__(None, None, None)
reports[dest] = cache_entry.op_report
for dest in reports:
del self._cache[dest]
return reports
async def empty_cache(self) -> Dict[DataBucket[Any, Any], DicomOpReport]:
"""Close all associations
Returns dict of dest/op_report for all closed associations.
"""
reports = {}
for dest, cache_entry in self._cache.items():
await cache_entry.ctx_mgr.__aexit__(None, None, None)
reports[dest] = cache_entry.op_report
self._cache.clear()
return reports
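# Hypothetical usage sketch (illustration only, not part of the original
# module): the cache is driven from a routing loop, roughly:
#
#     cache = SendAssociationCache(timeout=30.0)
#     op_report = await cache.send(ds, some_dest)  # open or reuse an association
#     await cache.update_cache()                   # close associations idle too long
#     await cache.empty_cache()                    # close everything at shutdown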
class InsufficientElemsError(Exception):
"""We don't have the required DICOM elements for the operation"""
class Router:
"""Work with multiple dynamic/static routes"""
def __init__(self, routes: Iterable[Route], assoc_cache_time: int = 20):
self._routes = tuple(routes)
self._assoc_cache_time = assoc_cache_time
self._static: List[StaticRoute] = []
self._dynamic: List[DynamicRoute] = []
self._route_level = QueryLevel.PATIENT
req_elems: LazySet[str] = LazySet()
self._all_proxy = True
for route in routes:
if isinstance(route, DynamicRoute):
self._dynamic.append(route)
req_elems |= route.required_elems
self._route_level = max(self._route_level, route.route_level)
if route.dest_methods is not None:
for methods in route.dest_methods.values():
if TransferMethod.PROXY not in methods:
self._all_proxy = False
elif isinstance(route, StaticRoute):
self._static.append(route)
if TransferMethod.PROXY not in route.methods:
self._all_proxy = False
else:
raise ValueError("Unrecognized route type")
self._required_elems = FrozenLazySet(req_elems)
if len(self._dynamic) == 0:
self._route_level = QueryLevel.STUDY
elif not self.can_pre_route and not self.can_dyn_route:
raise NoValidTransferMethodError()
@property
def required_elems(self) -> FrozenLazySet[str]:
"""All required DICOM elements for making routing decisions"""
return self._required_elems
@property
def has_dynamic_routes(self) -> bool:
return len(self._dynamic) != 0
@property
def can_pre_route(self) -> bool:
return self._route_level != QueryLevel.IMAGE
@property
def can_dyn_route(self) -> bool:
return self._all_proxy
def get_filter_dest_map(
self, ds: Dataset
) -> Dict[Optional[Filter], Tuple[DataBucket[Any, Any], ...]]:
"""Get dict mapping filters to lists of destinations"""
selected: Dict[Optional[Filter], List[DataBucket[Any, Any]]] = {}
for route in self._routes:
dests = route.get_dests(ds)
if not dests:
continue
filt = route.filt
if filt not in selected:
selected[filt] = list(dests)
else:
selected[filt] += dests
return {k: tuple(v) for k, v in selected.items()}
async def pre_route(
self,
src: DataRepo[Any, Any, Any, Any],
query: Optional[Dataset] = None,
        query_res: Optional[QueryResult] = None,
) -> Dict[Tuple[StaticRoute, ...], QueryResult]:
"""Pre-calculate any dynamic routing for data on `src`
If DICOM elements needed for routing decisions can't be queried for, we
will retrieve an example data set for that study.
Parameters
----------
src
The data source
query
A query that defines the data to route
query_res
A QueryResult that defines the data to route
Returns
-------
result : dict
Maps tuples of StaticRoute objects to QueryResults defining all of
the data that should be sent to those routes.
"""
route_level = self._route_level
if route_level == QueryLevel.IMAGE:
raise ValueError("Can't pre-route at IMAGE level")
# Try to get required DICOM elements by doing a query if needed
query, query_res = await self._fill_qr(src, query, query_res)
# Nothing to do...
if len(self._dynamic) == 0:
return {tuple(self._static): query_res}
log.info("Trying to resolve dynamic routes with queries")
# Iteratively try to extract example data sets with all the elements
# needed for routing from our QueryResult, while also performing higher
# level-of-detail queries as needed. In the end the missing_qr will
# specify a single image for each chunk of data we don't have an
# example data set for
example_data: Dict[str, Dataset] = {}
missing_qr = query_res
while True:
new_missing_qr = QueryResult(level=missing_qr.level)
for pth, sub_uids in missing_qr.walk():
if pth.level < route_level:
continue
if pth.level != missing_qr.level:
# We only want to visit one sub-element
# TODO: Allow user defined sorting here?
del sub_uids[1:]
continue
lvl_uid = pth.uids[-1]
ds = deepcopy(missing_qr[lvl_uid])
for k in self.required_elems:
if k not in ds:
new_missing_qr.add(ds)
break
else:
route_uid = pth.uids[route_level]
assert route_uid not in example_data
example_data[route_uid] = ds
missing_qr = new_missing_qr
if len(missing_qr) == 0 or missing_qr.level == QueryLevel.IMAGE:
break
missing_qr = await src.query(
QueryLevel(missing_qr.level + 1), query, missing_qr
)
# For any studies where we don't have example data, fetch some
if len(missing_qr) != 0:
log.info("Fetching example data to resolve dynamic routes")
async for ds in src.retrieve(missing_qr):
route_uid = get_uid(route_level, ds)
assert route_uid not in example_data
example_data[route_uid] = ds
assert len(example_data) == query_res.get_count(route_level)
# Resolve all dynamic routes into data specific static routes
res: Dict[Tuple[StaticRoute, ...], QueryResult] = {}
for route_uid, ds in example_data.items():
sub_routes = copy(self._static)
for route in self._dynamic:
static_routes = route.get_static_routes(ds)
if static_routes:
sub_routes.extend(static_routes)
if sub_routes:
sub_routes_tup = tuple(sub_routes)
if sub_routes_tup not in res:
res[sub_routes_tup] = QueryResult(query_res.level)
sub_qr = query_res.sub_query(DataNode(route_level, route_uid))
res[sub_routes_tup] |= sub_qr
else:
log.info("Skipping chunk at routing stage: %s", route_uid)
# TODO: Track this in report
log.info("All dynamic routes have been resolved")
return res
@asynccontextmanager
async def route(
self,
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]] = False,
report: Optional[DynamicTransferReport] = None,
) -> AsyncIterator["asyncio.Queue[Dataset]"]:
"""Produces queue where datasets can be put for dynamic routing
Parameters
----------
keep_errors
Set to true to send all data, even if it is inconsistent/duplicate
report
Pass a DynamicTransferReport in to be filled out on the fly
Provides insight into what data is being routed where
"""
if not self.can_dyn_route:
raise NoValidTransferMethodError()
data_q: "asyncio.Queue[Optional[Dataset]]" = asyncio.Queue()
route_task = asyncio.create_task(self._route(data_q, keep_errors, report))
try:
yield data_q # type: ignore
finally:
if not route_task.done():
await data_q.put(None)
await route_task
async def _route(
self,
data_q: "asyncio.Queue[Optional[Dataset]]",
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]],
report: Optional[DynamicTransferReport],
) -> None:
if report is None:
extern_report = False
report = DynamicTransferReport()
else:
extern_report = True
report.keep_errors = keep_errors # type: ignore
assoc_cache = SendAssociationCache(self._assoc_cache_time)
try:
n_pushed = 0
while True:
try:
ds = await asyncio.wait_for(
data_q.get(), min(assoc_cache.next_timeout, 5.0)
)
except asyncio.TimeoutError:
await assoc_cache.update_cache()
continue
# TODO: Do we want this? Or should we just use task canceling?
# What happens if a user pushes None accidentally? Just
# use a different sentinel value?
if ds is None:
break
filter_dest_map = self.get_filter_dest_map(ds)
n_filt = len([f for f in filter_dest_map if f is not None])
# Only make copy of the data set if needed
if n_filt > 1:
orig_ds = deepcopy(ds)
else:
orig_ds = ds
min_old_ds = minimal_copy(ds)
for filt, dests in filter_dest_map.items():
static_route = StaticRoute(dests, filt=filt)
# Update report
if filt is not None:
filt_ds = filt(orig_ds)
if filt_ds is not None:
min_new_ds = minimal_copy(filt_ds)
else:
filt_ds = orig_ds
min_new_ds = min_old_ds
if filt_ds is None:
continue
if not report.add(static_route, min_old_ds, min_new_ds):
continue
# Initiate the transfers
coros = [assoc_cache.send(filt_ds, dest) for dest in dests]
log.debug("Router forwarding data set to %d dests" % len(dests))
op_reports = await asyncio.gather(*coros)
for op_report, dest in zip(op_reports, dests):
if op_report is not None:
report.add_store_report(dest, op_report)
n_pushed += 1
# Periodically check to avoid association timeouts under high
# traffic
if n_pushed % 100 == 0:
await assoc_cache.update_cache()
finally:
await assoc_cache.empty_cache()
report.done = True
if not extern_report:
report.log_issues()
report.check_errors()
async def _fill_qr(
self,
src: DataRepo[Any, Any, Any, Any],
query: Optional[Dataset],
query_res: Optional[QueryResult],
) -> Tuple[Dataset, QueryResult]:
"""Perform a query against the src if needed"""
if query is None:
query = Dataset()
req_elems = self.required_elems
if query_res is None:
level = self._route_level
else:
level = query_res.level
if level < self._route_level:
level = self._route_level
elif not req_elems:
# Nothing we need to query for
return (query, query_res)
elif req_elems.is_enumerable():
if query_res.prov.queried_elems is not None and all(
e in query_res.prov.queried_elems for e in req_elems
):
# All required elems were already queried for
return (query, query_res)
# Check if all required elems already exist
# TODO: Iterating every data set seems wasteful...
needs_query = False
for ds in query_res:
for elem in req_elems:
if elem not in ds:
log.debug("Router needs to query due to missing elements")
needs_query = True
break
if not needs_query:
return (query, query_res)
if req_elems.is_enumerable():
for e in req_elems:
setattr(query, e, "")
log.info("The Router is perfoming an intial query against the source: %s", src)
return (query, await src.query(level, query, query_res))
```
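A minimal usage sketch may help tie the routing pieces together. The snippet below is illustrative only and not part of the repository; `dest_a`, `dest_b`, and `archive` are placeholders for configured `DataBucket`/`DataRepo` instances (e.g. `NetRepo` or `LocalDir`).
```python
# Illustration only: wiring StaticRoute, DynamicRoute, and Router together.
from dcm.route import StaticRoute, DynamicRoute, Router

def by_patient(ds):
    # Pick dests based on an element we declared in `required_elems`
    return (dest_a,) if ds.PatientID.startswith("Test") else (dest_b,)

routes = [
    StaticRoute((archive,)),                                 # every data set goes here
    DynamicRoute(by_patient, required_elems=["PatientID"]),  # plus a per-patient dest
]
router = Router(routes)

async def push_all(datasets):
    # Proxy-style dynamic routing: put data sets on the queue yielded by route()
    async with router.route() as q:
        for ds in datasets:
            await q.put(ds)
```
When the source is a queryable `DataRepo`, `Router.pre_route` resolves the dynamic routes up front, mapping tuples of static routes to the `QueryResult` data that should follow them; `Router.route` is the streaming path used when only a naive `DataBucket` is available.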
#### File: dcm/dcm/sync.py
```python
from __future__ import annotations
import logging, itertools, textwrap
from copy import deepcopy
from dataclasses import dataclass, field
import asyncio
from types import TracebackType
from typing import (
Optional,
Tuple,
Dict,
List,
Union,
Iterable,
Any,
Set,
Type,
Callable,
TypeVar,
Iterator,
AsyncIterator,
cast,
Generic,
)
from typing_extensions import Protocol
from contextlib import AsyncExitStack, asynccontextmanager
from pydicom import Dataset
from .query import (
QueryLevel,
QueryResult,
DataNode,
get_all_uids,
minimal_copy,
uid_elems,
)
from .store import (
TransferMethod,
DataChunk,
DataBucket,
OobCapable,
DataRepo,
LocalIncomingReport,
)
from .filt import get_transform, Filter
from .route import (
Route,
StaticRoute,
Router,
ProxyTransferError,
ProxyReport,
StoreReportType,
DynamicTransferReport,
NoValidTransferMethodError,
)
from .diff import diff_data_sets, DataDiff
from .net import (
IncomingDataReport,
IncomingDataError,
IncomingErrorType,
DicomOpReport,
RetrieveReport,
)
from .report import (
BaseReport,
MultiAttrReport,
MultiListReport,
MultiDictReport,
MultiKeyedError,
ProgressHookBase,
)
from .util import dict_to_ds
log = logging.getLogger(__name__)
class StaticStoreReport(MultiDictReport[DataBucket[Any, Any], StoreReportType]):
"""Transfer report that only captures storage"""
IncomingReportType = Union[IncomingDataReport, RetrieveReport, LocalIncomingReport]
class StaticProxyTransferReport(ProxyReport):
"""Static proxy transfer report"""
def __init__(
self,
description: Optional[str] = None,
meta_data: Optional[Dict[str, Any]] = None,
depth: int = 0,
n_expected: Optional[int] = None,
prog_hook: Optional[ProgressHookBase[Any]] = None,
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]] = False,
):
self.store_reports: StaticStoreReport = StaticStoreReport(prog_hook=prog_hook)
super().__init__(
description, meta_data, depth, prog_hook, n_expected, keep_errors
)
@property
def n_success(self) -> int:
return super().n_success + self.store_reports.n_success
@property
def n_errors(self) -> int:
return super().n_errors + self.store_reports.n_errors
@property
def n_warnings(self) -> int:
return super().n_warnings + self.store_reports.n_warnings
@property
def n_reported(self) -> int:
return self.store_reports.n_input
def add_store_report(
self, dest: DataBucket[Any, Any], store_report: StoreReportType
) -> None:
"""Add a DicomOpReport or LocalWriteReport to keep track of"""
assert dest not in self.store_reports
if self.n_expected is not None and store_report.n_expected is None:
store_report.n_expected = self.n_expected
self.store_reports[dest] = store_report
def log_issues(self) -> None:
"""Produce log messages for any warning/error statuses"""
super().log_issues()
self.store_reports.log_issues()
def check_errors(self) -> None:
"""Raise an exception if any errors have occured so far"""
if self.n_errors:
err = None
try:
super().check_errors()
except ProxyTransferError as e:
err = e
else:
err = ProxyTransferError()
try:
self.store_reports.check_errors()
except MultiKeyedError as e:
err.store_errors = e
raise err
def clear(self) -> None:
super().clear()
self.store_reports.clear()
def _set_depth(self, val: int) -> None:
if val != self._depth:
self._depth = val
self.store_reports.depth = val + 1
class StaticOobTransferReport(MultiDictReport[TransferMethod, StaticStoreReport]):
"""Transfer report for out-of-band transfers"""
class StaticTransferError(Exception):
def __init__(
self,
proxy_error: Optional[ProxyTransferError] = None,
oob_error: Optional[MultiKeyedError] = None,
):
self.proxy_error = proxy_error
self.oob_error = oob_error
def __str__(self) -> str:
res = ["StaticTransferError:"]
if self.proxy_error is not None:
res.append("\tProxy Error: %s" % str(self.proxy_error))
if self.oob_error is not None:
res.append("\tOut-of-band Error: %s" % str(self.oob_error))
return "\n".join(res)
class StaticTransferReport(MultiAttrReport):
"""Capture all possible info about a single StaticTranfer"""
def __init__(
self,
description: Optional[str] = None,
meta_data: Optional[Dict[str, Any]] = None,
depth: int = 0,
prog_hook: Optional[ProgressHookBase[Any]] = None,
incoming_report: Optional[IncomingReportType] = None,
):
self._incoming_report = None
self._proxy_report: Optional[StaticProxyTransferReport] = None
self._oob_report: Optional[StaticOobTransferReport] = None
self._report_attrs = ["incoming_report", "_proxy_report", "_oob_report"]
super().__init__(description, meta_data, depth, prog_hook)
if incoming_report is not None:
self.incoming_report = incoming_report
@property
def incoming_report(self) -> Optional[IncomingReportType]:
return self._incoming_report
@incoming_report.setter
def incoming_report(self, val: IncomingReportType) -> None:
if self._incoming_report is not None:
raise ValueError("The incoming report was already set")
self._incoming_report = val
self._incoming_report.depth = self._depth + 1
self._incoming_report.prog_hook = self._prog_hook
@property
def proxy_report(self) -> StaticProxyTransferReport:
if self._proxy_report is None:
self._proxy_report = StaticProxyTransferReport(
depth=self._depth + 1, prog_hook=self._prog_hook
)
if (
self._proxy_report.n_expected is None
and self._incoming_report is not None
and self._incoming_report.n_expected is not None
):
self._proxy_report.n_expected = self._incoming_report.n_expected
return self._proxy_report
@property
def oob_report(self) -> StaticOobTransferReport:
if self._oob_report is None:
self._oob_report = StaticOobTransferReport(
depth=self._depth + 1, prog_hook=self._prog_hook
)
if (
self._oob_report.n_expected is None
and self._incoming_report is not None
and self._incoming_report.n_expected is not None
):
self._oob_report.n_expected = self._incoming_report.n_expected
return self._oob_report
def check_errors(self) -> None:
"""Raise an exception if any errors have occured so far"""
if self.has_errors:
err = StaticTransferError()
if self.proxy_report is not None:
try:
self.proxy_report.check_errors()
except ProxyTransferError as e:
err.proxy_error = e
if self.oob_report is not None:
try:
self.oob_report.check_errors()
except MultiKeyedError as e:
err.oob_error = e
raise err
T_report = TypeVar(
"T_report", bound=Union[DynamicTransferReport, StaticTransferReport], covariant=True
)
@dataclass
class Transfer(Generic[T_report]):
chunk: DataChunk
report: T_report
@dataclass
class DynamicTransfer(Transfer["DynamicTransferReport"]):
chunk: DataChunk
report: DynamicTransferReport = field(default_factory=DynamicTransferReport)
@dataclass
class StaticTransfer(Transfer["StaticTransferReport"]):
chunk: DataChunk
report: StaticTransferReport = field(default_factory=StaticTransferReport)
method_routes_map: Dict[TransferMethod, Tuple[StaticRoute, ...]] = field(
default_factory=dict
)
def __post_init__(self) -> None:
if TransferMethod.PROXY in self.method_routes_map:
self.report.incoming_report = self.chunk.report
@property
def proxy_filter_dest_map(
self,
) -> Dict[Optional[Filter], Tuple[DataBucket[Any, Any], ...]]:
"""Get dict mapping filters to destinations for proxy transfers"""
filter_dest_map: Dict[Optional[Filter], Set[DataBucket[Any, Any]]] = {}
routes = self.method_routes_map.get(TransferMethod.PROXY, tuple())
for route in routes:
filt = route.filt
if filt not in filter_dest_map:
filter_dest_map[filt] = set(d for d in route.dests)
else:
filter_dest_map[filt].update(route.dests)
return {k: tuple(v) for k, v in filter_dest_map.items()}
def get_dests(self, method: TransferMethod) -> Tuple[DataBucket[Any, Any], ...]:
res = set()
for route in self.method_routes_map.get(method, []):
for dest in route.dests:
res.add(dest)
return tuple(res)
DiffFiltType = Callable[[DataDiff], Optional[DataDiff]]
def make_basic_validator(
diff_filters: Optional[Iterable[DiffFiltType]] = None,
) -> Callable[[Dataset, Dataset], None]:
"""Create validator that logs a warning on any differing elements
List of filter functions can be supplied to modify/delete the diffs
"""
def basic_validator(src_ds: Dataset, dest_ds: Dataset) -> None:
diffs = diff_data_sets(src_ds, dest_ds)
if diff_filters is not None:
warn_diffs = []
d: Optional[DataDiff]
for d in diffs:
for filt in diff_filters:
assert d is not None
d = filt(d)
if d is None:
break
else:
assert d is not None
warn_diffs.append(d)
else:
warn_diffs = diffs
if len(warn_diffs) != 0:
msg = [
"Found differeing elements for ds %s:" % "/".join(get_all_uids(src_ds))
]
for d in warn_diffs:
msg.append(textwrap.indent(str(d), "\t"))
log.warn("\n".join(msg))
return basic_validator
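# Illustrative only (not part of this module): a diff filter returns the diff
# to keep it or None to drop it, so a validator that ignores a noisy element
# could be built roughly like:
#
#     def ignore_time_diffs(diff):
#         return None if "Time" in str(diff) else diff
#
#     validator = make_basic_validator([ignore_time_diffs])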
T = TypeVar("T")
async def _sync_iter_to_async(sync_gen: Iterator[T]) -> AsyncIterator[T]:
for result in sync_gen:
yield result
TransferReportTypes = Union[
DynamicTransferReport,
StaticTransferReport,
]
DestType = Union[DataBucket, Route]
SourceMissingQueryReportType = MultiListReport[MultiListReport[DicomOpReport]]
DestMissingQueryReportType = MultiDictReport[
DataRepo[Any, Any, Any, Any], SourceMissingQueryReportType
]
class RepoRequiredError(Exception):
"""Operation requires a DataRepo but a DataBucket was provided"""
class SyncQueriesReport(MultiAttrReport):
"""Report for queries being performed during sync"""
def __init__(
self,
description: Optional[str] = None,
meta_data: Optional[Dict[str, Any]] = None,
depth: int = 0,
prog_hook: Optional[ProgressHookBase[Any]] = None,
):
self._init_src_qr_report: Optional[MultiListReport[DicomOpReport]] = None
self._missing_src_qr_reports: Optional[
MultiListReport[SourceMissingQueryReportType]
] = None
self._missing_dest_qr_reports: Optional[
MultiListReport[DestMissingQueryReportType]
] = None
self._report_attrs = [
"_init_src_qr_report",
"_missing_src_qr_reports",
"_missing_dest_qr_reports",
]
super().__init__(description, meta_data, depth, prog_hook)
@property
def init_src_qr_report(self) -> MultiListReport[DicomOpReport]:
if self._init_src_qr_report is None:
self._init_src_qr_report = MultiListReport(
"init-src-qr", depth=self._depth + 1, prog_hook=self._prog_hook
)
return self._init_src_qr_report
@property
def missing_src_qr_reports(self) -> MultiListReport[SourceMissingQueryReportType]:
if self._missing_src_qr_reports is None:
self._missing_src_qr_reports = MultiListReport(
"missing-src-qrs", depth=self._depth + 1, prog_hook=self._prog_hook
)
return self._missing_src_qr_reports
@property
def missing_dest_qr_reports(self) -> MultiListReport[DestMissingQueryReportType]:
if self._missing_dest_qr_reports is None:
self._missing_dest_qr_reports = MultiListReport(
"missing-dest-qrs", depth=self._depth + 1, prog_hook=self._prog_hook
)
return self._missing_dest_qr_reports
class SyncReport(MultiAttrReport):
"""Top level report from a sync operation"""
def __init__(
self,
description: Optional[str] = None,
meta_data: Optional[Dict[str, Any]] = None,
depth: int = 0,
prog_hook: Optional[ProgressHookBase[Any]] = None,
):
self._queries_report: Optional[SyncQueriesReport] = None
self.trans_reports: MultiListReport[TransferReportTypes] = MultiListReport(
"transfers", depth=depth + 1, prog_hook=prog_hook
)
self._report_attrs = ["_queries_report", "trans_reports"]
super().__init__(description, meta_data, depth, prog_hook)
@property
def queries_report(self) -> SyncQueriesReport:
if self._queries_report is None:
self._queries_report = SyncQueriesReport(
"sync-queries", depth=self._depth + 1, prog_hook=self._prog_hook
)
return self._queries_report
# TODO: Does it make more sense to allow a query to be passed in here instead
# having the base_query in the NetRepo? We only have one source here,
# and the base_query functionality could be confusing, so I lean towards
# yes.
#
# How would the query/query_res interact though? Seems like the query_res
# should override the query instead of the query refining the query_res?
# This would be different than every other part of our API that takes
# both though...
# TODO: If we end up needing to do the query ourselves, we should include any
# req_elems from any DynamicRoutes so we don't have to do those queries
# again in pre-route. One question is how does pre-route know that we
# already tried to query for elements that the remote doesn't provide?
class SyncManager:
"""Can generate and execute transfers needed to sync `src` and `dests`
Data will only be retrieved locally from `src` at most once and then
forwarded to all destinations that need it.
Data that already exists on the destinations will be skipped, unless
`force_all` is set to True.
Parameters
----------
src
The source of data we are transferring
dests
One or more destinations for the data
trust_level
Assume data matches if sub-component counts match at this level
Setting this to a level higher than IMAGE can speed up things
significantly at the cost of accuracy. Has no effect if `force_all` is
set to True.
force_all
Don't skip data that already exists on the destinations
keep_errors
Whether or not we try to sync erroneous data
report
        Allows live introspection of the sync process and detailed results
"""
def __init__(
self,
src: DataBucket[Any, Any],
dests: List[DestType],
trust_level: QueryLevel = QueryLevel.IMAGE,
force_all: bool = False,
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]] = False,
validators: Optional[Iterable[Callable[[Dataset, Dataset], None]]] = None,
report: Optional[SyncReport] = None,
):
self._src = src
self._trust_level = trust_level
self._force_all = force_all
self._keep_errors: Tuple[IncomingErrorType, ...]
if keep_errors == False:
self._keep_errors = tuple()
elif keep_errors == True:
self._keep_errors = tuple(IncomingErrorType)
else:
keep_errors = cast(Tuple[IncomingErrorType, ...], keep_errors)
self._keep_errors = keep_errors
self._validators = validators
if report is None:
self._extern_report = False
self.report = SyncReport()
else:
self._extern_report = True
self.report = report
self.report._meta_data["source"] = self._src
self.report._meta_data["dests"] = dests
self.report._meta_data["trust_level"] = self._trust_level.name
self.report._meta_data["force_all"] = self._force_all
self.report._meta_data["keep_errors"] = self._keep_errors
# Make sure all dests are Route objects
self._routes = []
plain_dests: List[DataBucket[Any, Any]] = []
for dest in dests:
if isinstance(dest, Route):
self._routes.append(dest)
else:
plain_dests.append(dest)
if plain_dests:
self._routes.append(StaticRoute(tuple(plain_dests)))
if len(self._routes) == 0:
raise ValueError("At least one dest must be specified")
self._has_filt = any(r.filt is not None for r in self._routes)
# Precompute TransferMethod to routes map for static routes
self._static_meth_routes = self._get_meth_routes(
r for r in self._routes if isinstance(r, StaticRoute)
)
# Create an internal Router object
self._router = Router(self._routes)
# If we need to do dynamic routing due to a naive data source, make
# sure it is possible
if (
not isinstance(self._src, DataRepo)
and self._router.has_dynamic_routes
and not self._router.can_dyn_route
):
raise NoValidTransferMethodError()
async def gen_transfers(
self,
        query_res: Optional[QueryResult] = None,
) -> AsyncIterator[Transfer[Any]]:
"""Generate the needed transfers
Parameters
----------
query_res
Only transfer data that matches this QueryResult
"""
if self.report.done:
raise ValueError("The SyncManager has been closed")
if not isinstance(self._src, DataRepo) and query_res is not None:
            raise RepoRequiredError("Can't pass in query_res with naive data source")
# If any routes have filters, we need to tell the chunks to keep
# inconsistent/duplicate data so the filters have a chance to fix it
_chunk_keep_errors = set(self._keep_errors)
if self._has_filt:
_chunk_keep_errors.add(IncomingErrorType.INCONSISTENT)
_chunk_keep_errors.add(IncomingErrorType.DUPLICATE)
chunk_keep_errors = tuple(_chunk_keep_errors)
n_trans = 0
res: Transfer[Any]
if not isinstance(self._src, DataRepo) or not self._router.can_pre_route:
log.info(f"Processing all data from data source: {self._src}")
if self._src.n_chunks is not None:
self.report.trans_reports.n_expected = self._src.n_chunks
async for chunk in self._src.gen_chunks():
chunk.keep_errors = chunk_keep_errors
if self._router.has_dynamic_routes:
chunk.report.prog_hook = self.report._prog_hook
res = DynamicTransfer(chunk)
else:
res = StaticTransfer(
chunk, method_routes_map=self._static_meth_routes.copy()
)
self.report.trans_reports.append(res.report)
n_trans += 1
yield res
else:
# We have a smart data repo and can precompute any dynamic routing
# and try to avoid transferring data that already exists
log.info(f"Processing select data from source: {self._src}")
src_qr_reports = self.report.queries_report.missing_src_qr_reports
dest_qr_reports = self.report.queries_report.missing_dest_qr_reports
expected_sub_qrs = None
qr_gen: AsyncIterator[QueryResult]
if query_res is not None:
gen_level = min(query_res.level, QueryLevel.STUDY)
qr_gen = _sync_iter_to_async(query_res.level_sub_queries(gen_level))
expected_sub_qrs = query_res.get_count(gen_level)
else:
q = dict_to_ds({elem: "*" for elem in self._router.required_elems})
qr_report = self.report.queries_report.init_src_qr_report
qr_gen = self._src.queries(QueryLevel.STUDY, q, report=qr_report)
n_sub_qr = 0
avg_trans_per_qr = None
async for sub_qr in qr_gen:
log.info(
"Processing %d out of %s sub-qr: %s",
n_sub_qr,
str(expected_sub_qrs),
sub_qr.to_line(),
)
if expected_sub_qrs is None and qr_report.done:
expected_sub_qrs = qr_report.n_input
if expected_sub_qrs is not None:
if avg_trans_per_qr is not None:
self.report.trans_reports.n_expected = int(
round(expected_sub_qrs * avg_trans_per_qr)
)
if src_qr_reports.n_expected is None:
src_qr_reports.n_expected = expected_sub_qrs
# dest_qr_reports.n_expected = expected_sub_qrs
# Resolve any dynamic routes, so all routes are static. Produce dict mapping
# these static routes to QueryResults specifying the data to send
sr_qr_map = await self._router.pre_route(self._src, query_res=sub_qr)
pre_count = n_trans
for static_routes, qr in sr_qr_map.items():
if self._force_all:
missing_info = {tuple(static_routes): [qr]}
else:
missing_info = await self._get_missing(static_routes, qr)
if len(missing_info) == 0:
log.info("No transfers necessary for this sub-qr")
else:
log.info("Producing transfers for this sub-qr")
for sub_routes, missing_qrs in missing_info.items():
meth_routes = self._get_meth_routes(sub_routes)
for missing_qr in missing_qrs:
async for chunk in self._src.gen_query_chunks(missing_qr):
chunk.keep_errors = chunk_keep_errors
if len(meth_routes) == 0:
# TODO: Remove this branch?
log.error("No meth_routes!")
assert False
# import pdb ; pdb.set_trace()
res = StaticTransfer(
chunk, method_routes_map=meth_routes.copy()
)
self.report.trans_reports.append(res.report)
n_trans += 1
yield res
if avg_trans_per_qr is None:
if n_trans != 0:
avg_trans_per_qr = n_trans / (n_sub_qr + 1)
else:
avg_trans_per_qr = max(
0.1,
((avg_trans_per_qr * n_sub_qr) + (n_trans - pre_count))
/ (n_sub_qr + 1),
)
n_sub_qr += 1
src_qr_reports.done = True
dest_qr_reports.done = True
self.report.queries_report.done = True
log.info(f"Generated {n_trans} transfers")
async def exec_transfer(self, transfer: Transfer[Any]) -> None:
"""Execute the given transfer"""
if self.report.done:
raise ValueError("The SyncManager has been closed")
if isinstance(transfer, DynamicTransfer):
log.info("Executing dynamic transfer: %s", transfer)
await self._do_dynamic_transfer(transfer)
elif isinstance(transfer, StaticTransfer):
log.info("Executing static transfer: %s", transfer)
await self._do_static_transfer(transfer)
else:
raise TypeError("Not a valid Transfer sub-class: %s" % transfer)
    async def sync(self, query_res: Optional[QueryResult] = None) -> None:
"""Generate and exec all transfers for `query_res`"""
async for trans in self.gen_transfers(query_res):
await self.exec_transfer(trans)
# For now the below three methods don't actually need to be async, but we
# implement them as such so we can leverage it in the future without API
# breakage.
async def close(self) -> None:
if not self._extern_report:
self.report.log_issues()
self.report.check_errors()
self.report.trans_reports.done = True
self.report.done = True
async def __aenter__(self) -> SyncManager:
return self
async def __aexit__(
self,
exctype: Optional[Type[BaseException]],
excinst: Optional[BaseException],
exctb: Optional[TracebackType],
) -> None:
await self.close()
def _get_meth_routes(
self, routes: Iterable[StaticRoute]
) -> Dict[TransferMethod, Tuple[StaticRoute, ...]]:
method_routes_map: Dict[TransferMethod, List[StaticRoute]] = {}
for route in routes:
method = route.get_method(self._src)
if method not in method_routes_map:
method_routes_map[method] = []
method_routes_map[method].append(route)
return {k: tuple(v) for k, v in method_routes_map.items()}
async def _get_missing(
self, static_routes: Tuple[StaticRoute, ...], query_res: QueryResult
) -> Dict[Tuple[StaticRoute, ...], List[QueryResult]]:
"""Determine what data is missing for the `static_routes`
Each route might produce naive (i.e. `DataBucket`) or smart (i.e.
DataRepo) destinations, but we can't do much in the naive case except
assume that all of the data is missing.
Parameters
----------
static_routes : list of StaticRoute
Determines one or more destinations for the data
query_res : QueryResult
            Subset of data we want to check the existence of on destinations
Returns
-------
missing_info : dict
Maps tuples of routes to list of QueryResults that specify the
data missing from that set of destinations.
"""
assert isinstance(self._src, DataRepo)
log.debug("Finding missing data for src %s" % self._src)
src_qr = query_res
# Pair up dests with filters and split into two groups, those we can
# check for missing data and those we can not
dest_filt_tuples: List[Tuple[DataBucket[Any, Any], Optional[Filter]]] = []
checkable: List[Tuple[DataRepo[Any, Any, Any, Any], Optional[Filter]]] = []
non_checkable = []
df_trans_map = {}
for route in static_routes:
filt = route.filt
can_invert_uids = True
if filt is not None:
invertible_uids = filt.invertible_uids
can_invert_uids = all(
uid in invertible_uids for uid in uid_elems.values()
)
for dest in route.dests:
df_tuple = (dest, filt)
dest_filt_tuples.append(df_tuple)
df_trans_map[df_tuple] = get_transform(src_qr, filt)
if isinstance(dest, DataRepo) and can_invert_uids:
df_tuple = cast(
Tuple[DataRepo[Any, Any, Any, Any], Optional[Filter]], df_tuple
)
checkable.append(df_tuple)
else:
non_checkable.append(df_tuple)
# Can't check any dests to see what is missing, so nothing to do
if len(checkable) == 0:
return {tuple(static_routes): [query_res]}
# Build multi reports for capturing queries
expected = QueryLevel.IMAGE - src_qr.level + 1
src_queries_report: MultiListReport[
MultiListReport[DicomOpReport]
] = MultiListReport("missing-src-qr", n_expected=expected)
self.report.queries_report.missing_src_qr_reports.append(src_queries_report)
dest_queries_report: MultiDictReport[
DataRepo[Any, Any, Any, Any],
MultiListReport[MultiListReport[DicomOpReport]],
] = MultiDictReport("missing-dests-qr")
self.report.queries_report.missing_dest_qr_reports.append(dest_queries_report)
# We group data going to same sets of destinations
res: Dict[
Tuple[Tuple[DataRepo[Any, Any, Any, Any], Optional[Filter]], ...],
List[QueryResult],
] = {}
for n_dest in reversed(range(1, len(checkable) + 1)):
for df_set in itertools.combinations(checkable, n_dest):
if df_set not in res:
res[tuple(df_set)] = []
# Check for missing data at each query level, starting from coarsest
# (i.e. entire missing patients, then studies, etc.)
curr_matching = {df: df_trans_map[df].new for df in checkable}
curr_src_qr = src_qr
for curr_level in range(src_qr.level, QueryLevel.IMAGE + 1):
curr_level = QueryLevel(curr_level)
if len(curr_src_qr) == 0:
src_queries_report.n_expected = src_queries_report.n_input
# for dest, dreports in dest_queries_report.items():
# dreports.n_expected = dreports.n_input
break
log.debug("Checking for missing data at level %s" % curr_level)
# Compute what is missing for each dest and matching for any dest
# at this level
missing = {}
full_matching: Optional[QueryResult] = None
for df in checkable:
dest, filt = df
log.debug("Checking for missing data on dest: %s", dest)
assert isinstance(dest, DataRepo)
if curr_level > curr_src_qr.level:
# We need more details for the source QueryResult
log.debug("Querying src in _get_missing more details")
src_report: MultiListReport[DicomOpReport] = MultiListReport()
src_queries_report.append(src_report)
src_qr_task = asyncio.create_task(
self._src.query(
level=curr_level, query_res=curr_src_qr, report=src_report
)
)
if dest not in dest_queries_report:
dest_reports: MultiListReport[
MultiListReport[DicomOpReport]
] = MultiListReport("missing-dest-qr")
dest_queries_report[dest] = dest_reports
else:
dest_reports = dest_queries_report[dest]
dest_report: MultiListReport[DicomOpReport] = MultiListReport()
dest_reports.append(dest_report)
dest_qr = await dest.query(
level=curr_level, query_res=curr_matching[df], report=dest_report
)
if curr_level > curr_src_qr.level:
curr_src_qr = await src_qr_task
df_trans_map = {
df: get_transform(curr_src_qr & qr_trans.old, df[1])
for df, qr_trans in df_trans_map.items()
}
curr_qr_trans = df_trans_map[df]
missing[df] = curr_qr_trans.old.sub(
curr_qr_trans.reverse(dest_qr).qr, ignore_subcounts=True
)
matching = curr_qr_trans.new & dest_qr
if curr_level == self._trust_level:
done_uids = []
for uid in matching.uids():
node = DataNode(curr_level, uid)
if curr_qr_trans.new.sub_query(
node, curr_level
) == dest_qr.sub_query(node):
done_uids.append(uid)
for uid in done_uids:
del matching[uid]
curr_matching[df] = matching
old_matching = curr_qr_trans.reverse(matching).qr
df_trans_map[df] = get_transform(old_matching, filt)
if full_matching is None:
full_matching = old_matching
else:
full_matching |= old_matching
log.debug(
"missing = %s \n matching = %s", missing[df], curr_matching[df]
)
log.debug("full matching = %s", full_matching)
# Reduce the source qr to only data that matches on at least one dest
if full_matching is not None:
log.debug("remaing pre-update = %s", curr_src_qr)
curr_src_qr = curr_src_qr & full_matching
log.debug("remaining post-update = %s", curr_src_qr)
# Update the results with the missing data for this level
for df_set, qr_list in res.items():
# Build set of all missing data across destinations
set_missing = None
for df in df_set:
if set_missing is None:
set_missing = deepcopy(missing[df])
else:
set_missing = set_missing & missing[df]
assert set_missing is not None
if len(set_missing) > 0:
for df in df_set:
missing[df] -= set_missing
if len(qr_list) > 0 and qr_list[-1].level == set_missing.level:
qr_list[-1] |= set_missing
else:
qr_list.append(set_missing)
# Mark our reports as done
src_queries_report.done = True
for _, dreport in dest_queries_report.items():
dreport.done = True
dest_queries_report.done = True
# Convert back to routes and return result
sr_res = {}
for df_set, qr_list in res.items():
if len(qr_list) == 0:
continue
filt_dest_map: Dict[Optional[Filter], List[DataBucket[Any, Any]]] = {}
for dest, filt in df_set:
if filt not in filt_dest_map:
filt_dest_map[filt] = []
filt_dest_map[filt].append(dest)
routes = []
for filt, dests in filt_dest_map.items():
routes.append(StaticRoute(tuple(dests), filt=filt))
sr_res[tuple(routes)] = qr_list
return sr_res
async def _do_dynamic_transfer(self, transfer: DynamicTransfer) -> None:
# TODO: We could keep this context manager open until the
# TransferExecutor.close method is called, and thus avoid some
# overhead from opening/closing associations, but this makes the
# reporting quite tricky, and each transfer should be relatively
# slow compared to the overhead of setting up and tearing down
# associations.
async with self._router.route(report=transfer.report) as routing_q:
async for ds in transfer.chunk.gen_data():
await routing_q.put(ds)
async def _do_static_proxy_transfer(
self, transfer: StaticTransfer, report: StaticProxyTransferReport
) -> None:
filter_dest_map = transfer.proxy_filter_dest_map
n_filt = len(filter_dest_map)
if None in filter_dest_map:
n_filt -= 1
dests = transfer.get_dests(TransferMethod.PROXY)
async with AsyncExitStack() as stack:
d_q_map = {}
log.info("Starting up senders...")
for dest in dests:
store_rep = dest.get_empty_send_report()
store_rep.n_expected = transfer.chunk.n_expected
report.add_store_report(dest, store_rep)
d_q_map[dest] = await stack.enter_async_context(
dest.send(report=store_rep)
)
log.info("All send queues are initialized, starting incoming data stream")
async for ds in transfer.chunk.gen_data():
if n_filt:
orig_ds = deepcopy(ds)
else:
orig_ds = ds
min_orig_ds = minimal_copy(orig_ds)
for filt, sub_dests in filter_dest_map.items():
static_route = StaticRoute(sub_dests, filt=filt)
sub_queues = [d_q_map[d] for d in sub_dests]
if filt is not None:
filt_ds = filt(orig_ds)
if filt_ds is not None:
min_filt_ds = minimal_copy(filt_ds)
else:
filt_ds = orig_ds
min_filt_ds = min_orig_ds
if filt_ds is None:
continue
if not report.add(static_route, min_orig_ds, min_filt_ds):
continue
if filt_ds is not None:
log.debug("Pushing data set onto send queues")
for q in sub_queues:
await q.put(filt_ds)
log.debug("Data was added to all send queues")
log.info("Finished generating / queueing all data from chunk")
log.info("Shutdown all senders")
report.done = True
async def _do_static_transfer(self, transfer: StaticTransfer) -> None:
# TODO: Can't automatically overlap the proxy and out-of-band transfers
# since they both may require associations with the same src.
# Would need to know the available resources, and those needed
# by each transfer, including a way for a transfer to reserve
# resources for future use
#
# Our current API also doesn't allow the user to do this manually...
oob_report = None
for method, routes in transfer.method_routes_map.items():
if method == TransferMethod.PROXY:
proxy_report = transfer.report.proxy_report
proxy_report.keep_errors = self._keep_errors
log.info("Doing proxy transfer")
await self._do_static_proxy_transfer(transfer, proxy_report)
else:
if oob_report is None:
oob_report = transfer.report.oob_report
oob_report[method] = StaticStoreReport()
for dest in transfer.get_dests(method):
oob_dest = cast(OobCapable[Any, Any], dest)
log.info("Doing out-of-band transfer to: %s", dest)
oob_report[method][dest] = oob_dest.get_empty_oob_report()
oob_report[method][dest].n_expected = transfer.chunk.n_expected
oob_report[method][dest].description = f"{method} to {dest}"
await oob_dest.oob_transfer(
method, transfer.chunk, report=oob_report[method][dest]
)
oob_report[method].done = True
if oob_report is not None:
oob_report.done = True
transfer.report.done = True
async def sync_data(
sources: List[DataBucket[Any, Any]],
dests: List[DestType],
query: Optional[Union[Dataset, List[Dataset]]] = None,
query_res: Optional[List[Optional[QueryResult]]] = None,
query_reports: Optional[List[Optional[MultiListReport[DicomOpReport]]]] = None,
sm_kwargs: Optional[List[Dict[str, Any]]] = None,
dry_run: bool = False,
) -> List[SyncReport]:
"""Sync data from one or more sources to one or more destinations
    The `query` parameter can be a single item that applies to all sources or
a list of items (one for each source).
    The data from each source is forwarded to all destinations (unless those
dests filter it).
Parameters
----------
sources
The sources we are syncing data from
dests
The destinations we are syncing data to
query
A query (or list w/ one per source) to limit the data being sent
query_res
A list of query results (one per source) to limit the data being sent
sm_kwargs
The keyword arguments used to create each SyncManager
dry_run
Set to true to just print the transfers that would get executed
"""
if query is not None or query_res is not None:
if not all(isinstance(src, DataRepo) for src in sources):
raise ValueError("Not all sources support queries")
n_srcs = len(sources)
if query_res is not None:
if len(query_res) != n_srcs:
raise ValueError("The query_res list is not the right size")
else:
query_res = [None] * n_srcs
if sm_kwargs is not None:
if len(sm_kwargs) != n_srcs:
raise ValueError("The sm_kwargs list is not the right size")
else:
sm_kwargs = [{} for _ in range(n_srcs)]
if query is not None:
if query_reports is not None:
if len(query_reports) != n_srcs:
raise ValueError("The query_reports list is not the right size")
else:
query_reports = [None] * n_srcs
if isinstance(query, Dataset):
query = [query] * n_srcs
elif len(query) != n_srcs:
raise ValueError("The query list is not the right size")
qr_tasks = []
for src, q, qr, qr_report in zip(sources, query, query_res, query_reports):
assert isinstance(src, DataRepo)
qr_tasks.append(src.query(query=q, query_res=qr, report=qr_report))
log.info("Performing initial queries against sources")
query_res = await asyncio.gather(*qr_tasks, return_exceptions=True)
keep_qrs = []
for src_idx, qr in enumerate(query_res):
if isinstance(qr, Exception):
log.warning(
"Skipping sync on source '%s' due to error during initial query: %s",
sources[src_idx],
qr,
)
del sources[src_idx]
del sm_kwargs[src_idx]
continue
keep_qrs.append(qr)
query_res = keep_qrs
sync_mgrs = [
SyncManager(src, dests, **kwargs) for src, kwargs in zip(sources, sm_kwargs)
]
if dry_run:
log.info("Starting dry-run")
async with AsyncExitStack() as estack:
sync_tasks: List[asyncio.Task[None]] = []
for sm, qr in zip(sync_mgrs, query_res):
if qr is not None:
log.info("Processing qr: %s", qr.to_line())
else:
log.info("Processing qr: None")
await estack.enter_async_context(sm)
async for transfer in sm.gen_transfers(qr):
if isinstance(transfer, StaticTransfer):
dests_comps = []
for meth, routes in transfer.method_routes_map.items():
for route in routes:
dests_comps.append(f"({meth.name}) {route}")
dests_str = " / ".join(dests_comps)
else:
dests_str = "DYNAMIC" # TODO: Something better here?
# Mark transfer reports done to avoid warning when sync report
# is marked done
transfer.report.done = True
print("%s > %s" % (transfer.chunk, dests_str))
log.info("Completed dry-run")
else:
log.info("Starting sync")
async with AsyncExitStack() as estack:
sync_tasks = []
for sm, qr in zip(sync_mgrs, query_res):
await estack.enter_async_context(sm)
sync_tasks.append(asyncio.create_task(sm.sync(qr)))
await asyncio.gather(*sync_tasks)
log.info("Completed sync")
return [sm.report for sm in sync_mgrs]
```
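For orientation, the sketch below shows how a `SyncManager` is driven directly, mirroring the tests that follow. It is illustrative only; `src`, `main_archive`, and `qc_store` stand in for configured stores such as `NetRepo` or `LocalDir`.
```python
# Illustration only: sync one source to two destinations over a single StaticRoute.
import asyncio
from dcm.route import StaticRoute
from dcm.sync import SyncManager

async def main():
    route = StaticRoute((main_archive, qc_store))
    async with SyncManager(src, [route]) as sm:
        async for transfer in sm.gen_transfers():
            await sm.exec_transfer(transfer)
    print(sm.report)

asyncio.run(main())
```
The `sync_data` helper wraps the same generate/execute loop for multiple sources, and its `dry_run=True` option only prints the transfers that would be executed.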
#### File: dcm/tests/test_sync.py
```python
import asyncio
from pathlib import Path
from copy import deepcopy
import pytest
from pytest import fixture, mark
from ..query import QueryResult, QueryLevel
from ..net import LocalEntity
from ..route import StaticRoute, DynamicRoute, Router
from ..sync import SyncManager
from ..store import TransferMethod
from ..store.net_repo import NetRepo
from ..store.local_dir import LocalDir
from ..util import json_serializer
from .conftest import (
has_dcmtk,
DCMTK_VERSION,
dcmtk_priv_sop_retr_xfail,
dcmtk_priv_sop_send_xfail,
)
priv_sop_marks = [dcmtk_priv_sop_retr_xfail, dcmtk_priv_sop_send_xfail]
def make_lookup(dest1, dest2):
def lookup_func(ds):
if ds.PatientID == "TestPat1":
return [dest1]
else:
return [dest2]
return lookup_func
repo_to_repo_subsets = [
pytest.param([None] * 3, marks=priv_sop_marks),
["all"] * 3,
["PATIENT-0"] * 3,
["PATIENT-0/STUDY-1"] * 3,
pytest.param(["PATIENT-0/STUDY-0"] * 3, marks=priv_sop_marks),
pytest.param(["PATIENT-0/STUDY-0/SERIES-0"] * 3, marks=priv_sop_marks),
pytest.param(["PATIENT-0/STUDY-0/SERIES-0/IMAGE-0"] * 3, marks=priv_sop_marks),
pytest.param(["PATIENT-1"] * 3, marks=priv_sop_marks),
]
bucket_to_repo_subsets = [
pytest.param([None] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["all"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["PATIENT-0"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["PATIENT-0/STUDY-1"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["PATIENT-0/STUDY-0"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["PATIENT-0/STUDY-0/SERIES-0"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(
["PATIENT-0/STUDY-0/SERIES-0/IMAGE-0"] * 3, marks=dcmtk_priv_sop_send_xfail
),
pytest.param(["PATIENT-1"] * 3, marks=dcmtk_priv_sop_send_xfail),
]
@mark.parametrize(
"subset_specs",
[
[None] * 3,
["all"] * 3,
["PATIENT-0"] * 3,
["PATIENT-0/STUDY-0"] * 3,
["PATIENT-0/STUDY-0/SERIES-0"] * 3,
["PATIENT-0/STUDY-0/SERIES-0/IMAGE-0"] * 3,
["PATIENT-1"] * 3,
],
)
@mark.asyncio
@has_dcmtk
async def test_gen_transfers(make_local_node, make_dcmtk_net_repo, subset_specs):
local_node = make_local_node()
src_repo, full_qr, _ = make_dcmtk_net_repo(local_node, subset="all")
dest1_repo, dest1_init_qr, _ = make_dcmtk_net_repo(
local_node, subset=subset_specs[0]
)
dest2_repo, dest2_init_qr, _ = make_dcmtk_net_repo(
local_node, subset=subset_specs[1]
)
dest3_repo, dest3_init_qr, _ = make_dcmtk_net_repo(
local_node, subset=subset_specs[2]
)
static_route = StaticRoute([dest1_repo])
dyn_lookup = make_lookup(dest2_repo, dest3_repo)
dyn_route = DynamicRoute(dyn_lookup, required_elems=["PatientID"])
dests = [static_route, dyn_route]
    # Build QRs of what we expect to be transferred to each dest
expect_qrs = {
dest1_repo: full_qr - dest1_init_qr,
dest2_repo: QueryResult(QueryLevel.IMAGE),
dest3_repo: QueryResult(QueryLevel.IMAGE),
}
for ds in full_qr:
dyn_dests = dyn_lookup(ds)
for dest in dyn_dests:
expect_qrs[dest].add(ds)
trans_qrs = {}
async with SyncManager(src_repo, dests) as sm:
async for transfer in sm.gen_transfers():
trans_level = transfer.chunk.qr.level
for route in transfer.method_routes_map[TransferMethod.PROXY]:
for dest in route.dests:
print(f"\n{dest} :\n{transfer.chunk.qr.to_tree()}")
if dest not in trans_qrs:
trans_qrs[dest] = {}
if trans_level not in trans_qrs[dest]:
trans_qrs[dest][trans_level] = deepcopy(transfer.chunk.qr)
else:
for ds in transfer.chunk.qr:
# Check this data is expected
assert ds in expect_qrs[dest]
# Check for duplicate transfers
for lvl_qr in trans_qrs[dest].values():
assert ds not in lvl_qr
trans_qrs[dest][trans_level].add(ds)
@mark.parametrize("subset_specs", repo_to_repo_subsets)
@mark.asyncio
@has_dcmtk
async def test_repo_sync_single_static(
make_local_node, make_dcmtk_net_repo, subset_specs
):
local_node = make_local_node()
src_repo, full_qr, _ = make_dcmtk_net_repo(local_node, subset="all")
dest1_repo, _, dest1_dir = make_dcmtk_net_repo(local_node, subset=subset_specs[0])
static_route = StaticRoute([dest1_repo])
dests = [static_route]
async with SyncManager(src_repo, dests) as sm:
async for transfer in sm.gen_transfers():
for route in transfer.method_routes_map[TransferMethod.PROXY]:
for dest in route.dests:
print(f"{dest} : {json_serializer.dumps(transfer.chunk.qr)}")
await sm.exec_transfer(transfer)
print(sm.report)
dest1_dir = Path(dest1_dir)
found_files = [x for x in dest1_dir.glob("**/*.dcm")]
print(found_files)
assert len(found_files) == len(full_qr)
@mark.parametrize("subset_specs", repo_to_repo_subsets)
@mark.asyncio
@has_dcmtk
async def test_repo_sync_multi(make_local_node, make_dcmtk_net_repo, subset_specs):
local_node = make_local_node()
src_repo, full_qr, _ = make_dcmtk_net_repo(local_node, subset="all")
dest1_repo, _, dest1_dir = make_dcmtk_net_repo(local_node, subset=subset_specs[0])
dest2_repo, _, _ = make_dcmtk_net_repo(local_node, subset=subset_specs[1])
dest3_repo, _, _ = make_dcmtk_net_repo(local_node, subset=subset_specs[2])
static_route = StaticRoute([dest1_repo])
dyn_route = DynamicRoute(
make_lookup(dest2_repo, dest3_repo), required_elems=["PatientID"]
)
dests = [static_route, dyn_route]
async with SyncManager(src_repo, dests) as sm:
async for transfer in sm.gen_transfers():
for route in transfer.method_routes_map[TransferMethod.PROXY]:
for dest in route.dests:
print(f"{dest} : {transfer.chunk.qr}")
await sm.exec_transfer(transfer)
print(sm.report)
dest1_dir = Path(dest1_dir)
found_files = [x for x in dest1_dir.glob("**/*.dcm")]
print(found_files)
assert len(found_files) == len(full_qr)
# TODO: Check that dynamic routing worked correctly
@mark.parametrize("subset_specs", bucket_to_repo_subsets)
@mark.asyncio
@has_dcmtk
async def test_bucket_sync(
make_local_dir, make_local_node, make_dcmtk_net_repo, subset_specs
):
src_bucket, init_qr, _ = make_local_dir("all", max_chunk=2)
local_node = make_local_node()
dest1_repo, _, dest1_dir = make_dcmtk_net_repo(local_node, subset=subset_specs[0])
dest2_repo, _, _ = make_dcmtk_net_repo(local_node, subset=subset_specs[1])
dest3_repo, _, _ = make_dcmtk_net_repo(local_node, subset=subset_specs[2])
static_route = StaticRoute([dest1_repo])
dyn_route = DynamicRoute(
make_lookup(dest2_repo, dest3_repo), required_elems=["PatientID"]
)
dests = [static_route, dyn_route]
async with SyncManager(src_bucket, dests) as sm:
async for transfer in sm.gen_transfers():
await sm.exec_transfer(transfer)
dest1_dir = Path(dest1_dir)
found_files = [x for x in dest1_dir.glob("**/*.dcm")]
print(found_files)
assert len(found_files) == len(init_qr)
``` |
{
"source": "joshyjoseph/react-docker-swagger-demo",
"score": 3
} |
#### File: flask_app/flask_api/main.py
```python
import json
from flask import Flask, request, Response
from flask_cors import CORS
from model import db, TeamModel
import random
from flask_swagger_ui import get_swaggerui_blueprint
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
SWAGGER_URL = '/doc'
API_URL = '/static/swagger.json'
SWAGGERUI_BLUEPRINT = get_swaggerui_blueprint(
SWAGGER_URL,
API_URL,
config={
'app_name': "DemoApp"
}
)
app.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://demo_user:abcd1234@db:5432/demo_db'
db.init_app(app)
with app.app_context():
db.create_all()
db.session.commit()
print("created the db")
@app.route('/add', methods=['POST'])
def add_team():
if request.method == 'POST':
team_name = request.json.get('teamName')
team_role = request.json.get('teamRole')
new_record = TeamModel(teamName=team_name, teamRole=team_role)
print('new_record', new_record)
db.session.add(new_record)
db.session.commit()
return Response(json.dumps({"status": "success"}), status=200, mimetype="application/json")
else:
return Response(json.dumps({"status": "failed : unsupported method type"}), status=405, mimetype="application/json")
@app.route('/fetch/', methods=['GET'])
def fetch_all_info():
records = db.session.query(TeamModel).order_by(TeamModel.teamName).all()
res = []
for record in records:
res.append(dict(
rowId=record.rowId,
teamName=record.teamName,
teamRole=record.teamRole
))
resp = Response(json.dumps(res), status=200, mimetype="application/json")
return resp
@app.route('/fetch/<string:teamName>/', methods=['GET'])
def get_team_info(teamName):
records = TeamModel.query.filter_by(teamName=teamName)
res = []
for record in records:
res.append(dict(
rowId=record.rowId,
teamName=record.teamName,
teamRole=record.teamRole
))
return Response(json.dumps(res), status=200, mimetype="application/json")
@app.route("/", methods=['GET'])
def main():
return {"status": "success"}
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=5678)
```
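A short usage sketch for the API above, assuming the service is reachable at http://localhost:5678 (the host/port passed to `app.run`); the JSON keys mirror the `teamName`/`teamRole` fields read by `add_team`, and the `requests` library is an assumed dependency.
```python
import requests  # third-party HTTP client, assumed to be installed

BASE_URL = "http://localhost:5678"  # assumed: matches app.run(host='0.0.0.0', port=5678)

# Create a record through POST /add; keys match the request.json.get() calls in add_team.
resp = requests.post(f"{BASE_URL}/add", json={"teamName": "core", "teamRole": "backend"})
print(resp.status_code, resp.json())  # expect 200 and {"status": "success"}

# List every record through GET /fetch/.
print(requests.get(f"{BASE_URL}/fetch/").json())

# Fetch records for a single team through GET /fetch/<teamName>/.
print(requests.get(f"{BASE_URL}/fetch/core/").json())
```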
#### File: flask_app/flask_api/model.py
```python
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class TeamModel(db.Model):
rowId = db.Column(db.Integer, primary_key=True)
teamName = db.Column(db.String)
teamRole = db.Column(db.String)
def __repr__(self) -> str:
return "{}:{}:{}".format(self.rowId, self.teamName, self.teamRole)
``` |
{
"source": "joshyka/fedn",
"score": 2
} |
#### File: sdk/cli/run_cmd.py
```python
import click
from .main import main
import requests
@click.option('--daemon',
is_flag=True,
help=(
"Specify to run in daemon mode."
)
)
@main.group('run')
@click.pass_context
def run_cmd(ctx, daemon):
if daemon:
print('{} NYI should run as daemon...'.format(__file__))
@run_cmd.command('client')
@click.option('-c', '--config', required=False, default='project.yaml')
@click.option('-n', '--name', required=False, default=None)
@click.pass_context
def client_cmd(ctx, config, name):
project = ctx.obj['PROJECT']
from fedn.member.client import Client
client = Client(project)
client.run()
@run_cmd.command('fedavg')
@click.pass_context
@click.option('-c', '--config', required=False, default='project.yaml')
@click.option('-r', '--rounds', required=False, default=1)
@click.option('-a', '--active', required=False, default=2)
@click.option('-t', '--timeout', required=False, default=120)
@click.option('-s', '--seedmodel', required=True)
def fedavg_cmd(ctx, rounds, active, timeout, seedmodel, config):
import fedn.combiner.fedavg as fedavg
# TODO override by input parameters
config = {'round_timeout': timeout, 'seedmodel': seedmodel, 'rounds': rounds, 'active_clients': active}
project = ctx.obj['PROJECT']
# combiner = fedavg.Orchestrator(config=config)
from fedn.combiner.helpers import get_combiner
from fedn.combiner.server import FednServer
server = FednServer(project, get_combiner)
server.run(config)
@run_cmd.command('reducer')
@click.pass_context
def reducer_cmd(ctx, ):
from fedn.reducer.reducer import Reducer
project = ctx.obj['PROJECT']
reducer = Reducer(project)
reducer.run()
```
#### File: fedn/combiner/server.py
```python
from concurrent import futures
import grpc
import time
import uuid
import queue
import threading
import fedn.proto.alliance_pb2 as alliance
import fedn.proto.alliance_pb2_grpc as rpc
from datetime import datetime, timedelta
from scaleout.repository.helpers import get_repository
from fedn.utils.mongo import connect_to_mongodb
from fedn.combiner.role import Role
####################################################################################################################
# class PredictionServer:
# #TODO add a flask api and run in separate thread.
# pass
def whoami(client, instance):
client.name = instance.id
client.role = role_to_proto_role(instance.role)
return client
def role_to_proto_role(role):
if role == Role.COMBINER:
return alliance.COMBINER
if role == Role.WORKER:
return alliance.WORKER
if role == Role.REDUCER:
return alliance.REDUCER
if role == Role.OTHER:
return alliance.OTHER
class CombinerClient:
def __init__(self, address, port, id, role):
self.id = id
self.role = role
channel = grpc.insecure_channel(address + ":" + str(port))
self.connection = rpc.ConnectorStub(channel)
self.orchestrator = rpc.CombinerStub(channel)
print("ORCHESTRATOR Client: {} connected to {}:{}".format(self.id, address, port))
threading.Thread(target=self.__listen_to_model_update_stream, daemon=True).start()
threading.Thread(target=self.__listen_to_model_validation_stream, daemon=True).start()
def __listen_to_model_update_stream(self):
""" Subscribe to the model update request stream. """
r = alliance.ClientAvailableMessage()
whoami(r.sender, self)
for request in self.orchestrator.ModelUpdateStream(r):
# A client sent a model update to be handled by the combiner
if request.client.name != "reducer":
print("ORCHESTRATOR: received model from client! {}".format(request.client), flush=True)
self.receive_model_candidate(request.model_update_id)
print("Recieved model update.", flush=True)
def __listen_to_model_validation_stream(self):
""" Subscribe to the model update request stream. """
r = alliance.ClientAvailableMessage()
whoami(r.sender, self)
for validation in self.orchestrator.ModelValidationStream(r):
            # A client sent a model validation to be handled by the combiner
            self.receive_validation(validation)
            print("Received model validation.", flush=True)
def request_model_update(self, model_id, clients=[]):
""" Ask members in from_clients list to update the current global model. """
print("ORCHESTRATOR: Sending to clients {}".format(clients), flush=True)
request = alliance.ModelUpdateRequest()
whoami(request.sender, self)
request.model_id = model_id
request.correlation_id = str(uuid.uuid4())
request.timestamp = str(datetime.now())
if len(clients) == 0:
# Broadcast request to all active member clients
request.receiver.name = ""
request.receiver.role = alliance.WORKER
response = self.orchestrator.SendModelUpdateRequest(request)
else:
# Send to all specified clients
for client in clients:
request.receiver.name = client.name
request.receiver.role = alliance.WORKER
self.orchestrator.SendModelUpdateRequest(request)
print("Requesting model update from clients {}".format(clients), flush=True)
def request_model_validation(self, model_id, from_clients=[]):
""" Send a request for members in from_client to validate the model <model_id>.
The default is to broadcast the request to all active members.
"""
request = alliance.ModelValidationRequest()
whoami(request.sender, self)
request.model_id = model_id
request.correlation_id = str(uuid.uuid4())
request.timestamp = str(datetime.now())
if len(from_clients) == 0:
request.receiver.name = "" # Broadcast request to all active member clients
request.receiver.role = alliance.WORKER
self.orchestrator.SendModelValidationRequest(request)
else:
# Send to specified clients
for client in from_clients:
request.receiver.name = client.name
request.receiver.role = alliance.WORKER
self.orchestrator.SendModelValidationRequest(request)
print("ORCHESTRATOR: Sent validation request for model {}".format(model_id), flush=True)
def _list_clients(self, channel):
request = alliance.ListClientsRequest()
whoami(request.sender, self)
request.channel = channel
clients = self.connection.ListActiveClients(request)
return clients.client
def get_active_trainers(self):
trainers = self._list_clients(alliance.Channel.MODEL_UPDATE_REQUESTS)
return trainers
def get_active_validators(self):
validators = self._list_clients(alliance.Channel.MODEL_VALIDATION_REQUESTS)
return validators
def nr_active_trainers(self):
return len(self.get_active_trainers())
def nr_active_validators(self):
return len(self.get_active_validators())
####################################################################################################################
####################################################################################################################
class FednServer(rpc.CombinerServicer, rpc.ReducerServicer, rpc.ConnectorServicer):
""" Communication relayer. """
def __init__(self, project, get_orchestrator):
self.clients = {}
self.project = project
self.role = Role.COMBINER
self.id = "combiner"
address = "localhost"
port = 12808
try:
unpack = project.config['Alliance']
address = unpack['controller_host']
port = unpack['controller_port']
# self.client = unpack['Member']['name']
except KeyError as e:
print("ORCHESTRATOR: could not get all values from config file {}".format(e))
try:
unpack = self.project.config['Alliance']
address = unpack['controller_host']
port = unpack['controller_port']
self.repository = get_repository(config=unpack['Repository'])
self.bucket_name = unpack["Repository"]["minio_bucket"]
except KeyError as e:
print("ORCHESETRATOR: could not get all values from config file {}".format(e), flush=True)
# get the appropriate combiner class and instantiate with a pointer to the alliance server instance and repository
# self.net = OrchestratorClient(address, port, self.id)
# threading.Thread(target=self.__listen_to_model_update_stream, daemon=True).start()
# threading.Thread(target=self.__listen_to_model_validation_stream, daemon=True).start()
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=100))
# TODO refactor services into separate services
rpc.add_CombinerServicer_to_server(self, self.server)
rpc.add_ConnectorServicer_to_server(self, self.server)
rpc.add_ReducerServicer_to_server(self, self.server)
self.server.add_insecure_port('[::]:' + str(port))
self.orchestrator = get_orchestrator(project)(address, port, self.id, self.role, self.repository)
self.server.start()
# def __get_clients(self):
# return self.clients
def __join_client(self, client):
if not client.name in self.clients.keys():
self.clients[client.name] = {"lastseen": datetime.now()}
print("New client connected:{}".format(client), flush=True)
def _subscribe_client_to_queue(self, client, queue_name):
self.__join_client(client)
if not queue_name in self.clients[client.name].keys():
self.clients[client.name][queue_name] = queue.Queue()
def __get_queue(self, client, queue_name):
try:
return self.clients[client.name][queue_name]
except KeyError:
raise
def __get_status_queue(self, client):
return self.__get_queue(client, alliance.Channel.STATUS)
def _send_request(self, request, queue_name):
self.__route_request_to_client(request, request.receiver, queue_name)
def _broadcast_request(self, request, queue_name):
""" Publish a request to all subscribed members. """
active_clients = self._list_active_clients()
for client in active_clients:
self.clients[client.name][queue_name].put(request)
def __route_request_to_client(self, request, client, queue_name):
try:
q = self.__get_queue(client, queue_name)
q.put(request)
except:
print("Failed to route request to client: {} {}", request.receiver, queue_name)
raise
def _send_status(self, status):
for name, client in self.clients.items():
try:
q = client[alliance.Channel.STATUS]
status.timestamp = str(datetime.now())
q.put(status)
except KeyError:
pass
def __register_heartbeat(self, client):
""" Adds a client entry in the clients dict if first time connecting.
Updates heartbeat timestamp.
"""
self.__join_client(client)
self.clients[client.name]["lastseen"] = datetime.now()
def AllianceStatusStream(self, response, context):
""" A server stream RPC endpoint that emits status messages. """
status = alliance.Status(status="Client {} connecting to AllianceStatusStream.".format(response.sender))
status.log_level = alliance.Status.INFO
status.sender.name = self.id
status.sender.role = role_to_proto_role(self.role)
self._subscribe_client_to_queue(response.sender, alliance.Channel.STATUS)
q = self.__get_queue(response.sender, alliance.Channel.STATUS)
self._send_status(status)
while True:
yield q.get()
def SendStatus(self, status: alliance.Status, context):
# Register a heartbeat (if the clients sends a message it is online)
# self.__register_heartbeat(status.client)
# Add the status message to all subscribers of the status channel
self._send_status(status)
response = alliance.Response()
response.response = "Status received."
return response
def _list_subscribed_clients(self, queue_name):
subscribed_clients = []
for name, client in self.clients.items():
if queue_name in client.keys():
subscribed_clients.append(name)
return subscribed_clients
def _list_active_clients(self, channel):
active_clients = []
for client in self._list_subscribed_clients(channel):
# This can break with different timezones.
now = datetime.now()
then = self.clients[client]["lastseen"]
# TODO: move the heartbeat timeout to config.
if (now - then) < timedelta(seconds=30):
active_clients.append(client)
return active_clients
def ListActiveClients(self, request: alliance.ListClientsRequest, context):
""" RPC endpoint that returns a ClientList containing the names of all active clients.
An active client has sent a status message / responded to a heartbeat
            request in the last 30 seconds.
"""
clients = alliance.ClientList()
active_clients = self._list_active_clients(request.channel)
for client in active_clients:
clients.client.append(alliance.Client(name=client, role=alliance.WORKER))
return clients
def SendHeartbeat(self, heartbeat: alliance.Heartbeat, context):
""" RPC that lets clients send a hearbeat, notifying the server that
the client is available. """
self.__register_heartbeat(heartbeat.sender)
response = alliance.Response()
response.sender.name = heartbeat.sender.name
response.sender.role = heartbeat.sender.role
response.response = "Heartbeat received"
return response
## Combiner Service
def ModelUpdateStream(self, update, context):
client = update.sender
status = alliance.Status(status="Client {} connecting to ModelUpdateStream.".format(client.name))
status.log_level = alliance.Status.INFO
status.sender.name = self.id
status.sender.role = role_to_proto_role(self.role)
self._subscribe_client_to_queue(client, alliance.Channel.MODEL_UPDATES)
q = self.__get_queue(client, alliance.Channel.MODEL_UPDATES)
self._send_status(status)
while True:
yield q.get()
def ModelUpdateRequestStream(self, response, context):
""" A server stream RPC endpoint. Messages from client stream. """
client = response.sender
metadata = context.invocation_metadata()
if metadata:
print("\n\n\nGOT METADATA: {}\n\n\n".format(metadata), flush=True)
status = alliance.Status(status="Client {} connecting to ModelUpdateRequestStream.".format(client.name))
status.log_level = alliance.Status.INFO
whoami(status.sender, self)
# print("Client {} connecting to ModelUpdateRequestStream.".format(client))
self._subscribe_client_to_queue(client, alliance.Channel.MODEL_UPDATE_REQUESTS)
q = self.__get_queue(client, alliance.Channel.MODEL_UPDATE_REQUESTS)
self._send_status(status)
while True:
yield q.get()
def ModelValidationStream(self, update, context):
client = update.sender
status = alliance.Status(status="Client {} connecting to ModelValidationStream.".format(client.name))
status.log_level = alliance.Status.INFO
status.sender.name = self.id
status.sender.role = role_to_proto_role(self.role)
# print("Client {} connecting to ModelUpdateStream.".format(client))
self._subscribe_client_to_queue(client, alliance.Channel.MODEL_VALIDATIONS)
q = self.__get_queue(client, alliance.Channel.MODEL_VALIDATIONS)
self._send_status(status)
while True:
yield q.get()
def ModelValidationRequestStream(self, response, context):
""" A server stream RPC endpoint. Messages from client stream. """
client = response.sender
status = alliance.Status(status="Client {} connecting to ModelValidationRequestStream.".format(client.name))
status.log_level = alliance.Status.INFO
status.sender.name = self.id
status.sender.role = role_to_proto_role(self.role)
# whoami(status.sender, self)
self._subscribe_client_to_queue(client, alliance.Channel.MODEL_VALIDATION_REQUESTS)
q = self.__get_queue(client, alliance.Channel.MODEL_VALIDATION_REQUESTS)
self._send_status(status)
while True:
yield q.get()
def SendModelUpdateRequest(self, request, context):
""" Send a model update request. """
self._send_request(request, alliance.Channel.MODEL_UPDATE_REQUESTS)
response = alliance.Response()
response.response = "CONTROLLER RECEIVED ModelUpdateRequest from client {}".format(request.sender.name)
return response # TODO Fill later
def SendModelUpdate(self, request, context):
""" Send a model update response. """
# self._send_request(request,alliance.Channel.MODEL_UPDATES)
self.orchestrator.receive_model_candidate(request.model_update_id)
print("ORCHESTRATOR: Received model update", flush=True)
response = alliance.Response()
response.response = "RECEIVED ModelUpdate {} from client {}".format(response, response.sender.name)
return response # TODO Fill later
def SendModelValidationRequest(self, request, context):
""" Send a model update request. """
self._send_request(request, alliance.Channel.MODEL_VALIDATION_REQUESTS)
response = alliance.Response()
response.response = "CONTROLLER RECEIVED ModelValidationRequest from client {}".format(request.sender.name)
return response # TODO Fill later
def SendModelValidation(self, request, context):
""" Send a model update response. """
# self._send_request(request,alliance.Channel.MODEL_VALIDATIONS)
self.orchestrator.receive_validation(request)
print("ORCHESTRATOR received validation ", flush=True)
response = alliance.Response()
response.response = "RECEIVED ModelValidation {} from client {}".format(response, response.sender.name)
return response # TODO Fill later
## Reducer Service
def GetGlobalModel(self, request, context):
print("got globalmodel request, sending response! ", flush=True)
response = alliance.GetGlobalModelResponse()
whoami(response.sender, self)
response.receiver.name = "reducer"
response.receiver.role = role_to_proto_role(Role.REDUCER)
response.model_id = self.orchestrator.get_model_id()
return response
####################################################################################################################
def run(self, config):
print("ORCHESTRATOR:starting combiner", flush=True)
self.orchestrator.run(config)
```
#### File: fedn/member/client.py
```python
import threading
import json
import tempfile
from datetime import datetime
import os
# TODO Remove from this level. Abstract to unified non implementation specific client.
from fedn.utils.dispatcher import Dispatcher
import fedn.proto.alliance_pb2 as alliance
import fedn.proto.alliance_pb2_grpc as rpc
import grpc
from scaleout.repository.helpers import get_repository
class Client:
def __init__(self, project):
self.project = project
try:
unpack = self.project.config['Alliance']
address = unpack['controller_host']
port = unpack['controller_port']
self.name = unpack['Member']['name']
except KeyError as e:
print("could not get all values from config file {}".format(e))
try:
self.name = os.environ['CLIENT_NAME']
except KeyError:
pass
self.repository = get_repository(config=unpack['Repository'])
self.bucket_name = unpack["Repository"]["minio_bucket"]
channel = grpc.insecure_channel(address + ":" + str(port))
self.connection = rpc.ConnectorStub(channel)
self.orchestrator = rpc.CombinerStub(channel)
print("Client: {} connected to {}:{}".format(self.name, address, port))
self.dispatcher = Dispatcher(self.project)
self.lock = threading.Lock()
threading.Thread(target=self._send_heartbeat, daemon=True).start()
threading.Thread(target=self.__listen_to_model_update_request_stream, daemon=True).start()
threading.Thread(target=self.__listen_to_model_validation_request_stream, daemon=True).start()
def __listen_to_model_update_request_stream(self):
""" Subscribe to the model update request stream. """
r = alliance.ClientAvailableMessage()
r.sender.name = self.name
r.sender.role = alliance.WORKER
metadata = [('client', r.sender.name)]
for request in self.orchestrator.ModelUpdateRequestStream(r, metadata=metadata):
if request.sender.role == alliance.COMBINER:
# Process training request
global_model_id = request.model_id
# TODO: Error handling
self.send_status("Received model update request.", log_level=alliance.Status.AUDIT,
type=alliance.StatusType.MODEL_UPDATE_REQUEST, request=request)
model_id = self.__process_training_request(global_model_id)
if model_id != None:
# Notify the requesting client that a model update is available
update = alliance.ModelUpdate()
update.sender.name = self.name
update.sender.role = alliance.WORKER
update.receiver.name = request.sender.name
update.receiver.role = request.sender.role
update.model_id = request.model_id
update.model_update_id = model_id
update.timestamp = str(datetime.now())
update.correlation_id = request.correlation_id
response = self.orchestrator.SendModelUpdate(update)
self.send_status("Model update completed.", log_level=alliance.Status.AUDIT,
type=alliance.StatusType.MODEL_UPDATE, request=update)
else:
self.send_status("Client {} failed to complete model update.", log_level=alliance.Status.WARNING,
request=request)
def __listen_to_model_validation_request_stream(self):
""" Subscribe to the model update request stream. """
r = alliance.ClientAvailableMessage()
r.sender.name = self.name
r.sender.role = alliance.WORKER
for request in self.orchestrator.ModelValidationRequestStream(r):
            # Process validation request
            model_id = request.model_id
            # TODO: Error handling
            self.send_status("Received model validation request.", log_level=alliance.Status.AUDIT,
type=alliance.StatusType.MODEL_VALIDATION_REQUEST, request=request)
metrics = self.__process_validation_request(model_id)
if metrics != None:
# Send validation
validation = alliance.ModelValidation()
validation.sender.name = self.name
validation.sender.role = alliance.WORKER
validation.receiver.name = request.sender.name
validation.receiver.role = request.sender.role
validation.model_id = model_id
validation.data = json.dumps(metrics)
self.str = str(datetime.now())
validation.timestamp = self.str
validation.correlation_id = request.correlation_id
response = self.orchestrator.SendModelValidation(validation)
self.send_status("Model validation completed.", log_level=alliance.Status.AUDIT,
type=alliance.StatusType.MODEL_VALIDATION, request=validation)
else:
self.send_status("Client {} failed to complete model validation.".format(self.client),
log_level=alliance.Status.WARNING, request=request)
def __process_training_request(self, model_id):
self.send_status("\t Processing training request for model_id {}".format(model_id))
try:
model = self.repository.get_model(model_id)
fid, infile_name = tempfile.mkstemp(suffix='.h5')
fod, outfile_name = tempfile.mkstemp(suffix='.h5')
with open(infile_name, "wb") as fh:
fh.write(model)
self.dispatcher.run_cmd("train {} {}".format(infile_name, outfile_name))
model_id = self.repository.set_model(outfile_name, is_file=True)
os.unlink(infile_name)
os.unlink(outfile_name)
except Exception as e:
print("ERROR could not process training request due to error: {}".format(e))
model_id = None
return model_id
def __process_validation_request(self, model_id):
self.send_status("Processing validation request for model_id {}".format(model_id))
try:
model = self.repository.get_model(model_id)
fid, infile_name = tempfile.mkstemp(suffix='.h5')
fod, outfile_name = tempfile.mkstemp(suffix='.h5')
with open(infile_name, "wb") as fh:
fh.write(model)
self.dispatcher.run_cmd("validate {} {}".format(infile_name, outfile_name))
with open(outfile_name, "r") as fh:
validation = json.loads(fh.read())
os.unlink(infile_name)
os.unlink(outfile_name)
return validation
except Exception as e:
print("Validation failed with exception {}".format(e), flush=True)
return None
def send_status(self, msg, log_level=alliance.Status.INFO, type=None, request=None):
from google.protobuf.json_format import MessageToJson
status = alliance.Status()
status.sender.name = self.name
status.sender.role = alliance.WORKER
status.log_level = log_level
status.status = str(msg)
if type is not None:
status.type = type
if request is not None:
status.data = MessageToJson(request)
response = self.connection.SendStatus(status)
def _send_heartbeat(self, update_frequency=2.0):
while True:
heartbeat = alliance.Heartbeat(sender=alliance.Client(name=self.name, role=alliance.WORKER))
self.connection.SendHeartbeat(heartbeat)
# self.send_status("HEARTBEAT from {}".format(self.client),log_level=alliance.Status.INFO)
import time
time.sleep(update_frequency)
def run(self):
import time
try:
while True:
time.sleep(1)
print("CLIENT running.", flush=True)
except KeyboardInterrupt:
print("ok exiting..")
```
#### File: data_center/client/validate.py
```python
from __future__ import print_function
import json
import sys
import keras
import tensorflow as tf
import pickle
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from read_data import read_data
import tensorflow.keras.models as krm
def validate(model,data):
print("-- RUNNING Validation --")
# The data, split between train and test sets
(x_test, y_test) = read_data(data)
predictions = model.predict(x_test)
    mae_obj = tf.keras.metrics.MeanAbsoluteError()
    mae_obj.update_state(y_test, predictions)
    mae_val = mae_obj.result().numpy()
    print("-- validation COMPLETED --")
    results = {"mae": str(mae_val)}
return results
if __name__ == '__main__':
# Read the model
model = krm.load_model(sys.argv[1])
validation = validate(model,'../data/test.csv')
with open(sys.argv[2],"w") as fh:
fh.write(json.dumps(validation))
```
#### File: josh/client/train.py
```python
from __future__ import print_function
import sys
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.models as krm
from random import sample
import numpy
import pickle
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from read_data import read_data
def train(model,data,sample_fraction):
print("-- RUNNING TRAINING --")
batch_size = 32
epochs = 1
# The data, split between train and test sets
(x_train, y_train, classes) = read_data(data,sample_fraction=sample_fraction)
"""
num = 3 # Number of Clients
ran_order = sample(range(0, x_train.shape[0]), x_train.shape[0])
local_size=int(x_train.shape[0]/num)
partitionedX=[]
partitionedY=[]
for i in range(0,num):
partitionedX.append(x_train[ran_order[i*local_size:(i+1)*local_size]])
partitionedY.append(y_train[ran_order[i*local_size:(i+1)*local_size]])
X = numpy.array(partitionedX)
Y = numpy.array(partitionedY)
"""
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
print("-- TRAINING COMPLETED --")
return model
if __name__ == '__main__':
print("#####################################################################################################################")
print("#####################################################################################################################")
print("#####################################################################################################################")
print("#####################################################################################################################")
print("#####################################################################################################################")
model = krm.load_model(sys.argv[1])
model = train(model,'../data/train.csv',sample_fraction=0.1)
model.save(sys.argv[2])
``` |
{
"source": "joshysnow/objectstore",
"score": 2
} |
#### File: objectstore/objectstore/observer.py
```python
class Observer:
"""
Objects that are interested in a subject should
inherit from this class.
"""
def notify(self, sender: object):
pass
``` |
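The base class above only declares the `notify` hook, so the sketch below shows one way a subject could drive it; the `Subject` and `PrintObserver` classes are hypothetical illustrations, not part of the objectstore package, and the import path is assumed from the file location above.
```python
from objectstore.observer import Observer  # assumed import path


class PrintObserver(Observer):
    """Illustrative observer that reports which subject changed."""

    def notify(self, sender: object):
        print(f"notified by {sender!r}")


class Subject:
    """Hypothetical subject that tracks observers and fans out notifications."""

    def __init__(self):
        self._observers = []

    def attach(self, observer: Observer):
        self._observers.append(observer)

    def changed(self):
        # Pass ourselves as the sender so observers know who changed.
        for observer in self._observers:
            observer.notify(self)


if __name__ == "__main__":
    subject = Subject()
    subject.attach(PrintObserver())
    subject.changed()  # prints: notified by <__main__.Subject object at 0x...>
```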
{
"source": "JoshYuJump/bali",
"score": 2
} |
#### File: bali/db/comparators.py
```python
from collections import defaultdict
from sqlalchemy import case
from sqlalchemy.ext.hybrid import Comparator
from sqlalchemy.util.langhelpers import dictlike_iteritems
class CaseComparator(Comparator):
def __init__(self, whens, expression):
super().__init__(expression)
self.whens, self.reversed_whens = dictlike_iteritems(whens), defaultdict(list)
for k, v in self.whens:
self.reversed_whens[v].append(k)
def __clause_element__(self):
return case(self.whens, self.expression)
def __eq__(self, other):
return super().__clause_element__().in_(self.reversed_whens[other])
```
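A sketch of how `CaseComparator` could be attached to a model through a hybrid property, assuming a single-character status code column that should be queryable by its human-readable label; the `Order` model, the `STATUS_LABELS` mapping, and the import path are illustrative rather than part of bali itself.
```python
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property

from bali.db.comparators import CaseComparator  # assumed import path

Base = declarative_base()

# Stored code -> human-readable label (illustrative mapping).
STATUS_LABELS = {"P": "pending", "S": "shipped"}


class Order(Base):
    __tablename__ = "orders"

    id = Column(Integer, primary_key=True)
    _status = Column("status", String(1))

    @hybrid_property
    def status(self):
        # Instance access returns the label for the stored code.
        return STATUS_LABELS.get(self._status)

    @status.comparator
    def status(cls):
        # Query access: Order.status == "pending" is rewritten by
        # CaseComparator.__eq__ into an IN (...) over the matching codes.
        return CaseComparator(STATUS_LABELS, cls._status)


# e.g. session.query(Order).filter(Order.status == "pending")
```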
#### File: bali/utils/timezone.py
```python
import calendar
import os
from datetime import datetime, date, timedelta
from typing import Union
import pytz
TzInfoType = Union[type(pytz.utc), pytz.tzinfo.DstTzInfo]
StrTzInfoType = Union[TzInfoType, str]
DEFAULT_TZ_INFO = "Asia/Jakarta"
NotAwareDescription = "expects an aware datetime"
def get_current_timezone() -> TzInfoType:
tz_info = os.environ.get("TZ", DEFAULT_TZ_INFO)
return pytz.timezone(tz_info)
def get_current_timezone_name() -> str:
return get_current_timezone().tzname(None)
def now() -> datetime:
return datetime.now(pytz.utc)
def is_aware(value: datetime) -> bool:
return value.utcoffset() is not None
def is_naive(value: datetime) -> bool:
return value.utcoffset() is None
def make_aware(
value: datetime,
*,
timezone: StrTzInfoType = None,
is_dst: bool = False,
) -> datetime:
assert is_naive(value), "expects a naive datetime"
if timezone is None:
timezone = get_current_timezone()
elif isinstance(timezone, str):
timezone = pytz.timezone(timezone)
else:
pass
return timezone.localize(value, is_dst=is_dst)
def make_naive(
value: datetime,
*,
timezone: StrTzInfoType = None,
) -> datetime:
assert is_aware(value), NotAwareDescription
if timezone is None:
timezone = get_current_timezone()
elif isinstance(timezone, str):
timezone = pytz.timezone(timezone)
else:
pass
return value.astimezone(timezone).replace(tzinfo=None)
def localtime(value: datetime = None, timezone: StrTzInfoType = None) -> datetime:
value, timezone = value or now(), timezone or get_current_timezone()
if isinstance(timezone, str):
timezone = pytz.timezone(timezone)
assert is_aware(value), NotAwareDescription
return value.astimezone(timezone)
def localdate(value: datetime = None, timezone: StrTzInfoType = None) -> date:
return localtime(value, timezone).date()
def start_of(
granularity: str,
value: datetime = None,
*,
timezone: StrTzInfoType = None,
) -> datetime:
value = localtime(value, timezone)
if granularity == "year":
value = value.replace(month=1, day=1)
elif granularity == "month":
value = value.replace(day=1)
elif granularity == "week":
value = value - timedelta(days=calendar.weekday(value.year, value.month, value.day))
elif granularity == "day":
pass
else:
raise ValueError("Granularity must be year, month, week or day")
return value.replace(hour=0, minute=0, second=0, microsecond=0)
``` |
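A brief usage sketch for the helpers above, assuming the module is importable as `bali.utils.timezone` and that the `TZ` environment variable is unset, so the Asia/Jakarta default applies.
```python
from bali.utils.timezone import (  # assumed import path
    get_current_timezone_name, localtime, make_aware, make_naive, now, start_of,
)

aware_now = now()                          # timezone-aware UTC datetime
local = localtime(aware_now)               # the same instant in the configured zone
week_start = start_of("week", aware_now)   # local midnight on Monday of this week

naive = make_naive(local)                  # convert to local time, then strip tzinfo
aware_again = make_aware(naive)            # re-attach the current timezone

print(get_current_timezone_name(), local.isoformat(), week_start.isoformat())
```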