# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import operator
import os
import unittest
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud._helpers import _microseconds_from_datetime
from google.cloud._helpers import UTC
from google.cloud.bigtable.client import Client
from google.cloud.bigtable.column_family import MaxVersionsGCRule
from google.cloud.bigtable.row_filters import ApplyLabelFilter
from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter
from google.cloud.bigtable.row_filters import RowFilterChain
from google.cloud.bigtable.row_filters import RowFilterUnion
from google.cloud.bigtable.row_data import Cell
from google.cloud.bigtable.row_data import PartialRowData
from google.cloud.environment_vars import BIGTABLE_EMULATOR
from test_utils.retry import RetryErrors
from test_utils.system import EmulatorCreds
from test_utils.system import unique_resource_id
LOCATION_ID = 'us-central1-c'
INSTANCE_ID = 'g-c-p' + unique_resource_id('-')
TABLE_ID = 'google-cloud-python-test-table'
COLUMN_FAMILY_ID1 = u'col-fam-id1'
COLUMN_FAMILY_ID2 = u'col-fam-id2'
COL_NAME1 = b'col-name1'
COL_NAME2 = b'col-name2'
COL_NAME3 = b'col-name3-but-other-fam'
CELL_VAL1 = b'cell-val'
CELL_VAL2 = b'cell-val-newer'
CELL_VAL3 = b'altcol-cell-val'
CELL_VAL4 = b'foo'
ROW_KEY = b'row-key'
ROW_KEY_ALT = b'row-key-alt'
EXISTING_INSTANCES = []
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
INSTANCE = None
IN_EMULATOR = False
def _retry_on_unavailable(exc):
"""Retry only errors whose status code is 'UNAVAILABLE'."""
from grpc import StatusCode
return exc.code() == StatusCode.UNAVAILABLE
def setUpModule():
from google.cloud.exceptions import GrpcRendezvous
Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None
if Config.IN_EMULATOR:
credentials = EmulatorCreds()
Config.CLIENT = Client(admin=True, credentials=credentials)
else:
Config.CLIENT = Client(admin=True)
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID)
if not Config.IN_EMULATOR:
retry = RetryErrors(GrpcRendezvous,
error_predicate=_retry_on_unavailable)
instances, failed_locations = retry(Config.CLIENT.list_instances)()
if len(failed_locations) != 0:
raise ValueError('List instances failed in module set up.')
EXISTING_INSTANCES[:] = instances
# After listing, create the test instance.
created_op = Config.INSTANCE.create()
created_op.result(timeout=10)
def tearDownModule():
if not Config.IN_EMULATOR:
Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
def setUp(self):
if Config.IN_EMULATOR:
self.skipTest(
'Instance Admin API not supported in Bigtable emulator')
self.instances_to_delete = []
def tearDown(self):
for instance in self.instances_to_delete:
instance.delete()
def test_list_instances(self):
instances, failed_locations = Config.CLIENT.list_instances()
self.assertEqual(failed_locations, [])
# We have added one new instance in `setUpModule`.
self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
for instance in instances:
instance_existence = (instance in EXISTING_INSTANCES or
instance == Config.INSTANCE)
self.assertTrue(instance_existence)
def test_reload(self):
# Use same arguments as Config.INSTANCE (created in `setUpModule`)
# so we can use reload() on a fresh instance.
instance = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID)
# Make sure metadata unset before reloading.
instance.display_name = None
instance.reload()
self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
def test_create_instance(self):
ALT_INSTANCE_ID = 'new' + unique_resource_id('-')
instance = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID)
operation = instance.create()
# Make sure this instance gets deleted after the test case.
self.instances_to_delete.append(instance)
# We want to make sure the operation completes.
operation.result(timeout=10)
# Create a new Instance object and make sure it is the same.
instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID)
instance_alt.reload()
self.assertEqual(instance, instance_alt)
self.assertEqual(instance.display_name, instance_alt.display_name)
def test_update(self):
OLD_DISPLAY_NAME = Config.INSTANCE.display_name
NEW_DISPLAY_NAME = 'Foo Bar Baz'
Config.INSTANCE.display_name = NEW_DISPLAY_NAME
Config.INSTANCE.update()
# Create a new Instance object and reload it.
instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
instance_alt.reload()
self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
# Make sure to put the instance back the way it was for the
# other test cases.
Config.INSTANCE.display_name = OLD_DISPLAY_NAME
Config.INSTANCE.update()
class TestTableAdminAPI(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._table = Config.INSTANCE.table(TABLE_ID)
cls._table.create()
@classmethod
def tearDownClass(cls):
cls._table.delete()
def setUp(self):
self.tables_to_delete = []
def tearDown(self):
for table in self.tables_to_delete:
table.delete()
def test_list_tables(self):
# Since `Config.INSTANCE` is newly created in `setUpModule`, the table
# created in `setUpClass` here will be the only one.
tables = Config.INSTANCE.list_tables()
self.assertEqual(tables, [self._table])
def test_create_table(self):
temp_table_id = 'foo-bar-baz-table'
temp_table = Config.INSTANCE.table(temp_table_id)
temp_table.create()
self.tables_to_delete.append(temp_table)
# First, create a sorted version of our expected result.
name_attr = operator.attrgetter('name')
expected_tables = sorted([temp_table, self._table], key=name_attr)
# Then query for the tables in the instance and sort them by
# name as well.
tables = Config.INSTANCE.list_tables()
sorted_tables = sorted(tables, key=name_attr)
self.assertEqual(sorted_tables, expected_tables)
def test_create_column_family(self):
temp_table_id = 'foo-bar-baz-table'
temp_table = Config.INSTANCE.table(temp_table_id)
temp_table.create()
self.tables_to_delete.append(temp_table)
self.assertEqual(temp_table.list_column_families(), {})
gc_rule = MaxVersionsGCRule(1)
column_family = temp_table.column_family(COLUMN_FAMILY_ID1,
gc_rule=gc_rule)
column_family.create()
col_fams = temp_table.list_column_families()
self.assertEqual(len(col_fams), 1)
retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1]
self.assertIs(retrieved_col_fam._table, column_family._table)
self.assertEqual(retrieved_col_fam.column_family_id,
column_family.column_family_id)
self.assertEqual(retrieved_col_fam.gc_rule, gc_rule)
def test_update_column_family(self):
temp_table_id = 'foo-bar-baz-table'
temp_table = Config.INSTANCE.table(temp_table_id)
temp_table.create()
self.tables_to_delete.append(temp_table)
gc_rule = MaxVersionsGCRule(1)
column_family = temp_table.column_family(COLUMN_FAMILY_ID1,
gc_rule=gc_rule)
column_family.create()
# Check that our created table is as expected.
col_fams = temp_table.list_column_families()
self.assertEqual(col_fams, {COLUMN_FAMILY_ID1: column_family})
# Update the column family's GC rule and then try to update.
column_family.gc_rule = None
column_family.update()
# Check that the update has propagated.
col_fams = temp_table.list_column_families()
self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule)
def test_delete_column_family(self):
temp_table_id = 'foo-bar-baz-table'
temp_table = Config.INSTANCE.table(temp_table_id)
temp_table.create()
self.tables_to_delete.append(temp_table)
self.assertEqual(temp_table.list_column_families(), {})
column_family = temp_table.column_family(COLUMN_FAMILY_ID1)
column_family.create()
# Make sure the family is there before deleting it.
col_fams = temp_table.list_column_families()
self.assertEqual(list(col_fams.keys()), [COLUMN_FAMILY_ID1])
column_family.delete()
# Make sure we have successfully deleted it.
self.assertEqual(temp_table.list_column_families(), {})
class TestDataAPI(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._table = table = Config.INSTANCE.table(TABLE_ID)
table.create()
table.column_family(COLUMN_FAMILY_ID1).create()
table.column_family(COLUMN_FAMILY_ID2).create()
@classmethod
def tearDownClass(cls):
# Will also delete any data contained in the table.
cls._table.delete()
def _maybe_emulator_skip(self, message):
# NOTE: This method is necessary because ``Config.IN_EMULATOR``
# is set at runtime rather than import time, which means we
# can't use the @unittest.skipIf decorator.
if Config.IN_EMULATOR:
self.skipTest(message)
def setUp(self):
self.rows_to_delete = []
def tearDown(self):
for row in self.rows_to_delete:
row.clear()
row.delete()
row.commit()
def _write_to_row(self, row1=None, row2=None, row3=None, row4=None):
timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC)
timestamp1_micros = _microseconds_from_datetime(timestamp1)
# Truncate to millisecond granularity.
timestamp1_micros -= (timestamp1_micros % 1000)
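# e.g. 1469923565123456 microseconds becomes 1469923565123000 (illustrative value only).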
timestamp1 = _datetime_from_microseconds(timestamp1_micros)
# 1000 microseconds is a millisecond
timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000)
timestamp2_micros = _microseconds_from_datetime(timestamp2)
timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000)
timestamp3_micros = _microseconds_from_datetime(timestamp3)
timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000)
timestamp4_micros = _microseconds_from_datetime(timestamp4)
if row1 is not None:
row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1,
timestamp=timestamp1)
if row2 is not None:
row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2,
timestamp=timestamp2)
if row3 is not None:
row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3,
timestamp=timestamp3)
if row4 is not None:
row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4,
timestamp=timestamp4)
# Create the cells we will check.
cell1 = Cell(CELL_VAL1, timestamp1_micros)
cell2 = Cell(CELL_VAL2, timestamp2_micros)
cell3 = Cell(CELL_VAL3, timestamp3_micros)
cell4 = Cell(CELL_VAL4, timestamp4_micros)
return cell1, cell2, cell3, cell4
def test_timestamp_filter_millisecond_granularity(self):
from google.cloud.bigtable import row_filters
end = datetime.datetime.now()
start = end - datetime.timedelta(minutes=60)
timestamp_range = row_filters.TimestampRange(start=start, end=end)
timefilter = row_filters.TimestampRangeFilter(timestamp_range)
row_data = self._table.read_rows(filter_=timefilter)
row_data.consume_all()
def test_mutate_rows(self):
row1 = self._table.row(ROW_KEY)
row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1)
row1.commit()
self.rows_to_delete.append(row1)
row2 = self._table.row(ROW_KEY_ALT)
row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2)
row2.commit()
self.rows_to_delete.append(row2)
# Change the contents
row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL3)
row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL4)
rows = [row1, row2]
statuses = self._table.mutate_rows(rows)
result = [status.code for status in statuses]
expected_result = [0, 0]
self.assertEqual(result, expected_result)
# Check the contents
row1_data = self._table.read_row(ROW_KEY)
self.assertEqual(
row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL3)
row2_data = self._table.read_row(ROW_KEY_ALT)
self.assertEqual(
row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4)
def test_read_large_cell_limit(self):
row = self._table.row(ROW_KEY)
self.rows_to_delete.append(row)
number_of_bytes = 10 * 1024 * 1024
data = b'1' * number_of_bytes # 10MB of 1's.
row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data)
row.commit()
# Read back the contents of the row.
partial_row_data = self._table.read_row(ROW_KEY)
self.assertEqual(partial_row_data.row_key, ROW_KEY)
cell = partial_row_data.cells[COLUMN_FAMILY_ID1]
column = cell[COL_NAME1]
self.assertEqual(len(column), 1)
self.assertEqual(column[0].value, data)
def test_read_row(self):
row = self._table.row(ROW_KEY)
self.rows_to_delete.append(row)
cell1, cell2, cell3, cell4 = self._write_to_row(row, row, row, row)
row.commit()
# Read back the contents of the row.
partial_row_data = self._table.read_row(ROW_KEY)
self.assertEqual(partial_row_data.row_key, ROW_KEY)
# Check the cells match.
ts_attr = operator.attrgetter('timestamp')
expected_row_contents = {
COLUMN_FAMILY_ID1: {
COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True),
COL_NAME2: [cell3],
},
COLUMN_FAMILY_ID2: {
COL_NAME3: [cell4],
},
}
self.assertEqual(partial_row_data.cells, expected_row_contents)
def test_read_rows(self):
row = self._table.row(ROW_KEY)
row_alt = self._table.row(ROW_KEY_ALT)
self.rows_to_delete.extend([row, row_alt])
cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt,
row, row_alt)
row.commit()
row_alt.commit()
rows_data = self._table.read_rows()
self.assertEqual(rows_data.rows, {})
rows_data.consume_all()
# NOTE: We should refrain from editing protected data on instances.
# Instead we should make the values public or provide factories
# for constructing objects with them.
row_data = PartialRowData(ROW_KEY)
row_data._chunks_encountered = True
row_data._committed = True
row_data._cells = {
COLUMN_FAMILY_ID1: {
COL_NAME1: [cell1],
COL_NAME2: [cell3],
},
}
row_alt_data = PartialRowData(ROW_KEY_ALT)
row_alt_data._chunks_encountered = True
row_alt_data._committed = True
row_alt_data._cells = {
COLUMN_FAMILY_ID1: {
COL_NAME1: [cell2],
},
COLUMN_FAMILY_ID2: {
COL_NAME3: [cell4],
},
}
expected_rows = {
ROW_KEY: row_data,
ROW_KEY_ALT: row_alt_data,
}
self.assertEqual(rows_data.rows, expected_rows)
def test_read_with_label_applied(self):
self._maybe_emulator_skip('Labels not supported by Bigtable emulator')
row = self._table.row(ROW_KEY)
self.rows_to_delete.append(row)
cell1, _, cell3, _ = self._write_to_row(row, None, row)
row.commit()
# Combine a label with column 1.
label1 = u'label-red'
label1_filter = ApplyLabelFilter(label1)
col1_filter = ColumnQualifierRegexFilter(COL_NAME1)
chain1 = RowFilterChain(filters=[col1_filter, label1_filter])
# Combine a label with column 2.
label2 = u'label-blue'
label2_filter = ApplyLabelFilter(label2)
import pygame
from components import Btn, Text, Line, Image, Stage
import simpleaudio as sa
import os
def float_eq(f1, f2):
return abs(f1 - f2) <= 1e-4
#This class represents a musical bar
class Bar:
"""
[__init__ self bpm timing treble bass] generates a new Bar
with [bpm] beats per minute, [timing] as a tuple of the top and bottom
timings, [treble] indicating the notes in the treble clef and [bass]
indicating the notes in the bass clef.
We use a tuple of a list of notes in plaintext, followed by the duration,
to represent a note (e.g. (['C4', 'E4'], 2)).
"""
def __init__(self, bpm, timing, treble, bass):
self.bpm = bpm
self.timing = timing
self.treble = treble
self.bass = bass
#[get_length self] returns the length of this bar in crotchets
def get_length(self):
ans = 0
for (_, dur) in self.treble:
ans += dur
return ans
"""
[note_at_time self time treble] returns the index of the note at [time] crotchets
within the bar. It returns the index of the note in the treble clef
if [treble] is True and bass clef otherwise.
"""
def note_at_time(self, time, treble):
accl = 0
if treble:
notes = self.treble
else:
notes = self.bass
for (idx,(_, dur)) in zip(range(len(notes)), notes):
accl += dur
if accl >= time:
return idx
return len(notes) - 1
"""
[end_duration idx treble] returns the time at which the note at [idx]
in the clef indicated by [treble] (Treble Clef if true, Bass Clef otherwise)
is completed. The time is given in number of crotchets from the start
of the bar.
"""
def end_duration(self, idx, treble):
accl = 0
if treble:
notes = self.treble
else:
notes = self.bass
for i in range(idx + 1):
accl += notes[i][1]
return accl
"""
[get_treble self] returns the list of notes that make up the
treble clef of this bar.
"""
def get_treble(self):
return self.treble
"""
[get_bass self] returns the list of notes that make up the
bass clef of this bar.
"""
def get_bass(self):
return self.bass
"""
[get_bpm self] returns the beats per minute (crotchet = 1 beat) of
this bar.
"""
def get_bpm(self):
return self.bpm
"""
[get_timing self] returns the timing of this bar as a tuple
with the 0th element representing the top number and the 1st element
representing the bottom number.
"""
def get_timing(self):
return self.timing
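# A minimal usage sketch (illustrative only, not part of the original module):
# build a 4/4 bar from (pitches, duration) tuples as described above and query it.
def _example_bar_usage():
    treble = [(['C4', 'E4'], 2), (['G4'], 1), (['A4'], 1)]
    bass = [(['C3'], 4)]
    bar = Bar(bpm=90, timing=(4, 4), treble=treble, bass=bass)
    assert bar.get_length() == 4             # 2 + 1 + 1 crotchets
    assert bar.note_at_time(2.5, True) == 1  # the 'G4' crotchet sounds at time 2.5
    assert bar.end_duration(0, True) == 2    # the opening chord ends after 2 crotchets
    return bar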
#This class represents a musical score
class Score:
"""
[__init__ self file_name note_imgs player] attempts to load the score
file at [file_name]. [note_imgs] and [player] are used to ensure that
all notes in the score are indeed playable and able to be rendered.
Any issues that occur will be printed to stdout and will contain
the line and bar number of the error.
"""
def __init__(self, file_name, note_imgs, player):
self.note_imgs = note_imgs
self.player = player
try:
with open(file_name, 'r') as file:
lines = file.readlines()
#To make sure the last bar is read
lines.append('\n')
self.num_bars = 0
self.valid = True
self.reason = "File is good"
#Read metadata
self.name = lines[0].strip()
bpm = int(lines[1])
timing_split = lines[2].strip().split(' ')
timing = (int(timing_split[0]), int(timing_split[1]))
#Read score
#Stores a tuple of <note name> (e.g. 'A4') and duration (e.g. 1)
#A list of lists of tuples (organised by bars)
#Use sharps to store notes, not flats (e.g. 'A#4')
self.bars = []
bar_no = 1
bar_treble = []
bar_bass = []
bar_treble_len = 0
bar_bass_len = 0
#Requires a newline between every bar
for line_no, note in zip(range(4, len(lines)), lines[4:]):
note = note.upper()
#Lines that start with # are comments
if note.strip().startswith("#"):
continue
#New bar number
elif note == '\n':
#Assert previous bar is good
timing_len = timing[0] / timing[1] * 4.0
if not float_eq(bar_treble_len, timing_len) \
or not float_eq(bar_bass_len, timing_len):
self.valid = False
self.reason = "Bar {} (line {}) appears to \
be invalid (wrong timing)".format(bar_no, line_no)
return
#Add bars to treble and bass, increment bar no
bar = Bar(bpm, timing, bar_treble, bar_bass)
self.bars.append(bar)
bar_treble = []
bar_bass = []
bar_treble_len = 0
bar_bass_len = 0
bar_no += 1
elif note.startswith("CHANGE"):
#Change pace or change timing
note_split = note.strip().split(' ')
if (note_split[1] == 'PACE'):
bpm = int(note_split[2])
elif (note_split[1] == 'TIMING'):
timing = (int(note_split[2]), int(note_split[3]))
else:
#Parse a normal note
note_split = note.strip().split(' ')
if len(note_split) < 3:
self.valid = False
self.reason = "Bar {} (line {}) appears to \
be invalid".format(bar_no, line_no)
return
clef = note_split[0]
#Have commas no spaces for multiple notes
pitches = note_split[1].split(",")
for pitch in pitches:
if not self.player.has_note(pitch):
self.valid = False
self.reason = "Note {} in Bar {} (line {}) is not \
playable".format(pitch, bar_no, line_no)
return
duration = float(note_split[2])
if not self.note_imgs.has_note(duration):
self.valid = False
self.reason = "Duration {0:.2f} in Bar {1:} (line {2:}) \
cannot be displayed".format(duration, bar_no, line_no)
return
if clef == 'B':
bar_bass.append((pitches, duration))
bar_bass_len += duration
else:
bar_treble.append((pitches, duration))
bar_treble_len += duration
self.num_bars = bar_no - 1
except FileNotFoundError:
self.valid = False
self.reason = "File not found"
#except:
# self.valid = False
# self.reason = "An exception occurred while parsing the file"
#[get_metadata self] returns the metadata of this score as a dictionary
def get_metadata(self):
return {"name" : self.name}
#[get_total_bars self] returns the number of bars in this score
def get_total_bars(self):
return self.num_bars
#[get_bar self bar] returns the bar at index [bar] of this score
def get_bar(self, bar):
return self.bars[bar]
#This class renders all of the notes on the score onto the screen.
#It also provides playback control and is able to optionally play notes
#based on the playback
class RenderedScore:
#[__init__ self note_imgs player score] generates a new RenderedScore
#using the images from [note_imgs], audio player [player] and score [score]
def __init__(self, note_imgs, player, score = None):
self.colors = {"yellow": (244, 247, 35), "black": (0,0,0), \
"dark_blue": (47, 29, 245)}
self.note_imgs = note_imgs
self.player = player
self.num_bars = 2
#Used to mark previous notes as black
self.mark_black = True
#Used to notate whether to play notes
self.play_notes = True
if score is None:
self.score = None
else:
self.replace_score(score)
#[bind_screen self parent_screen] is required by the parent Screen.
#It allows interaction with the parent Screen.
def bind_screen(self, parent_screen):
self.parent_screen = parent_screen
#[replace_score self new_score] replaces the current score
#with a new one and draws the new score onto the stage
def replace_score(self, new_score):
self.score = new_score
#Advance at 1.0 pace
self.advance_rate = 1.0
#Stage with no elements
self.stage = Stage()
#Keep track of current state
#Start at the 0th bar
self.curr_bar_idx = 0
#Have not started playing music, set to False when paused
self.has_started = False
#Grab current bar
self.bars = self.get_bars()
#The current timing in the current bar (based on bar timing and bpm)
self.curr_timing = 0.0
#1.0 for normal, -<sth> for rewind, +<sth> for ffwd, 0 for pause
self.advance_pace = 1.0
#Add stage elements
#Add trigger points
self.left_margin = 10
self.start_left_margin = 50
self.right_margin = 310
self.treble_begin = 70
self.treble_increment = 10
self.bass_begin = 160
self.bass_increment = 10
#Precompute adjustments
self.bass_adj = self.get_adj(False)
self.treble_adj = self.get_adj(True)
#Add Treble Lines, Clef and Timing
self.treble_lines = []
self.treble_clef = Image("img/treble_clef.png", (20, 50), (35, 70))
self.stage.add_elt(self.treble_clef)
#30 up to 70
for i in range(self.treble_begin - 4 * self.treble_increment, \
self.treble_begin + self.treble_increment, self.treble_increment):
line = Line((self.left_margin, i), (self.right_margin, i))
self.treble_lines.append(line)
self.stage.add_elt(line)
#Add Bass Lines, Clef and Timing
self.bass_lines = []
self.bass_clef = Image("img/bass_clef.png", (25, 135), (35, 35))
self.stage.add_elt(self.bass_clef)
#120 up to 160
for i in range(self.bass_begin - 4 * self.bass_increment, \
self.bass_begin + self.bass_increment, self.bass_increment):
line = Line((self.left_margin, i), (self.right_margin, i))
self.bass_lines.append(line)
self.stage.add_elt(line)
#Add bar lines
#Generate bar lines for left and right edges
self.bar_lines = [\
Line((self.left_margin, self.treble_begin - 4 * self.treble_increment),\
(self.left_margin,self.treble_begin)), \
Line((self.left_margin, self.bass_begin - 4 * self.bass_increment),\
(self.left_margin,self.bass_begin))]
#Draw bar lines (no changes)
for bar_idx, bar in zip(range(len(self.bars)), self.bars):
#Add bar lines
end_x = self.get_bar_start_x(bar_idx + 1)
self.bar_lines.append(Line((end_x, self.treble_begin \
- 4 * self.treble_increment),(end_x,self.treble_begin)))
self.bar_lines.append(Line((end_x, self.bass_begin \
- 4 * self.bass_increment),(end_x,self.bass_begin)))
for bar_line in self.bar_lines:
self.stage.add_elt(bar_line)
#Draw current position line
play_line_pos = self.get_note_horizontal_pos(self.curr_bar_idx, \
self.curr_timing) + 5
self.play_line = Line((play_line_pos, self.treble_begin - \
5 * self.treble_increment), (play_line_pos, self.bass_begin + \
self.bass_increment))
self.stage.add_elt(self.play_line)
#Grab new timings
self.refresh_timings()
#[refresh_timings self] refreshes all of the bars, timings and bpm
#displayed on the stage based on the current timing (self.curr_timing)
#and current bar index (self.curr_bar_idx)
def refresh_timings(self):
self.stage.clear_tmp_elts()
self.bar_numbers = []
self.timings = []
self.pace_text = []
#Grab current bar
self.bars = self.get_bars()
prev_timing = (None, None)
prev_pace = None
#Render all the bars
for bar_idx, bar in zip(range(len(self.bars)), self.bars):
bar_timings = []
top_timing, bottom_timing = bar.get_timing()
curr_pace = bar.get_bpm()
start_x = self.get_bar_start_x(bar_idx)
bar_idx_norm = self.curr_bar_idx - self.curr_bar_idx % 2
bar_num = bar_idx_norm + bar_idx + 1
self.bar_numbers.append(Text(str(bar_num), (start_x + 5, \
self.treble_begin - 4 * self.treble_increment - 10), \
font_size = 20))
if prev_timing != (top_timing, bottom_timing):
top_timing = str(top_timing)
bottom_timing = str(bottom_timing)
bar_timings.append(Text(top_timing, (start_x + 10, 40), \
font_size = 42))
bar_timings.append(Text(bottom_timing, (start_x + 10, 62), \
font_size = 42))
bar_timings.append(Text(top_timing, (start_x + 10, 130), \
font_size = 42))
bar_timings.append(Text(bottom_timing, (start_x + 10, 152), \
font_size = 42))
if prev_pace != curr_pace:
self.pace_text.append(Text(self.pace_to_str(curr_pace), \
(start_x + 70, self.treble_begin - 4 * \
self.treble_increment - 10), font_size = 20))
#Add timings
prev_timing = bar.get_timing()
prev_pace = curr_pace
self.timings.append(bar_timings)
for timing in bar_timings:
self.stage.add_tmp_elt(timing)
for bar_number in self.bar_numbers:
self.stage.add_tmp_elt(bar_number)
for text in self.pace_text:
self.stage.add_tmp_elt(text)
self.refresh_notes()
#[refresh_notes self] updates the position of all the notes on the screen
#based on self.curr_timing and self.curr_bar_idx
def refresh_notes(self):
#Draw the notes
#Stored by bar in same order as self.bars, then list of pitches for
#each note, then a list of Components for each pitch
#the note Image is always the last element in the list of Components
self.treble_note_imgs = []
self.bass_note_imgs = []
for bar_idx, bar in zip(range(self.num_bars), self.bars):
self.treble_bar_imgs = []
self.add_notes_from_clef(bar_idx, bar.get_treble(), True, \
self.treble_bar_imgs)
self.bass_bar_imgs = []
self.add_notes_from_clef(bar_idx, bar.get_bass(), False, \
self.bass_bar_imgs)
self.treble_note_imgs.append(self.treble_bar_imgs)
self.bass_note_imgs.append(self.bass_bar_imgs)
for bar in self.treble_note_imgs + self.bass_note_imgs:
for pitches in bar:
for pitch in pitches:
for component in pitch:
self.stage.add_tmp_elt(component)
"""
[add_notes_from_clef self bar_idx notes treble append_to] appends [notes]
from the bar at [bar_idx] relative to self.curr_bar_idx from the clef
indicated by [treble] (Treble if True, Bass if False) to [append_to].
"""
"""
Provide basic mosaicing functions.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
from IPython import embed
import numpy as np
from scipy import ndimage
from pypeit import msgs
from pypeit.core import transform
from pypeit.utils import inverse
def build_image_mosaic_transform(shape, shift, rotation=0., binning=(1.,1.)):
r"""
Build the affine transform matrix for a binned image.
The order of operations is as follows:
#. Shift the coordinate system to the center of the image,
assuming the (0,0) coordinate is at the center of the first
pixel.
#. For binning that is different in each dimension, scale the
size of each pixel back to the correct aspect ratio.
#. Rotate the image.
#. Undo the dimension scaling to account for the binning aspect
ratio.
#. Undo the shift to the center of the image.
#. Apply the requested shift, accounting for the binning.
Steps 1-5 are only performed if the image is rotated. Steps 2 and 4 are
only done if the binning is not square. All steps are compiled into a
single transformation matrix using
:func:`~pypeit.core.transform.affine_transform_series`.
The coordinate reference frame adopts numpy/matplotlib conventions; see
:ref:`mosaic`. That is, if you plot the image using
`matplotlib.pyplot.imshow`_, the first axis is along the ordinate (Cartesian
:math:`y`) and the second axis is along the abscissa (Cartesian :math:`x`).
Shifts and rotation are with respect to this coordinate system; see
descriptions of ``shift`` and ``rotation``.
Args:
shape (:obj:`tuple`):
A two-tuple with the shape of the **binned** image. In terms of the
coordinate system, this provides the number of pixels along
Cartesian :math:`y` then along Cartesian :math:`x`; i.e., ``(ny,
nx)``. See description above.
shift (:obj:`tuple`):
A two-tuple with the nominal shift *in Cartesian coordinates* of the
*unbinned* image in the mosaic in each dimension. For example,
setting ``shift=(1,10)`` means the image is shifted :math:`+1` pixel
in :math:`x` and :math:`+10` pixels in :math:`y`.
rotation (:obj:`float`, optional):
The counter-clockwise rotation in degrees of the **unbinned** image
in the mosaic. The rotation assumes the coordinate frame described
above.
binning (:obj:`tuple`, optional):
The number of pixels binned in each dimension. The order should
match the provided image shape. That is, the order is the number of
binned pixels in :math:`y`, then the number in :math:`x`. This only
has an effect on the results when the binning is not the same in
both dimensions.
Returns:
`numpy.ndarray`_: The single coordinate transformation matrix that
applies all transformations. See
:func:`pypeit.core.transform.affine_transform_series`.
"""
if len(shape) != 2:
msgs.error('Shape must be a two-tuple.')
if len(shift) != 2:
msgs.error('Shift must be a two-tuple.')
if len(binning) != 2:
msgs.error('Binning must be a two-tuple.')
tform = []
if np.absolute(rotation) > 0:
# Offset to the center of the image
tform += [dict(translation=(-(shape[0]-1)/2, -(shape[1]-1)/2))]
if binning[0] != binning[1]:
# Rescale back to square pixels. E.g., if the binning is 1x2, this
# scales the size of each pixel in 2nd axis by 2.
tform += [dict(scale=(1.,binning[1]/binning[0]))]
# Apply the rotation
tform += [dict(rotation=-np.radians(rotation))]
if binning[0] != binning[1]:
# Undo the bin scaling
tform += [dict(scale=(1.,binning[0]/binning[1]))]
# Undo the offset to the image center
tform += [dict(translation=((shape[0]-1)/2, (shape[1]-1)/2))]
# Apply the shift
tform += [dict(translation=(shift[1]/binning[1], shift[0]/binning[0]))]
# Compile into a single transformation and return
return transform.affine_transform_series(tform)
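# A minimal usage sketch (illustrative only; the shape, shift, rotation and
# binning values below are made up) of building the affine transform for one
# binned detector image placed in a mosaic.
def _example_transform():
    shape = (512, 1024)    # binned image shape, (ny, nx)
    shift = (100., 50.)    # unbinned Cartesian (x, y) shift in pixels
    binning = (1., 2.)     # pixels binned along y, then along x
    tform = build_image_mosaic_transform(shape, shift, rotation=0.5, binning=binning)
    # tform is a 3x3 affine matrix usable with prepare_mosaic / build_image_mosaic.
    return tform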
def prepare_mosaic(shape, tforms, buffer=0, inplace=False):
r"""
Prepare to mosaic images by determining the shape of the mosaic
image and adjusting the transformation coordinates to the mosaic
pixel coordinates.
Args:
shape (:obj:`tuple`):
A two-tuple with the shape of the images to mosaic. The shape is
assumed to be the same for all images.
tforms (:obj:`list`):
A list of :math:`3\times3` `numpy.ndarray`_ objects with the
transformations to apply to each image. These are adjusted
as necessary to perform the transformations within the
coordinate system of the mosaic image.
buffer (:obj:`int`, optional):
An added buffer in each dimension that frames the mosaic image and
should not have any image data. Buffer must be non-negative.
inplace (:obj:`bool`, optional):
If True, alter the provided ``tforms`` in-place. Otherwise,
the returned transforms are new arrays.
Returns:
:obj:`tuple`: Returns the shape for the mosaic image as a
two-tuple and the new transformation matrices as a list of
`numpy.ndarray`_ objects.
"""
# Use the number of transforms to set the number of images
nimg = len(tforms)
# Get a box that bounds the transformed coordinates of all the mosaic
# images.
coo = np.array([[-0.5,-0.5], [shape[0]-0.5,-0.5], [shape[0]-0.5, shape[1]-0.5],
[-0.5, shape[1]-0.5]])
box = None
for i in range(nimg):
tc = transform.coordinate_transform_2d(coo, tforms[i])
if box is None:
box = np.vstack((np.floor(np.amin(tc, axis=0)), np.ceil(np.amax(tc, axis=0))))
continue
box[0] = np.amin(np.vstack((tc,box[0])), axis=0)
box[1] = np.amax(np.vstack((tc,box[1])), axis=0)
# Set the mosaic image shape
if buffer < 0:
msgs.error('Mosaic image buffer must be >= 0.')
mosaic_shape = tuple(*np.ceil(np.diff(box, axis=0) + 2*buffer).astype(int))
# Adjust the image transformations to be within the limits of the mosaic
# image.
_tforms = tforms if inplace else [None]*nimg
for i in range(nimg):
_tforms[i] = transform.affine_transform_series([dict(translation=(-(box[0,0]-buffer),
-(box[0,1]-buffer)))
]) @ tforms[i]
return mosaic_shape, _tforms
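# Continuing the illustrative sketch above: determine the mosaic shape for two
# identically-shaped images (one at the origin, one shifted) and remap their
# transforms into mosaic pixel coordinates. Values are made up.
def _example_prepare_mosaic():
    shape = (512, 1024)
    tforms = [build_image_mosaic_transform(shape, (0., 0.)),
              build_image_mosaic_transform(shape, (100., 50.))]
    mosaic_shape, mosaic_tforms = prepare_mosaic(shape, tforms, buffer=10)
    return mosaic_shape, mosaic_tforms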
def build_image_mosaic(imgs, tforms, ivar=None, bpm=None, mosaic_shape=None, cval=0., order=0,
overlap='combine'):
r"""
Use the provided images and transformation matrices to construct an image
mosaic.
.. warning::
Beware when using ``order > 0``!
Bad-pixel masks are *always* mapped to the mosaic image using
``order=0`` (i.e., without interpolation). However, masked pixels are
not excluded from the input images during the transformation. For
higher order interpolations (``order > 0``), this means that the masked
pixels can contribute to the interpolation for any given output pixel.
Users should appropriately consider how these pixels will affect the
mosaic pixels *before* calling this function.
Similarly, error propagation from the input image to the mosaic image is
only approximate when ``order > 0``. Error propagation is performed
simply by applying the coordinate transform to each variance image with
the same order as used for the input image, and then combining those
variances as necessary in overlap regions.
Tests show that this approach is also not invertible. I.e., iteratively
transforming the image back and forth between the native and mosaic
frames leads to image drifts.
Args:
imgs (:obj:`list`, `numpy.ndarray`_):
List of `numpy.ndarray`_ images to include in the mosaic. If arrays
do not contain floating-point values, they will be cast as
``np.float64`` before passing them to
`scipy.ndimage.affine_transform`_. The shape of all the input images
must be identical if ``mosaic_shape`` is None.
tforms (:obj:`list`, `numpy.ndarray`_):
List of `numpy.ndarray`_ objects with the transformation matrices
necessary to convert between image and mosaic coordinates. See
:func:`pypeit.core.mosaic.build_image_mosaic_transform`. The number
of transforms must match the number of images. If ``mosaic_shape``
is None, the transforms are considered in a relative sense. That
is, the shape of the output mosaic is determined by applying these
transforms to the bounding boxes of each image and then determining
the shape needed to retain all pixels in the input images. The
transforms are then adjusted appropriately to map to this shape; see
:func:`~pypeit.core.mosaic.prepare_mosaic`. If ``mosaic_shape`` is
*not* None, these transforms are expected to map directly to the
output mosaic coordinates.
ivar (:obj:`list`, `numpy.ndarray`_, optional):
List of `numpy.ndarray`_ images with the inverse variance of the
image data. The number of inverse-variance images must match the
number of images in the mosaic. If None, inverse variance is
returned as None.
bpm (:obj:`list`, `numpy.ndarray`_, optional):
List of boolean `numpy.ndarray`_ objects with the bad-pixel mask for
each image in the mosaic. The number of bad-pixel masks must match
the number of images in the mosaic. If None, all input pixels are
considered valid.
mosaic_shape (:obj:`tuple`, optional):
Shape for the output image. If None, the shape is determined by
:func:`pypeit.core.mosaic.prepare_mosaic` and the shape of all the
input images *must* be identical.
cval (:obj:`float`, optional):
The value used to fill empty pixels in the mosaic.
order (:obj:`int`, optional):
The order of the spline interpolation of each input image onto the
mosaic grid. This is passed directly to
`scipy.ndimage.affine_transform`_. The order has to be in the range
0-5; ``order=0`` is nearest-grid-point interpolations, ``order=1``
is linear.
overlap (:obj:`str`, optional):
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Catalog utility functions / classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.coordinates import Angle, SkyCoord
__all__ = [
'coordinate_iau_format',
'ra_iau_format',
'dec_iau_format',
'skycoord_from_table',
'select_sky_box',
'select_sky_circle',
]
def coordinate_iau_format(coordinate, ra_digits, dec_digits=None,
prefix=''):
"""Coordinate format as an IAU source designation.
Reference: http://cdsweb.u-strasbg.fr/Dic/iau-spec.html
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord`
Source coordinate.
ra_digits : int (>=2)
Number of digits for the Right Ascension part.
dec_digits : int (>=2) or None
Number of digits for the declination part
Default is ``dec_digits`` = None, meaning ``dec_digits`` = ``ra_digits`` - 1.
prefix : str
Prefix to put before the coordinate string, e.g. "SDSS J".
Returns
-------
strrepr : str or list of strings
IAU format string representation of the coordinate.
If this input coordinate is an array, the output is a list of strings.
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> from gammapy.catalog import coordinate_iau_format
Example position from IAU specification
>>> coordinate = SkyCoord('00h51m09.38s -42d26m33.8s', frame='icrs')
>>> designation = 'QSO J' + coordinate_iau_format(coordinate, ra_digits=6)
>>> print(designation)
QSO J005109-4226.5
>>> coordinate = coordinate.transform_to('fk4')
>>> designation = 'QSO B' + coordinate_iau_format(coordinate, ra_digits=6)
>>> print(designation)
QSO B004848-4242.8
Crab pulsar position (positive declination)
>>> coordinate = SkyCoord('05h34m31.93830s +22d00m52.1758s', frame='icrs')
>>> designation = 'HESS J' + coordinate_iau_format(coordinate, ra_digits=4)
>>> print(designation)
HESS J0534+220
PKS 2155-304 AGN position (negative declination)
>>> coordinate = SkyCoord('21h58m52.06511s -30d13m32.1182s', frame='icrs')
>>> designation = '2FGL J' + coordinate_iau_format(coordinate, ra_digits=5)
>>> print(designation)
2FGL J2158.8-3013
Coordinate array inputs result in list of string output.
>>> coordinates = SkyCoord(ra=[10.68458, 83.82208],
... dec=[41.26917, -5.39111],
... unit=('deg', 'deg'), frame='icrs')
>>> designations = coordinate_iau_format(coordinates, ra_digits=5, prefix='HESS J')
>>> print(designations)
['HESS J0042.7+4116', 'HESS J0535.2-0523']
"""
if coordinate.frame.name == 'galactic':
coordinate = coordinate.transform_to('icrs')
if dec_digits is None:
dec_digits = max(2, ra_digits - 1)
ra_str = ra_iau_format(coordinate.ra, ra_digits)
dec_str = dec_iau_format(coordinate.dec, dec_digits)
if coordinate.isscalar:
out = prefix + ra_str + dec_str
else:
out = [prefix + r + d for (r, d) in zip(ra_str, dec_str)]
return out
def ra_iau_format(ra, digits):
"""Right Ascension part of an IAU source designation.
Reference: http://cdsweb.u-strasbg.fr/Dic/iau-spec.html
====== ========
digits format
====== ========
2 HH
3 HHh
4 HHMM
5 HHMM.m
6 HHMMSS
7 HHMMSS.s
====== ========
Parameters
----------
ra : `~astropy.coordinates.Longitude`
Right ascension.
digits : int (>=2)
Number of digits.
Returns
-------
strrepr : str
IAU format string representation of the angle.
"""
if not (isinstance(digits, int) and digits >= 2):
raise ValueError('Invalid digits: {}. Valid options: int >= 2'.format(digits))
if ra.isscalar:
out = _ra_iau_format_scalar(ra, digits)
else:
out = [_ra_iau_format_scalar(_, digits) for _ in ra]
return out
def _ra_iau_format_scalar(ra, digits):
"""Format a single Right Ascension."""
# Note that Python string formatting always rounds the last digit,
# but the IAU spec requires to truncate instead.
# That's why integers with the correct digits are computed and formatted
# instead of formatting floats directly
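# e.g. 00h51m09.38s with digits=7 yields '005109.3' (truncated), whereas naive
# rounding of the seconds would give '005109.4'.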
ra_h = int(ra.hms[0])
ra_m = int(ra.hms[1])
ra_s = ra.hms[2]
if digits == 2: # format: HH
ra_str = '{0:02d}'.format(ra_h)
elif digits == 3: # format: HHh
ra_str = '{0:03d}'.format(int(10 * ra.hour))
elif digits == 4: # format: HHMM
ra_str = '{0:02d}{1:02d}'.format(ra_h, ra_m)
elif digits == 5: # format : HHMM.m
ra_str = '{0:02d}{1:02d}.{2:01d}'.format(ra_h, ra_m, int(ra_s / 6))
elif digits == 6: # format: HHMMSS
ra_str = '{0:02d}{1:02d}{2:02d}'.format(ra_h, ra_m, int(ra_s))
else: # format: HHMMSS.s
SS = int(ra_s)
s_digits = digits - 6
s = int(10 ** s_digits * (ra_s - SS))
fmt = '{0:02d}{1:02d}{2:02d}.{3:0' + str(s_digits) + 'd}'
ra_str = fmt.format(ra_h, ra_m, SS, s)
return ra_str
def dec_iau_format(dec, digits):
"""Declination part of an IAU source designation.
Reference: http://cdsweb.u-strasbg.fr/Dic/iau-spec.html
====== =========
digits format
====== =========
2 +DD
3 +DDd
4 +DDMM
5 +DDMM.m
6 +DDMMSS
7 +DDMMSS.s
====== =========
Parameters
----------
dec : `~astropy.coordinates.Latitude`
Declination.
digits : int (>=2)
Number of digits.
Returns
-------
strrepr : str
IAU format string representation of the angle.
"""
if not (isinstance(digits, int) and digits >= 2):
raise ValueError('Invalid digits: {}. Valid options: int >= 2'.format(digits))
if dec.isscalar:
out = _dec_iau_format_scalar(dec, digits)
else:
out = [_dec_iau_format_scalar(_, digits) for _ in dec]
return out
def _dec_iau_format_scalar(dec, digits):
"""Format a single declination."""
# Note that Python string formatting always rounds the last digit,
# but the IAU spec requires to truncate instead.
# That's why integers with the correct digits are computed and formatted
# instead of formatting floats directly
dec_sign = '+' if dec.deg >= 0 else '-'
dec_d = int(abs(dec.dms[0]))
dec_m = int(abs(dec.dms[1]))
dec_s = abs(dec.dms[2])
if digits == 2: # format: +DD
dec_str = '{}{:02d}'.format(dec_sign, dec_d)
elif digits == 3: # format: +DDd
dec_str = '{:+04d}'.format(int(10 * dec.deg))
elif digits == 4: # format : +DDMM
dec_str = '{}{:02d}{:02d}'.format(dec_sign, dec_d, dec_m)
elif digits == 5: # format: +DDMM.m
dec_str = '{}{:02d}{:02d}.{:01d}'.format(dec_sign, dec_d, dec_m, int(dec_s / 6))
elif digits == 6: # format: +DDMMSS
dec_str = '{}{:02d}{:02d}{:02d}'.format(dec_sign, dec_d, dec_m, int(dec_s))
else: # format: +DDMMSS.s
SS = int(dec_s)
s_digits = digits - 6
s = int(10 ** s_digits * (dec_s - SS))
fmt = '{}{:02d}{:02d}{:02d}.{:0' + str(s_digits) + 'd}'
dec_str = fmt.format(dec_sign, dec_d, dec_m, SS, s)
return dec_str
def skycoord_from_table(table):
"""Make `~astropy.coordinates.SkyCoord` from lon, lat columns in `~astropy.table.Table`.
This is a convenience function similar to `~astropy.coordinates.SkyCoord.guess_from_table`,
but with the column names we usually use.
TODO: I'm not sure if it's a good idea to use this because it's not always clear
which positions are taken.
"""
try:
keys = table.colnames
except AttributeError:
keys = table.keys()
if set(['RAJ2000', 'DEJ2000']).issubset(keys):
lon, lat, frame = 'RAJ2000', 'DEJ2000', 'icrs'
elif set(['RA', 'DEC']).issubset(keys):
lon, lat, frame = 'RA', 'DEC', 'icrs'
elif set(['GLON', 'GLAT']).issubset(keys):
lon, lat, frame = 'GLON', 'GLAT', 'galactic'
elif set(['glon', 'glat']).issubset(keys):
lon, lat, frame = 'glon', 'glat', 'galactic'
else:
raise KeyError('No column GLON / GLAT or RA / DEC or RAJ2000 / DEJ2000 found.')
unit = table[lon].unit.to_string() if table[lon].unit else 'deg'
skycoord = SkyCoord(table[lon], table[lat], unit=unit, frame=frame)
return skycoord
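# A minimal usage sketch (illustrative; the table below is made up): a table with
# 'RA'/'DEC' columns yields an ICRS SkyCoord, with degrees assumed when the
# columns carry no unit.
#
#   from astropy.table import Table
#   table = Table({'RA': [83.633], 'DEC': [22.015]})
#   coords = skycoord_from_table(table)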
def select_sky_box(table, lon_lim, lat_lim, frame='icrs', inverted=False):
"""Select sky positions in a box.
This function can be applied e.g. to event lists of source catalogs
or observation tables.
Note: if useful we can add a function that returns the mask
or indices instead of applying the selection directly
Parameters
----------
table : `~astropy.table.Table`
Table with sky coordinate columns.
lon_lim, lat_lim : `~astropy.coordinates.Angle`
Box limits (each should be a min, max tuple).
frame : str, optional
Frame in which to apply the box cut.
Built-in Astropy coordinate frames are supported, e.g.
'icrs', 'fk5' or 'galactic'.
inverted : bool, optional
Invert selection: keep all entries outside the selected region.
Returns
-------
table : `~astropy.table.Table`
Copy of input table with box cut applied.
Examples
--------
>>> selected_obs_table = select_sky_box(obs_table,
... lon_lim=Angle([150, 300], 'deg'),
... lat_lim=Angle([-50, 0], 'deg'),
... frame='icrs')
"""
skycoord = skycoord_from_table(table)
skycoord = skycoord.transform_to(frame)
lon = skycoord.data.lon
lat = skycoord.data.lat
# SkyCoord automatically wraps lon angles at 360 deg, so in case
# the lon range is wrapped at 180 deg, lon angles must be wrapped
# also at 180 deg for the comparison to work
if any(l < Angle(0., 'deg') for l in lon_lim):
lon = lon.wrap_at(Angle(180, 'deg'))
lon_mask = (lon_lim[0] <= lon) & (lon < lon_lim[1])
lat_mask = (lat_lim[0] <= lat) & (lat < lat_lim[1])
mask = lon_mask & lat_mask
if inverted:
mask = np.invert(mask)
return table[mask]
def select_sky_circle(table, lon_cen, lat_cen, radius, frame='icrs', inverted=False):
"""Select sky positions in a circle.
This function can be applied e.g. to event lists of source catalogs
or observation tables.
Note: if useful we can add a function that returns the mask
or indices instead of applying the selection directly
Parameters
----------
table : `~astropy.table.Table`
Table with sky coordinate columns.
lon_cen, lat_cen : `~astropy.coordinates.Angle`
Circle center.
radius : `~astropy.coordinates.Angle`
Circle radius.
frame : str, optional
Frame in which to apply the box cut.
Built-in Astropy coordinate frames are supported, e.g.
'icrs', 'fk5' or 'galactic'.
inverted : bool, optional
Invert selection: keep all entries outside the selected region.
Returns
-------
table : `~astropy.table.Table`
Copy of input table with circle cut applied.
"""
#!/usr/bin/env python
import time
import datetime
import os
import random
import threading
import argparse
import torch
import numpy as np
import cv2
from robot import Robot
from trainer import Trainer
from logger import Logger
import utils
from lwrf_infer import LwrfInfer
from policies import Explorer, Coordinator
def main(args):
# --------------- Setup options ---------------
# Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
workspace_limits = np.asarray([[-0.724, -0.276], [-0.224, 0.224], [-0.0001, 0.4]])
heightmap_resolution = args.heightmap_resolution # Meters per pixel of heightmap
random_seed = args.random_seed
force_cpu = args.force_cpu
# ------------- Algorithm options -------------
future_reward_discount = args.future_reward_discount
stage_epoch = args.stage_epoch
# -------------- Object options --------------
config_file = args.config_file
# -------------- Testing options --------------
is_testing = args.is_testing
test_preset_cases = args.test_preset_cases
test_target_seeking = args.test_target_seeking
max_test_trials = args.max_test_trials # Maximum number of test runs per case/scenario
max_motion_onecase = args.max_motion_onecase
# ------ Pre-loading and logging options ------
load_ckpt = args.load_ckpt # Load pre-trained ckpt of model
critic_ckpt_file = os.path.abspath(args.critic_ckpt) if load_ckpt else None
coordinator_ckpt_file = os.path.abspath(args.coordinator_ckpt) if load_ckpt else None
continue_logging = args.continue_logging # Continue logging from previous session
save_visualizations = args.save_visualizations
print('-----------------------')
if not is_testing:
if continue_logging:
logging_directory = os.path.abspath(args.logging_directory)
print('Pre-loading data logging session: %s' % logging_directory)
else:
timestamp = time.time()
timestamp_value = datetime.datetime.fromtimestamp(timestamp)
logging_directory = os.path.join(os.path.abspath('logs'), timestamp_value.strftime('%Y-%m-%d.%H:%M:%S'))
print('Creating data logging session: %s' % logging_directory)
else:
logging_directory = os.path.join(os.path.abspath('logs'), 'testing/release', config_file.split('/')[-1].split('.')[0])
print('Creating data logging session: %s' % logging_directory)
# Set random seed
np.random.seed(random_seed)
# Initialize pick-and-place system (camera and robot)
robot = Robot(workspace_limits, is_testing, test_preset_cases, config_file)
# Initialize trainer
trainer = Trainer(future_reward_discount, is_testing, load_ckpt, critic_ckpt_file, force_cpu)
# Initialize data logger
logger = Logger(logging_directory)
logger.save_camera_info(robot.cam_intrinsics, robot.cam_pose, robot.cam_depth_scale) # Save camera intrinsics and pose
logger.save_heightmap_info(workspace_limits, heightmap_resolution) # Save heightmap parameters
# Find last executed iteration of pre-loaded log, and load execution info and RL variables
if continue_logging:
trainer.preload(logger.transitions_directory)
# Define light weight refinenet model
lwrf_model = LwrfInfer(use_cuda=trainer.use_cuda, save_path=logger.lwrf_results_directory)
# Define exploration policy (search for the invisible target)
explorer = Explorer(map_size=int(round((workspace_limits[0, 1] - workspace_limits[0, 0]) / heightmap_resolution)))
# Define coordination policy (coordinate target-oriented pushing and grasping)
coordinator = Coordinator(save_dir=logger.coordinator_directory, ckpt_file=coordinator_ckpt_file)
# Initialize variables for grasping fail and exploration probability
grasp_fail_count = [0]
motion_fail_count = [0]
explore_prob = 0.505 if not is_testing else 0.0
# Quick hack for nonlocal memory between threads in Python 2
nonlocal_variables = {'executing_action': False,
'primitive_action': None,
'seeking_target': False,
'best_push_pix_ind': None,
'push_end_pix_yx': None,
'margin_occupy_ratio': None,
'margin_occupy_norm': None,
'best_grasp_pix_ind': None,
'best_pix_ind': None,
'target_grasped': False}
# Parallel thread to process network output and execute actions
# -------------------------------------------------------------
def process_actions():
while True:
if nonlocal_variables['executing_action']:
# Get pixel location and rotation with highest affordance prediction
nonlocal_variables['best_push_pix_ind'], nonlocal_variables['push_end_pix_yx'] = utils.get_push_pix(push_predictions, trainer.model.num_rotations)
nonlocal_variables['best_grasp_pix_ind'] = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
# Visualize executed primitive, and affordances
if save_visualizations:
push_pred_vis = trainer.get_push_prediction_vis(push_predictions, color_heightmap, nonlocal_variables['best_push_pix_ind'], nonlocal_variables['push_end_pix_yx'])
logger.save_visualizations(trainer.iteration, push_pred_vis, 'push')
cv2.imwrite('visualization.push.png', push_pred_vis)
grasp_pred_vis = trainer.get_grasp_prediction_vis(grasp_predictions, color_heightmap, nonlocal_variables['best_grasp_pix_ind'])
logger.save_visualizations(trainer.iteration, grasp_pred_vis, 'grasp')
cv2.imwrite('visualization.grasp.png', grasp_pred_vis)
if nonlocal_variables['seeking_target']:
print('Seeking target in testing mode')
nonlocal_variables['primitive_action'] = 'push'
height_priors = trainer.push_heuristic(valid_depth_heightmap)
prior = np.multiply(height_priors, push_predictions)
post = explorer.get_action_maps(prior)
search_push_pix_ind, search_push_end_pix_yx = utils.get_push_pix(post, trainer.model.num_rotations)
explorer.update(search_push_end_pix_yx)
if save_visualizations:
search_push_pred_vis = trainer.get_push_prediction_vis(post, color_heightmap, search_push_pix_ind, search_push_end_pix_yx)
cv2.imwrite('visualization.search.png', search_push_pred_vis)
nonlocal_variables['best_pix_ind'] = search_push_pix_ind
else:
# Determine whether grasping or pushing should be executed based on network predictions
best_push_conf = np.max(push_predictions)
best_grasp_conf = np.max(grasp_predictions)
print('Primitive confidence scores: %f (push), %f (grasp)' % (best_push_conf, best_grasp_conf))
# Actor
if not is_testing and trainer.iteration < stage_epoch:
print('Greedy deterministic policy ...')
motion_type = 1 if best_grasp_conf > best_push_conf else 0
else:
print('Coordination policy ...')
syn_input = [best_push_conf, best_grasp_conf, nonlocal_variables['margin_occupy_ratio'], nonlocal_variables['margin_occupy_norm'], grasp_fail_count[0]]
motion_type = coordinator.predict(syn_input)
explore_actions = np.random.uniform() < explore_prob
if explore_actions:
print('Exploring actions, explore_prob: %f' % explore_prob)
motion_type = 1 - motion_type
nonlocal_variables['primitive_action'] = 'push' if motion_type == 0 else 'grasp'
if nonlocal_variables['primitive_action'] == 'push':
grasp_fail_count[0] = 0
nonlocal_variables['best_pix_ind'] = nonlocal_variables['best_push_pix_ind']
predicted_value = np.max(push_predictions)
elif nonlocal_variables['primitive_action'] == 'grasp':
nonlocal_variables['best_pix_ind'] = nonlocal_variables['best_grasp_pix_ind']
predicted_value = np.max(grasp_predictions)
# Save predicted confidence value
trainer.predicted_value_log.append([predicted_value])
logger.write_to_log('predicted-value', trainer.predicted_value_log)
# Compute 3D position of pixel
print('Action: %s at (%d, %d, %d)' % (nonlocal_variables['primitive_action'], nonlocal_variables['best_pix_ind'][0], nonlocal_variables['best_pix_ind'][1], nonlocal_variables['best_pix_ind'][2]))
best_rotation_angle = np.deg2rad(nonlocal_variables['best_pix_ind'][0]*(360.0/trainer.model.num_rotations))
best_pix_x = nonlocal_variables['best_pix_ind'][2]
best_pix_y = nonlocal_variables['best_pix_ind'][1]
primitive_position = [best_pix_x * heightmap_resolution + workspace_limits[0][0], best_pix_y * heightmap_resolution + workspace_limits[1][0], valid_depth_heightmap[best_pix_y][best_pix_x] + workspace_limits[2][0]]
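# Illustrative numbers only: assuming heightmap_resolution = 0.002 m/pixel, pixel
# column 100 maps to x = 100 * 0.002 + (-0.724) = -0.524 m in robot coordinates,
# and with, say, 16 rotations each rotation index spans 360/16 = 22.5 degrees.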
# If pushing, adjust start position, and make sure z value is safe and not too low
if nonlocal_variables['primitive_action'] == 'push':
finger_width = 0.02
safe_kernel_width = int(np.round((finger_width/2)/heightmap_resolution))
local_region = valid_depth_heightmap[max(best_pix_y - safe_kernel_width, 0):min(best_pix_y + safe_kernel_width + 1, valid_depth_heightmap.shape[0]), max(best_pix_x - safe_kernel_width, 0):min(best_pix_x + safe_kernel_width + 1, valid_depth_heightmap.shape[1])]
if local_region.size == 0:
safe_z_position = workspace_limits[2][0]
else:
safe_z_position = np.max(local_region) + workspace_limits[2][0]
primitive_position[2] = safe_z_position
# Save executed primitive
if nonlocal_variables['primitive_action'] == 'push':
trainer.executed_action_log.append([0, nonlocal_variables['best_pix_ind'][0], nonlocal_variables['best_pix_ind'][1], nonlocal_variables['best_pix_ind'][2]]) # 0 - push
elif nonlocal_variables['primitive_action'] == 'grasp':
trainer.executed_action_log.append([1, nonlocal_variables['best_pix_ind'][0], nonlocal_variables['best_pix_ind'][1], nonlocal_variables['best_pix_ind'][2]]) # 1 - grasp
logger.write_to_log('executed-action', trainer.executed_action_log)
# Initialize variables that influence reward
nonlocal_variables['target_grasped'] = False
motion_fail_count[0] += 1
# Execute primitive
if nonlocal_variables['primitive_action'] == 'push':
robot.push(primitive_position, best_rotation_angle, workspace_limits)
elif nonlocal_variables['primitive_action'] == 'grasp':
grasp_fail_count[0] += 1
grasped_object_name = robot.grasp(primitive_position, best_rotation_angle, workspace_limits)
if grasped_object_name in segment_results['labels']:
print('Grasping succeed, grasped', grasped_object_name)
nonlocal_variables['target_grasped'] = grasped_object_name == target_name
print('Target grasped?:', nonlocal_variables['target_grasped'])
if nonlocal_variables['target_grasped']:
motion_fail_count[0] = 0
grasp_fail_count[0] = 0
else:
# posthoc labeling for data augmentation
augment_id = segment_results['labels'].index(grasped_object_name)
augment_mask_heightmap = seg_mask_heightmaps[:, :, augment_id]
logger.save_augment_masks(trainer.iteration, augment_mask_heightmap)
trainer.augment_ids.append(trainer.iteration)
logger.write_to_log('augment-ids', trainer.augment_ids)
else:
print('Grasping failed')
trainer.target_grasped_log.append(int(nonlocal_variables['target_grasped']))
logger.write_to_log('target-grasped', trainer.target_grasped_log)
# Data for classifier actor
if not is_testing and trainer.iteration >= stage_epoch:
robot.sim_read_config_file(config_file='simulation/random/random-8blocks.txt')
if nonlocal_variables['primitive_action'] == 'grasp' and utils.check_grasp_target_oriented(nonlocal_variables['best_pix_ind'], target_mask_heightmap):
data_label = int(nonlocal_variables['target_grasped'])
print('Collecting classifier data', data_label)
coordinator.memory.push(syn_input, data_label)
nonlocal_variables['executing_action'] = False
time.sleep(0.01)
action_thread = threading.Thread(target=process_actions)
action_thread.daemon = True
action_thread.start()
# -------------------------------------------------------------
# -------------------------------------------------------------
# Replay training function
# -------------------------------------------------------------
def replay_training(replay_id, replay_primitive_action, replay_type=None):
# Load replay RGB-D and mask heightmap
replay_color_heightmap = cv2.imread(os.path.join(logger.color_heightmaps_directory, '%06d.color.png' % (replay_id)))
replay_color_heightmap = cv2.cvtColor(replay_color_heightmap, cv2.COLOR_BGR2RGB)
replay_depth_heightmap = cv2.imread(os.path.join(logger.depth_heightmaps_directory, '%06d.depth.png' % (replay_id)), -1)
replay_depth_heightmap = replay_depth_heightmap.astype(np.float32) / 100000
if replay_type == 'augment':
replay_mask_heightmap = cv2.imread(os.path.join(logger.augment_mask_heightmaps_directory, '%06d.augment.mask.png' % (replay_id)), -1)
else:
replay_mask_heightmap = cv2.imread(os.path.join(logger.target_mask_heightmaps_directory, '%06d.mask.png' % (replay_id)), -1)
replay_mask_heightmap = replay_mask_heightmap.astype(np.float32) / 255
replay_reward_value = trainer.reward_value_log[replay_id][0]
if replay_type == 'augment':
# reward for target_grasped is 1.0
replay_reward_value = 1.0
# Read next states
next_color_heightmap = cv2.imread(os.path.join(logger.color_heightmaps_directory, '%06d.color.png' % (replay_id+1)))
next_color_heightmap = cv2.cvtColor(next_color_heightmap, cv2.COLOR_BGR2RGB)
next_depth_heightmap = cv2.imread(os.path.join(logger.depth_heightmaps_directory, '%06d.depth.png' % (replay_id+1)), -1)
next_depth_heightmap = next_depth_heightmap.astype(np.float32) / 100000
next_mask_heightmap = cv2.imread(os.path.join(logger.target_mask_heightmaps_directory, '%06d.mask.png' % (replay_id+1)), -1)
next_mask_heightmap = next_mask_heightmap.astype(np.float32) / 255
replay_change_detected, _ = utils.check_env_depth_change(replay_depth_heightmap, next_depth_heightmap)
if not replay_change_detected:
replay_future_reward = 0.0
else:
replay_next_push_predictions, replay_next_grasp_predictions, _ = trainer.forward(
next_color_heightmap, next_depth_heightmap, next_mask_heightmap, is_volatile=True)
replay_future_reward = max(np.max(replay_next_push_predictions), np.max(replay_next_grasp_predictions))
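        # One-step TD target: immediate reward plus the discounted best Q-value predicted for the next state.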
new_sample_label_value = replay_reward_value + trainer.future_reward_discount * replay_future_reward
# Get labels for replay and backpropagate
replay_best_pix_ind = (np.asarray(trainer.executed_action_log)[replay_id, 1:4]).astype(int)
trainer.backprop(replay_color_heightmap, replay_depth_heightmap, replay_mask_heightmap,
replay_primitive_action, replay_best_pix_ind, new_sample_label_value)
# Recompute prediction value and label for replay buffer
# Compute forward pass with replay
replay_push_predictions, replay_grasp_predictions, _ = trainer.forward(
replay_color_heightmap, replay_depth_heightmap, replay_mask_heightmap, is_volatile=True)
if replay_primitive_action == 'push':
trainer.predicted_value_log[replay_id] = [np.max(replay_push_predictions)]
            trainer.label_value_log[replay_id] = [new_sample_label_value]
elif replay_primitive_action == 'grasp':
trainer.predicted_value_log[replay_id] = [np.max(replay_grasp_predictions)]
            trainer.label_value_log[replay_id] = [new_sample_label_value]
# Reposition function
# -------------------------------------------------------------
def reposition_objects():
robot.restart_sim()
robot.add_objects()
grasp_fail_count[0] = 0
motion_fail_count[0] = 0
trainer.reposition_log.append([trainer.iteration])
logger.write_to_log('reposition', trainer.reposition_log)
augment_training = False
target_name = None
# Start main training/testing loop
# -------------------------------------------------------------
while True:
if test_target_seeking and nonlocal_variables['target_grasped']:
# Restart if target grasped in test_target_seeking mode
reposition_objects()
target_name = None
explorer.reset()
if is_testing:
trainer.model.load_state_dict(torch.load(critic_ckpt_file))
del prev_color_img
# program stopping criterion
if is_testing and len(trainer.reposition_log) >= max_test_trials:
return
print('\n%s iteration: %d' % ('Testing' if is_testing else 'Training', trainer.iteration))
iteration_time_0 = time.time()
# Make sure simulation is still stable (if not, reset simulation)
robot.check_sim()
# Get latest RGB-D image
color_img, depth_img = robot.get_camera_data()
depth_img = depth_img * robot.cam_depth_scale # Apply depth scale from calibration
# Use lwrf to segment/detect target object
segment_results = lwrf_model.segment(color_img)
# Get heightmap from RGB-D image (by re-projecting 3D point cloud)
color_heightmap, depth_heightmap, seg_mask_heightmaps = utils.get_heightmap(
color_img, depth_img, segment_results['masks'], robot.cam_intrinsics, robot.cam_pose, workspace_limits, heightmap_resolution)
valid_depth_heightmap = depth_heightmap.copy()
valid_depth_heightmap[np.isnan(valid_depth_heightmap)] = 0
mask_heightmaps = utils.process_mask_heightmaps(segment_results, seg_mask_heightmaps)
# Check targets
if (len(mask_heightmaps['names']) == 0 and not test_target_seeking) or motion_fail_count[0] >= max_motion_onecase:
# Restart if no targets detected
reposition_objects()
target_name = None
if is_testing:
trainer.model.load_state_dict(torch.load(critic_ckpt_file))
continue
# Choose target
if len(mask_heightmaps['names']) == 0 and test_target_seeking:
nonlocal_variables['seeking_target'] = True
target_mask_heightmap = np.ones_like(valid_depth_heightmap)
else:
nonlocal_variables['seeking_target'] = False
# lwrf_model.display_instances(title=str(trainer.iteration))
if target_name in mask_heightmaps['names']:
# libcloud/common/nttcis.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NTTCIS Common Components
"""
import xml.etree.ElementTree as etree
import re
from functools import wraps
from copy import deepcopy
from base64 import b64encode
from time import sleep
from io import BytesIO
try:
from collections.abc import MutableSequence, Mapping
except ImportError:
from collections import MutableSequence, Mapping
# TODO: use distutils.version once Travis CI has fixed the pylint issue with version
# from distutils.version import LooseVersion
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import ConnectionUserAndKey, XmlResponse, RawResponse
from libcloud.compute.base import Node
from libcloud.utils.py3 import basestring
from libcloud.utils.xml import findtext
from libcloud.compute.types import LibcloudError, InvalidCredsError
# Roadmap / TODO:
#
# 1.0 - Copied from OpSource API, named provider details.
# Set up a few variables to represent all of the NTTC-CIS cloud namespaces
NAMESPACE_BASE = "http://oec.api.opsource.net/schemas"
ORGANIZATION_NS = NAMESPACE_BASE + "/organization"
SERVER_NS = NAMESPACE_BASE + "/server"
NETWORK_NS = NAMESPACE_BASE + "/network"
DIRECTORY_NS = NAMESPACE_BASE + "/directory"
GENERAL_NS = NAMESPACE_BASE + "/general"
BACKUP_NS = NAMESPACE_BASE + "/backup"
# API 2.0 Namespaces and URNs
TYPES_URN = "urn:didata.com:api:cloud:types"
# API end-points
API_ENDPOINTS = {
'na': {
'name': 'North America (NA)',
'host': 'api-na.dimensiondata.com',
'vendor': 'NTTC-CIS'
},
'eu': {
'name': 'Europe (EU)',
'host': 'api-eu.dimensiondata.com',
'vendor': 'NTTC-CIS'
},
'au': {
'name': 'Australia (AU)',
'host': 'api-au.dimensiondata.com',
'vendor': 'NTTC-CIS'
},
'au-gov': {
'name': 'Australia Canberra ACT (AU)',
'host': 'api-canberra.dimensiondata.com',
'vendor': 'NTTC-CIS'
},
'af': {
'name': 'Africa (AF)',
'host': 'api-mea.dimensiondata.com',
'vendor': 'NTTC-CIS'
},
'ap': {
'name': 'Asia Pacific (AP)',
'host': 'api-ap.dimensiondata.com',
'vendor': 'NTTC-CIS'
},
'ca': {
'name': 'Canada (CA)',
'host': 'api-canada.dimensiondata.com',
'vendor': 'NTTC-CIS'
},
'is-na': {
'name': 'North America (NA)',
'host': 'usapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-eu': {
'name': 'Europe (EU)',
'host': 'euapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-au': {
'name': 'Australia (AU)',
'host': 'auapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-af': {
'name': 'Africa (AF)',
'host': 'meaapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-ap': {
'name': 'Asia Pacific (AP)',
'host': 'apapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-latam': {
'name': 'South America (LATAM)',
'host': 'latamapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-canada': {
'name': 'Canada (CA)',
'host': 'canadaapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'ntta-na': {
'name': 'North America (NA)',
'host': 'cloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-eu': {
'name': 'Europe (EU)',
'host': 'eucloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-au': {
'name': 'Australia (AU)',
'host': 'aucloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-af': {
'name': 'Africa (AF)',
'host': 'sacloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-ap': {
'name': 'Asia Pacific (AP)',
'host': 'hkcloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'cisco-na': {
'name': 'North America (NA)',
'host': 'iaas-api-na.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-eu': {
'name': 'Europe (EU)',
'host': 'iaas-api-eu.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-au': {
'name': 'Australia (AU)',
'host': 'iaas-api-au.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-af': {
'name': 'Africa (AF)',
'host': 'iaas-api-mea.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-ap': {
'name': 'Asia Pacific (AP)',
'host': 'iaas-api-ap.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-latam': {
'name': 'South America (LATAM)',
'host': 'iaas-api-sa.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-canada': {
'name': 'Canada (CA)',
'host': 'iaas-api-ca.cisco-ccs.com',
'vendor': 'Cisco'
},
'med1-il': {
'name': 'Israel (IL)',
'host': 'api.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-na': {
'name': 'North America (NA)',
'host': 'api-na.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-eu': {
'name': 'Europe (EU)',
'host': 'api-eu.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-au': {
'name': 'Australia (AU)',
'host': 'api-au.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-af': {
'name': 'Africa (AF)',
'host': 'api-af.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-ap': {
'name': 'Asia Pacific (AP)',
'host': 'api-ap.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-latam': {
'name': 'South America (LATAM)',
'host': 'api-sa.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-canada': {
'name': 'Canada (CA)',
'host': 'api-ca.cloud.med-1.com',
'vendor': 'Med-1'
},
'indosat-id': {
'name': 'Indonesia (ID)',
'host': 'iaas-api.indosat.com',
'vendor': 'Indosat'
},
'indosat-na': {
'name': 'North America (NA)',
'host': 'iaas-usapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-eu': {
'name': 'Europe (EU)',
'host': 'iaas-euapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-au': {
'name': 'Australia (AU)',
'host': 'iaas-auapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-af': {
'name': 'Africa (AF)',
'host': 'iaas-afapi.indosat.com',
'vendor': 'Indosat'
},
'bsnl-in': {
'name': 'India (IN)',
'host': 'api.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-na': {
'name': 'North America (NA)',
'host': 'usapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-eu': {
'name': 'Europe (EU)',
'host': 'euapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-au': {
'name': 'Australia (AU)',
'host': 'auapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-af': {
'name': 'Africa (AF)',
'host': 'afapi.bsnlcloud.com',
'vendor': 'BSNL'
}
}
# Default API end-point for the base connection class.
DEFAULT_REGION = 'na'
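# Example: a region entry resolves to its API host, e.g.
# API_ENDPOINTS[DEFAULT_REGION]['host'] == 'api-na.dimensiondata.com'
# (used by NttCisConnection.__init__ below when a region is passed in).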
BAD_CODE_XML_ELEMENTS = (
('responseCode', SERVER_NS),
('responseCode', TYPES_URN),
('result', GENERAL_NS)
)
BAD_MESSAGE_XML_ELEMENTS = (
('message', SERVER_NS),
('message', TYPES_URN),
('resultDetail', GENERAL_NS)
)
def get_params(func):
@wraps(func)
def paramed(*args, **kwargs):
if kwargs:
            for k, v in list(kwargs.items()):
old_key = k
matches = re.findall(r'_(\w)', k)
for match in matches:
k = k.replace('_' + match, match.upper())
del kwargs[old_key]
kwargs[k] = v
params = kwargs
result = func(args[0], params)
else:
result = func(args[0])
return result
return paramed
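# Illustrative sketch (not part of the original module): get_params rewrites
# snake_case keyword arguments into the camelCase query parameters the API
# expects. For a hypothetical decorated method
#
#     @get_params
#     def ex_list_things(self, params=None):
#         ...
#
# a call such as driver.ex_list_things(network_domain_id='1234') reaches the
# wrapped function with params == {'networkDomainId': '1234'}.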
def dd_object_to_id(obj, obj_type, id_value='id'):
"""
    Takes in a DD object or string and returns its id.
    This is a helper method: many of our functions can take either an object
    or a string, and we need an easy way of converting between them.
    :param obj: The object (or id string) to get the id for
    :type obj: ``object`` or ``str``
    :param obj_type: The expected class of ``obj`` when it is not a string
    :type obj_type: ``type``
    :param id_value: Name of the attribute that holds the id (default ``'id'``)
    :type id_value: ``str``
    :rtype: ``str``
"""
if isinstance(obj, obj_type):
return getattr(obj, id_value)
elif isinstance(obj, (basestring)):
return obj
else:
raise TypeError(
"Invalid type %s looking for basestring or %s"
% (type(obj).__name__, obj_type.__name__)
)
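# Example (illustrative, not in the original source): both calls below return
# the same id string, assuming `node` is a libcloud Node whose id is 'abc-123':
#
#     dd_object_to_id(node, Node)       # -> node.id, i.e. 'abc-123'
#     dd_object_to_id('abc-123', Node)  # -> 'abc-123'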
# TODO: use distutils.version once Travis CI has fixed the pylint issue with version
# This is a temporary workaround.
def LooseVersion(version):
return float(version)
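# Note: this float() stand-in only understands two-part versions such as '2.2'
# or '2.7' (the values used in this module); a three-part string like '2.7.1'
# would raise ValueError, unlike the real distutils LooseVersion.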
class NetworkDomainServicePlan(object):
ESSENTIALS = "ESSENTIALS"
ADVANCED = "ADVANCED"
class NttCisRawResponse(RawResponse):
pass
class NttCisResponse(XmlResponse):
def parse_error(self):
if self.status == httplib.UNAUTHORIZED:
raise InvalidCredsError(self.body)
elif self.status == httplib.FORBIDDEN:
raise InvalidCredsError(self.body)
body = self.parse_body()
if self.status == httplib.BAD_REQUEST:
for response_code in BAD_CODE_XML_ELEMENTS:
code = findtext(body, response_code[0], response_code[1])
if code is not None:
break
for message in BAD_MESSAGE_XML_ELEMENTS:
message = findtext(body, message[0], message[1])
if message is not None:
break
raise NttCisAPIException(code=code,
msg=message,
driver=self.connection.driver)
        if self.status != httplib.OK:
raise NttCisAPIException(code=self.status,
msg=body,
driver=self.connection.driver)
return self.body
class NttCisAPIException(LibcloudError):
def __init__(self, code, msg, driver):
self.code = code
self.msg = msg
self.driver = driver
def __str__(self):
return "%s: %s" % (self.code, self.msg)
def __repr__(self):
return ("<NttCisAPIException: code='%s', msg='%s'>" %
(self.code, self.msg))
class NttCisConnection(ConnectionUserAndKey):
"""
Connection class for the NttCis driver
"""
api_path_version_1 = '/oec'
api_path_version_2 = '/caas'
api_version_1 = 0.9
# Earliest version supported
oldest_api_version = '2.2'
# Latest version supported
latest_api_version = '2.7'
# Default api version
active_api_version = '2.7'
_orgId = None
responseCls = NttCisResponse
rawResponseCls = NttCisRawResponse
allow_insecure = False
def __init__(self, user_id, key, secure=True, host=None, port=None,
url=None, timeout=None, proxy_url=None,
api_version=None, **conn_kwargs):
super(NttCisConnection, self).__init__(
user_id=user_id,
key=key,
secure=secure,
host=host, port=port,
url=url, timeout=timeout,
proxy_url=proxy_url)
if conn_kwargs['region']:
self.host = conn_kwargs['region']['host']
if api_version:
if LooseVersion(api_version) < LooseVersion(
self.oldest_api_version):
msg = 'API Version specified is too old. No longer ' \
'supported. Please upgrade to the latest version {}' \
.format(self.active_api_version)
raise NttCisAPIException(code=None,
msg=msg,
driver=self.driver)
elif LooseVersion(api_version) > LooseVersion(
self.latest_api_version):
msg = 'Unsupported API Version. The version specified is ' \
                      'not released yet. Please use the latest supported ' \
'version {}' \
.format(self.active_api_version)
raise NttCisAPIException(code=None,
msg=msg,
driver=self.driver)
else:
# Overwrite default version using the version user specified
self.active_api_version = api_version
def add_default_headers(self, headers):
headers['Authorization'] = \
('Basic %s' % b64encode(b('%s:%s' % (self.user_id,
self.key))).decode('utf-8'))
headers['Content-Type'] = 'application/xml'
return headers
def request_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s/%s" % (self.api_path_version_1,
self.api_version_1, action)
return super(NttCisConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def request_api_2(self, path, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s/%s/%s" % (self.api_path_version_2,
self.active_api_version, path, action)
return super(NttCisConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def raw_request_with_orgId_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_1(), action)
return super(NttCisConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers, raw=True)
def request_with_orgId_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_1(), action)
return super(NttCisConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
    def request_with_orgId_api_2(self, action, params=None, data='',
                                 headers=None, method='GET'):
        action = "%s/%s" % (self.get_resource_path_api_2(), action)
        return super(NttCisConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)
import discord
from discord.utils import get
from discord.ext import commands, tasks
from functioning import *
from links import *
from responses import *
import mysql.connector as ms
import imdb
import random
import calendar
import pytz
import datetime
import regex
import praw
import pytube
import asyncio
import requests
import wikipedia
import youtube_dl
import urllib.request
from googlesearch import search
from cryptography.fernet import Fernet
# SETUP
prefixes = ["t!","_","thwip ", "thwipper "]
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix=prefixes, intents=intents, case_insensitive=True)
color = discord.Color.from_rgb(65, 95, 255) # 87, 1, 254 | 65, 95, 255 |
bot.remove_command('help')
# SNIPE
deleted_messages = {}
# NUMBER OF REQUESTS
num = 0
# MUSIC
server_index = {}
FFMPEG_OPTS = {
'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn'
}
ydl_op = {
'format':'bestaudio/best',
'postprocessors':[
{
'key':'FFmpegExtractAudio',
'preferredcodec':'mp3',
'preferredquality':'128',
}],}
# DEFAULT TIMEZONE
default_tz = "Asia/Kolkata"
# ENCRYPTER DECRYPTER
key = Fernet.generate_key()
cipher = Fernet(key)
# REDDIT
reddit = praw.Reddit(
client_id = reddit_client_id,
client_secret = reddit_client_secret,
user_agent = reddit_user_agent,
username = reddit_username,
password = <PASSWORD>
)
default_topic = {}
# HELP MENU
help_toggle = 0
# QUIPS
dialogue_list = []
# SQL
conn = ms.connect(host="localhost", user="root", passwd=<PASSWORD>, database="discord")
cursor = conn.cursor()
# //////////////////////////////////////// NON ASYNC FUNCTIONS /////////////////////////////////////
def help_menu():
global help_toggle
embed_help_menu = discord.Embed(title="🕸𝗖𝗼𝗺𝗺𝗮𝗻𝗱 𝗠𝗲𝗻𝘂🕸", description="Prefixes => `[t!] [ _ ] [thwip] [thwipper]`", color=color)
embed_help_menu.set_thumbnail(url=random.choice(url_thumbnails))
embed_help_menu.set_footer(text="New Features Coming Soon 🛠")
if help_toggle == 0:
embed_help_menu.add_field(name="𝗦𝘁𝗮𝗻𝗱𝗮𝗿𝗱",value="hello to greet bot\nhelp to get this menu\nquips to get a famous dialogue or plot\n@Thwipper to get more info about thwipper", inline=False)
if help_toggle == 1:
embed_help_menu.add_field(name="𝗜𝗻𝘁𝗲𝗿𝗻𝗲𝘁",value="w `topic` for wikipedia\ng `topic` to google\nimdb `movie` to get movie details from IMDb\nreddit `topic` to get reddit memes",inline=False)
if help_toggle == 2:
embed_help_menu.add_field(name="𝗗𝗧𝗖", value="dt `timezone` to get IST date and time\ncal `year` `month` to get calendar\nNote: The default timezone is set as `Asia/Kolkata`", inline=False)
if help_toggle == 3:
embed_help_menu.add_field(name="𝗦𝗵𝗲𝗹𝗹𝘀", value="; `query` to use SQL Shell\npy `expression` for python shell\npydoc `method` to get use of that python function", inline=False)
if help_toggle == 4:
embed_help_menu.add_field(name="𝗘𝗻𝗰𝗿𝘆𝗽𝘁𝗲𝗿 𝗗𝗲𝗰𝗿𝘆𝗽𝘁𝗲𝗿", value="hush en `text` to encrypt message\nhush dec `text` to decrypt message\n", inline=False)
if help_toggle == 5:
embed_help_menu.add_field(name="𝗪𝗮𝗹𝗸𝗺𝗮𝗻™",value="cn to get the bot to join voice channel\ndc to remove bot from voice channel\np `name` or `index` to play songs\n▶ res to resume a song\n⏸ pause to pause a song\n⏹ st to stop a song\n🔂 rep to repeat song \n⏭ skip to skip song\n⏮ prev for previous song\n*️⃣ songinfo to get current song\nq `name` to add a song to the queue\nq to view queue\nrem `index` to remove song from queue\ncq to clear queue", inline=False)
if help_toggle == 6:
        embed_help_menu.add_field(name="𝗨𝘁𝗶𝗹𝗶𝘁𝘆", value="req to get number of requests\nping to get user latency\nserverinfo to get server's information\npfp to get user's profile picture\nbit to set quality of bitrate\npolls to see how to conduct a poll\nweb to see deleted message\n.web to troll those who try web command\naddbday `mention` `month` `day` to add a user's birthday to DB\nbday to get thwipper to wish the members\nrembday `mention` to remove a member's birthday from DB.", inline=False)
if help_toggle > 6:
help_toggle = 6
if help_toggle < 0:
help_toggle = 0
return embed_help_menu
def time_converter(seconds):
mins, secs = divmod(seconds, 60)
hours, mins = divmod(mins, 60)
if hours == 0:
return "%02d mins %02d secs" % (mins, secs)
if hours > 0:
return "%d hrs %02d mins %02d secs" % (hours, mins, secs)
def youtube_download(ctx, url):
    with youtube_dl.YoutubeDL(ydl_op) as ydl:
        URL = ydl.extract_info(url, download=False)['formats'][0]['url']
        return URL
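# Illustrative usage (mirrors how the stream URL is consumed in the reaction
# handlers further down): the extracted URL is handed straight to FFmpeg, e.g.
#
#     URL = youtube_download(ctx, "https://www.youtube.com/watch?v=...")
#     voice.play(discord.FFmpegPCMAudio(URL, **FFMPEG_OPTS))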
def requests_query():
global cursor
operation = "INSERT INTO requests(number)VALUES({})".format(num)
cursor.execute(operation)
def number_of_requests():
global num # num = 0
num += 1
requests_query()
# ////////////////////////////////// EVENTS //////////////////////////////////////////////////
@bot.event
async def on_ready():
print("{0.user} is now online...\nHey Tamonud! How's it going?".format(bot))
stop = 0
# QUIPS
global dialogue_list
site = requests.get("https://geektrippers.com/spiderman-quotes/").content.decode().replace("<br>","\n").replace("<strong>"," ").replace("</strong>"," ").replace("<em>"," ").replace("</em>"," ").replace("’","'").replace("”",'"\n\r').replace("…","...").replace("“",'"').replace(" "," ").replace("–","-").replace("‘","'")
for i in range(0, 1000):
        q = site.find('<p class="has-background" style="background-color:#dedfe0">', stop) + len('<p class="has-background" style="background-color:#dedfe0">')
w = site.find("</p>", stop)
stop = w + len("</p>")
dialogues = ""
if not site[q:w]:
continue
else:
dialogues = site[q:w]
dialogue_list += [dialogues]
# STATUSES
@tasks.loop(minutes=10)
async def multiple_statuses():
while True:
for status in status_list:
await asyncio.sleep(300)
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=status))
multiple_statuses.start()
# UPDATION
@tasks.loop(seconds=5.0)
async def updation():
# REQUESTS UPDATE
global cursor
global num
op = "SELECT MAX(number) FROM requests"
cursor.execute(op)
req1 = cursor.fetchall()
req2 = str(req1).replace("[("," ").replace(",)]"," ")
num = int(req2)
conn.commit()
updation.start()
@bot.event
async def on_message(message):
if f"<@!{bot.user.id}>" == message.content:
number_of_requests()
embed = discord.Embed(title="About", description="Hi there!\nI am Thwipper. I was made by `Fairly Rad#1178`. I am a multipurpose bot. From music to famous Spider-Man movie and comic dialogues, I have it all. Also if you want to see how I was made, [click here](https://github.com/spidey711/Thwipper-bot) 👊🏻", color=color)
embed.set_thumbnail(url=bot.user.avatar_url)
embed.set_image(url="https://txt.1001fonts.net/img/txt/dHRmLjcyLjAwMDAwMC5WRWhYU1ZCUVJWSSwuMA,,/lazenby-computer.liquid.png")
embed.set_footer(text="𝗧𝘆𝗽𝗲 _𝘂𝘀𝗲 𝗳𝗼𝗿 𝗰𝗼𝗺𝗺𝗮𝗻𝗱 𝗺𝗲𝗻𝘂", icon_url=message.author.avatar_url)
await message.channel.send(embed=embed)
else:
await bot.process_commands(message)
@bot.event
async def on_message_delete(message):
    if message.channel.id not in deleted_messages:
deleted_messages[message.channel.id] = []
if len(message.embeds) <= 0:
deleted_messages[message.channel.id].append((str(message.author.id), message.content))
else:
deleted_messages[message.channel.id].append((str(message.author.id), message.embeds[0], True))
@bot.event
async def on_reaction_add(reaction, user):
number_of_requests()
if not user.bot:
if reaction.emoji == "🖱":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
try:
sub = reddit.subreddit(default_topic[str(reaction.message.guild.id)]).random()
embed = discord.Embed(description="**Caption:\n**{}".format(sub.title), color=color)
embed.set_author(name="Post by: {}".format(sub.author), icon_url=url_reddit_author)
embed.set_thumbnail(url=url_reddit_thumbnail)
embed.set_image(url=sub.url)
embed.set_footer(text="🔺: {} 🔻: {} 💬: {}".format(sub.ups, sub.downs, sub.num_comments))
await reaction.message.edit(embed=embed)
except Exception:
embed = discord.Embed(description="Default topic is not set", color=color)
embed.set_author(name="Uh oh...", icon_url=url_reddit_author)
await reaction.message.edit(embed=embed)
global help_toggle
if reaction.emoji == "➡":
help_toggle += 1
        if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
await reaction.message.edit(embed=help_menu())
if reaction.emoji == "⬅":
help_toggle -= 1
        if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
await reaction.message.edit(embed=help_menu())
if reaction.emoji == "🕸":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
embed = discord.Embed(title="🕸Mutual Guilds🕸",
description="\n".join([servers.name for servers in user.mutual_guilds]),
color=color)
embed.set_thumbnail(url=random.choice(url_thumbnails))
embed.set_footer(text="New Features Coming Soon 🛠")
await reaction.message.edit(embed=embed)
# MUSIC PLAYER
voice = discord.utils.get(bot.voice_clients, guild=reaction.message.guild)
voice_client = reaction.message.guild.voice_client
playing = reaction.message.guild.voice_client.is_playing()
pause = reaction.message.guild.voice_client.is_paused()
# SERVER QUEUE
operation_view = "SELECT * FROM music_queue WHERE server={}".format(str(reaction.message.guild.id))
cursor.execute(operation_view)
server_queue = cursor.fetchall()
members_in_vc = [str(names) for names in reaction.message.guild.voice_client.channel.members]
if reaction.emoji == "▶":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
if members_in_vc.count(str(user)) > 0:
try:
if server_index[str(reaction.message.guild.id)] is not None:
if pause == True:
voice_client.resume()
embed = discord.Embed(description="**Song: **{}".format(server_queue[server_index[str(reaction.message.guild.id)]][0]).replace(" - YouTube", " "), color=color)
embed.set_author(name="Song Resumed", icon_url=url_author_music)
embed.set_thumbnail(url=pytube.YouTube(url=server_queue[server_index[str(reaction.message.guild.id)]][1]).thumbnail_url)
embed.add_field(name="Uploader", value=pytube.YouTube(url=server_queue[server_index[str(reaction.message.guild.id)]][1]).author, inline=True)
embed.add_field(name="Duration", value=time_converter(pytube.YouTube(url=server_queue[server_index[str(reaction.message.guild.id)]][1]).length), inline=True)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
await reaction.message.edit(embed=embed)
else:
if playing == True:
embed = discord.Embed(description="Song is not paused 🤔", color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(description="Nothing is playing right now ❗", color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
await reaction.message.edit(embed=embed)
else:
if playing != True:
voice_client.resume()
embed = discord.Embed(description="Song has resumed playing ▶", color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(description="Song is already playing 🎸", color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
await reaction.message.edit(embed=embed)
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(name="Error", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(description=f"{reaction.message.author.name}, connect to a voice channel first 🔊", color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
if reaction.emoji == "⏸":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
if members_in_vc.count(str(user)) > 0:
try:
if playing == True:
voice_client.pause()
embed = discord.Embed(description="Song is paused ⏸", color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
await reaction.message.edit(embed=embed)
else:
if pause == True:
embed = discord.Embed(description="Song is already paused ⏸", color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(description="No song playing currently ❗", color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
await reaction.message.edit(embed=embed)
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(name="Error", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(description=f"{reaction.message.author.name}, connect to a voice channel first 🔊", color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
if reaction.emoji == "⏮":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
server_index[str(reaction.message.guild.id)] -= 1
if members_in_vc.count(str(user)) > 0:
try:
URL_queue = youtube_download(reaction.message, server_queue[server_index[str(reaction.message.guild.id)]][1])
if playing != True:
embed = discord.Embed(description="**Song: **{a}\n**Queue Index: **{b}".format(a=server_queue[server_index[str(reaction.message.guild.id)]][0], b=server_index[str(reaction.message.guild.id)]).replace(" - YouTube", " "), color=color)
embed.set_author(name="Now playing", icon_url=url_author_music)
embed.set_thumbnail(url=pytube.YouTube(url=server_queue[server_index[str(reaction.message.guild.id)]][1]).thumbnail_url)
embed.add_field(name="Uploader", value=pytube.YouTube(url=server_queue[server_index[str(reaction.message.guild.id)]][1]).author, inline=True)
embed.add_field(name="Duration", value=time_converter(pytube.YouTube(url=server_queue[server_index[str(reaction.message.guild.id)]][1]).length), inline=True)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
await reaction.message.edit(embed=embed)
voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
else:
voice.stop()
embed = discord.Embed(description="**Song: **{a}\n**Queue Index: **{b}".format(a=server_queue[server_index[str(reaction.message.guild.id)]][0], b=server_index[str(reaction.message.guild.id)]).replace(" - YouTube", " "), color=color)
embed.set_author(name="Now playing", icon_url=url_author_music)
embed.set_thumbnail(url=pytube.YouTube(url=server_queue[server_index[str(reaction.message.guild.id)]][1]).thumbnail_url)
embed.add_field(name="Uploader", value=pytube.YouTube(url=server_queue[server_index[str(reaction.message.guild.id)]][1]).author, inline=True)
embed.add_field(name="Duration", value=time_converter(pytube.YouTube(url=server_queue[server_index[str(reaction.message.guild.id)]][1]).length), inline=True)
embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(reaction.message.guild.voice_client.channel.bitrate/1000))
#!/usr/bin/env python3
# (1) Wrap if, while, etc. statements in curly braces.
# (2) Insert fences; backup file as *.fibak first if it does not exist yet
# Transformations preserve line numbers.
# ------------------------------------------------------------------------------
# Current musketeer input format (SVN rev >= 4816)
# Input format example (musketeer output; 9 cols):
# fence|peterson.c|thr1|6|c::turn|peterson.c|thr1|7|c::flag2
# Input format example (for e.g. pensieve output; 5 cols):
# fence|peterson.c|5|c::flag1|0
# ------------------------------------------------------------------------------
# Old musketeer input formats:
# Input format example (for regular musketeer output; fixed version; 9 cols):
# fence|test.c|test|5|c::exp|test.c|test|5|c::exp
# Input format example (for regular musketeer output; async version;
# unsupported; 11 cols):
# fence|test.c|test|5|exp|c::exp|test.c|test|5|exp|c::exp
# Input format example (for regular musketeer output; with types; unsupported;
# 13 cols):
# dp|pfscan.c|matchfun|311|line_f|c::line_f|signed_int|f.c|fun|31|*mutex|c::p_l|
# (notice that there's no type for the second access)
# Input format example (for e.g. allshared output; 5 cols):
# fence|assoc.c|125|old_hashtable|Write
# ------------------------------------------------------------------------------
# Implementation notes
# - The newline that terminates a line is considered part of the line.
# - Two ways of specifying lines: line number (1-based), or global index of
# character (0-based).
# - Conceptually, the C file is analyzed by moving around a cursor that points
# at characters in the input file. Various functions are provided to move the
# cursor (e.g. ln_toe() moves the cursor to the end of the current line).
# - Common argument names: pos (cursor position), s (string containing source
# file)
# - Assumption: Input is a well-formed C file.
# - Whitespace (e.g. for eat() and puke()): space, tab, newline.
# - Functions that take ranges of cursor positions treat both as inclusive.
# - Most important top-level functions: insert_fences(), place_fence(),
# place_dp().
# - Two types of temporary variables: pull variable, connection variable. For
# the pull variable we need the correct type of the expression.
# ------------------------------------------------------------------------------
# Todo:
# - Comment handling, e.g. comments at end of line (priority low)
# ------------------------------------------------------------------------------
import re
import sys
import shutil
import os
# ------------------------------------------------------------------------------
# Fence map (for ARM and dp only used when dependency insertion is not possible)
fm_x86 = { 'fence': 'mfence' }
fm_arm = { 'fence': 'dsb', 'cf': 'isb', 'dp': 'dsb' }
# ------------------------------------------------------------------------------
# Configuration parameters set via command line arguments.
handle_dp = False
musk_form = True
fm = fm_x86
# ------------------------------------------------------------------------------
# Indices of items in lines in results.txt in musk format
# -1: gets last element from list
im_fence = 0
im_src_file1 = 1
im_func_name1 = 2
im_line1 = 3
im_exp1 = -1
im_cprover_exp1 = -1
im_type1 = -1
im_src_file2 = 5
im_func_name2 = 6
im_line2 = 7
im_exp2 = -1
im_cprover_exp2 = -1
im_type2 = -1
# ------------------------------------------------------------------------------
# Indices of items in lines in results.txt in other format
io_fence = 0
io_src_file = 1
io_line = 2
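# Illustrative sketch (helper not used by the script itself): how a musk-format
# results.txt line maps onto the im_* indices above.
def _example_split_musk_line(line):
    # "fence|peterson.c|thr1|6|c::turn|peterson.c|thr1|7|c::flag2" -> 9 items
    items = line.strip().split('|')
    kind = items[im_fence]  # e.g. 'fence' or 'dp'
    first = (items[im_src_file1], items[im_func_name1], int(items[im_line1]))
    second = (items[im_src_file2], items[im_func_name2], int(items[im_line2]))
    return kind, first, second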
# ------------------------------------------------------------------------------
# Enum for possible fence positions
fence_first, fence_second = range(2)
# Config for fence position (where to insert into the code)
fence_pos = fence_first
# ------------------------------------------------------------------------------
def print_err(s):
print(s, file = sys.stderr)
def assert_msg(c, msg):
if not c:
print_err(msg)
assert(False)
def usage():
print_err("Usage:")
print_err(" fi.py (x86|arm) (fence|dp) (musk|other) <results>")
print_err("")
print_err(" 1: Architecture")
print_err(" 2: Select if fence or real dependency should be used for dp's")
print_err(" 3: Specify input format")
print_err(" 4: Output file of musketeer (results.txt)")
# ------------------------------------------------------------------------------
# Functions to delete, replace, and insert symbols from/in a string
### Insert string at position in string
# :pos is exclusive, pos: is inclusive
def insert(s, pos, c):
return s[:pos] + c + s[pos:]
### Delete string between pos1 (inclusive) and pos2 (exclusive)
def delete(s, pos1, pos2):
assert(pos1 <= pos2)
return s[:pos1] + s[pos2:]
### Get line within string (including newline at end)
# pos: position within a line
def extract_ln(s, pos):
start = ln_tos(s, pos)
end = ln_toe(s, pos)
ss = s[start:end+1]
return ss
### Replace regex on line with repl (regex must match on line)
# Current limitation: only string replacement
def replace_ln(s, pos, regex, repl):
start = ln_tos(s, pos)
end = ln_toe(s, pos)
ln = extract_ln(s, pos)
#lnt = re.sub(regex, ln, repl)
lnt = ln.replace(regex, repl, 1)
assert(ln != lnt);
s = delete(s, start, end + 1)
s = insert(s, start, lnt)
return s
### Insert curly braces at specified positions
def wrap(s, pos1, pos2):
assert(pos1 < pos2)
assert(pos2 < len(s))
    s = insert(s, pos1, '{')
    s = insert(s, pos2 + 1, '}')
return s
### Insert items at given positions of string (l is a list of pairs)
# Items to insert must be single characters
def insert_items(s, l):
l.sort()
cnt = 0
for el in l:
s = insert(s, el[0] + cnt, el[1])
cnt += 1
return s
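# Illustrative example (not used by the script): because every inserted item is
# a single character, bumping the position by one per insertion keeps the later
# (original) positions valid, e.g.
#
#     insert_items("ab", [(0, '{'), (2, '}')])   # -> "{ab}"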
# ------------------------------------------------------------------------------
# Functions to move cursor to start or end of specific lines.
### Goto start of line of a certain number
# return value: index of first char on line, -1 if line does not exist
def before_line(s, n):
assert(n >= 1)
cnt = 1
for i in range(0, len(s)):
if cnt == n:
return i
if s[i] == '\n':
cnt += 1
return -1
### Goto end of line of a certain number
# return value: index of newline at end of line
def after_line(s, n):
return before_line(s, n + 1) - 1
# ------------------------------------------------------------------------------
# Functions to move cursor to start or end of line, given a position.
### Go from end to start of line
def ln_etos(s, pos):
assert(s[pos] == '\n')
return ln_tos(s, pos)
### Go from start to end of line
def ln_stoe(s, pos):
assert(s[pos] != '\n')
return ln_toe(s, pos)
### Go to start of line
def ln_tos(s, pos):
assert(pos > 0)
if s[pos] == '\n':
pos -= 1
while pos > 0 and s[pos] != '\n':
pos -= 1
if s[pos] == '\n':
pos += 1
assert(s[pos] != '\n')
return pos
assert(False)
### Go to end of line
def ln_toe(s, pos):
assert(pos > 0)
l = len(s)
while pos < l and s[pos] != '\n':
pos += 1
if s[pos] == '\n':
return pos
assert(False)
# ------------------------------------------------------------------------------
# Functions to skip over text items. It is an error to skip to the end of the
# string.
def next_item(s, pos, item):
l = len(s)
assert(pos < l)
ret = s.find(item, pos)
# Debug
if (ret == -1):
assert(s[pos] == '\n')
print('Debug: string for next_item:')
print(s[pos:])
return ret
### Get next semicolon at or after pos
# return value: index of next semicolon, or -1
def next_semicolon(s, pos):
return next_item(s, pos, ';')
### Skip over nested items (return pos of next character), forwards or backwards
# s: file as string
# a: left item
# b: right item
# d: direction(1: forward, -1: backward)
# pos: points at first item (a for forward, b for backward)
def skip_nested(s, a, b, d, pos):
l = len(s)
assert(pos < l)
assert(pos >= 0)
assert(d == -1 or d == 1);
assert(d != 1 or s[pos] == a);
assert(d != -1 or s[pos] == b);
cnt = d
pos += d
while True:
assert(pos >= 0)
assert(pos < l)
if s[pos] == a:
cnt += 1
elif s[pos] == b:
cnt -= 1
pos += d
if cnt == 0:
assert(pos >= 0)
assert(pos < l)
return pos
assert(False)
def skip_p(s, pos):
return skip_nested(s, '(', ')', 1, pos)
def skip_b(s, pos):
return skip_nested(s, '{', '}', 1, pos)
def skip_p_b(s, pos):
return skip_nested(s, '(', ')', -1, pos)
def skip_b_b(s, pos):
return skip_nested(s, '{', '}', -1, pos)
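# Illustrative sketch (not called by the script): skipping a balanced
# parenthesised condition.
#
#     s = "while (flag1 && (turn == 1)) { ... }"
#     pos = s.find('(')   # index of the opening '('
#     skip_p(s, pos)      # returns the index just past the matching ')'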
# Return position of current or next non-whitespace character
def eat(s, pos):
l = len(s)
assert(pos < l)
while (pos < l):
c = s[pos]
if c != ' ' and c != '\t' and c != '\n':
return pos
pos += 1
assert(False)
return pos
# Return position of current or previous non-whitespace character.
def puke(s, pos):
l = len(s)
assert(pos < l)
while (pos < l):
c = s[pos]
if c != ' ' and c != '\t' and c != '\n':
return pos
pos -= 1
assert(False)
return pos
# Return position of current or previous non-whitespace character (newline is
# not considered a whitespace character here).
def puke2(s, pos):
l = len(s)
assert(pos < l)
while (pos < l):
c = s[pos]
if c != ' ' and c != '\t':
return pos
pos -= 1
assert(False)
return pos
### Skip over statement (including parentheses).
def skip_stat(s, pos):
l = len(s)
assert(pos < l)
# Skip keyword
if (s[pos:pos+5] == 'while'):
pos += 5
elif (s[pos:pos+2] == 'if'):
pos += 2
elif (s[pos:pos+3] == 'for'):
pos += 3
elif (s[pos:pos+6] == 'switch'):
pos += 6
else:
return -1
pos = eat(s, pos)
if (s[pos] != '('):
        # Spurious statement (e.g. in comment)
def DescribeServicesStatus(self, request):
"""本接口(DescribeServicesStatus)用于搜索查询某一个服务或多个服务的列表,并返回服务相关的域名、时间等信息。
:param request: Request instance for DescribeServicesStatus.
:type request: :class:`tencentcloud.apigateway.v20180808.models.DescribeServicesStatusRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.DescribeServicesStatusResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeServicesStatus", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeServicesStatusResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
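    # Illustrative usage sketch (not part of the generated SDK): every method on
    # this client follows the same request/response pattern, e.g. for the call
    # above:
    #
    #     req = models.DescribeServicesStatusRequest()
    #     req.Limit = 20        # pagination field, assumed for the example
    #     resp = client.DescribeServicesStatus(req)
    #
    # where `client` is an instance of this client class constructed elsewhere
    # with valid credentials, and `resp` is a DescribeServicesStatusResponse.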
def DescribeUsagePlan(self, request):
"""本接口(DescribeUsagePlan)用于查询一个使用计划的详细信息,包括名称、QPS、创建时间绑定的环境等。
:param request: Request instance for DescribeUsagePlan.
:type request: :class:`tencentcloud.apigateway.v20180808.models.DescribeUsagePlanRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.DescribeUsagePlanResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeUsagePlan", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeUsagePlanResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeUsagePlanEnvironments(self, request):
"""本接口(DescribeUsagePlanEnvironments)用于查询使用计划绑定的环境列表。
用户在绑定了某个使用计划到环境后,可使用本接口查询这个使用计划绑定的所有服务的环境。
:param request: Request instance for DescribeUsagePlanEnvironments.
:type request: :class:`tencentcloud.apigateway.v20180808.models.DescribeUsagePlanEnvironmentsRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.DescribeUsagePlanEnvironmentsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeUsagePlanEnvironments", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeUsagePlanEnvironmentsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeUsagePlanSecretIds(self, request):
"""本接口(DescribeUsagePlanSecretIds)用于查询使用计划绑定的密钥列表。
在 API 网关中,一个使用计划可绑定多个密钥对,可使用本接口查询使用计划绑定的密钥列表。
:param request: Request instance for DescribeUsagePlanSecretIds.
:type request: :class:`tencentcloud.apigateway.v20180808.models.DescribeUsagePlanSecretIdsRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.DescribeUsagePlanSecretIdsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeUsagePlanSecretIds", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeUsagePlanSecretIdsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeUsagePlansStatus(self, request):
"""本接口(DescribeUsagePlanStatus)用于查询使用计划的列表。
:param request: Request instance for DescribeUsagePlansStatus.
:type request: :class:`tencentcloud.apigateway.v20180808.models.DescribeUsagePlansStatusRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.DescribeUsagePlansStatusResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeUsagePlansStatus", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeUsagePlansStatusResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DetachPlugin(self, request):
"""解除插件与API绑定
:param request: Request instance for DetachPlugin.
:type request: :class:`tencentcloud.apigateway.v20180808.models.DetachPluginRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.DetachPluginResponse`
"""
try:
params = request._serialize()
body = self.call("DetachPlugin", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DetachPluginResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DisableApiKey(self, request):
"""本接口(DisableApiKey)用于禁用一对 API 密钥。
:param request: Request instance for DisableApiKey.
:type request: :class:`tencentcloud.apigateway.v20180808.models.DisableApiKeyRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.DisableApiKeyResponse`
"""
try:
params = request._serialize()
body = self.call("DisableApiKey", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DisableApiKeyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def EnableApiKey(self, request):
"""本接口(EnableApiKey)用于启动一对被禁用的 API 密钥。
:param request: Request instance for EnableApiKey.
:type request: :class:`tencentcloud.apigateway.v20180808.models.EnableApiKeyRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.EnableApiKeyResponse`
"""
try:
params = request._serialize()
body = self.call("EnableApiKey", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.EnableApiKeyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def GenerateApiDocument(self, request):
"""本接口(GenerateApiDocument)用于自动生成 API 文档和 SDK,一个服务的一个环境生成一份文档和 SDK。
:param request: Request instance for GenerateApiDocument.
:type request: :class:`tencentcloud.apigateway.v20180808.models.GenerateApiDocumentRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.GenerateApiDocumentResponse`
"""
try:
params = request._serialize()
body = self.call("GenerateApiDocument", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GenerateApiDocumentResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyAPIDoc(self, request):
"""修改 API 文档
:param request: Request instance for ModifyAPIDoc.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyAPIDocRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyAPIDocResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyAPIDoc", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyAPIDocResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyApi(self, request):
"""本接口(ModifyApi)用于修改 API 接口,可调用此接口对已经配置的 API 接口进行编辑修改。修改后的 API 需要重新发布 API 所在的服务到对应环境方能生效。
:param request: Request instance for ModifyApi.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyApiRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyApiResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyApi", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyApiResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyApiApp(self, request):
"""本接口(ModifyApiApp)用于修改已经创建的应用。
:param request: Request instance for ModifyApiApp.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyApiAppRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyApiAppResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyApiApp", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyApiAppResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyApiEnvironmentStrategy(self, request):
"""本接口(ModifyApiEnvironmentStrategy)用于修改API限流策略
:param request: Request instance for ModifyApiEnvironmentStrategy.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyApiEnvironmentStrategyRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyApiEnvironmentStrategyResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyApiEnvironmentStrategy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyApiEnvironmentStrategyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyApiIncrement(self, request):
"""提供增量更新API能力,主要是给程序调用(区别于ModifyApi,该接口是需要传入API的全量参数,对console使用较友好)
:param request: Request instance for ModifyApiIncrement.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyApiIncrementRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyApiIncrementResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyApiIncrement", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyApiIncrementResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyExclusiveInstance(self, request):
"""本接口(ModifyExclusiveInstance)用于修改独享实例信息。
:param request: Request instance for ModifyExclusiveInstance.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyExclusiveInstanceRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyExclusiveInstanceResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyExclusiveInstance", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyExclusiveInstanceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyIPStrategy(self, request):
"""本接口(ModifyIPStrategy)用于修改服务IP策略。
:param request: Request instance for ModifyIPStrategy.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyIPStrategyRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyIPStrategyResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyIPStrategy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyIPStrategyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyPlugin(self, request):
"""修改API网关插件。
:param request: Request instance for ModifyPlugin.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyPluginRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyPluginResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyPlugin", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyPluginResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyService(self, request):
"""本接口(ModifyService)用于修改服务的相关信息。当服务创建后,服务的名称、描述和服务类型均可被修改。
:param request: Request instance for ModifyService.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyServiceRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyServiceResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyService", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyServiceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyServiceEnvironmentStrategy(self, request):
"""本接口(ModifyServiceEnvironmentStrategy)用于修改服务限流策略
:param request: Request instance for ModifyServiceEnvironmentStrategy.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifyServiceEnvironmentStrategyRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifyServiceEnvironmentStrategyResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyServiceEnvironmentStrategy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyServiceEnvironmentStrategyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifySubDomain(self, request):
"""本接口(ModifySubDomain)用于修改服务的自定义域名设置中的路径映射,可以修改绑定自定义域名之前的路径映射规则。
:param request: Request instance for ModifySubDomain.
:type request: :class:`tencentcloud.apigateway.v20180808.models.ModifySubDomainRequest`
:rtype: :class:`tencentcloud.apigateway.v20180808.models.ModifySubDomainResponse`
"""
try:
params = request._serialize()
body = self.call("ModifySubDomain", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifySubDomainResponse()
model._deserialize(response["Response"])
return model
else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
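    # --- Hedged usage sketch (not part of the generated SDK) ---------------------
    # The snippet below illustrates how one of the Modify* calls above is typically
    # driven. Credential values, region and ServiceId are placeholders; the client
    # module/class names follow the usual tencentcloud-sdk-python layout and should
    # be checked against the installed SDK version.
    #
    #   from tencentcloud.common import credential
    #   from tencentcloud.apigateway.v20180808 import apigateway_client, models
    #
    #   cred = credential.Credential("SECRET_ID", "SECRET_KEY")
    #   client = apigateway_client.ApigatewayClient(cred, "ap-guangzhou")
    #   req = models.ModifyServiceRequest()
    #   req.ServiceId = "service-xxxxxxxx"
    #   req.ServiceName = "renamed-service"
    #   resp = client.ModifyService(req)
    #   print(resp.to_json_string())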
# Shiguang-Guo/fairseq: fairseq/models/speech_to_speech/s2s_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
import torch
from torch import Tensor
from fairseq import checkpoint_utils, utils
from fairseq.models import (
FairseqEncoderModel,
FairseqEncoderDecoderModel,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text import S2TTransformerEncoder
from fairseq.models.speech_to_speech.modules import CTCDecoder, StackedEmbedding
from fairseq.models.text_to_speech import TTSTransformerDecoder
from fairseq.models.transformer import (
Linear,
TransformerDecoder,
TransformerModelBase,
)
logger = logging.getLogger(__name__)
class S2STransformerEncoder(S2TTransformerEncoder):
"""Based on S2T transformer encoder, with support
to incorporate target speaker embedding."""
def __init__(self, args):
super().__init__(args)
self.spk_emb_proj = None
if args.target_speaker_embed:
self.spk_emb_proj = Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
def forward(
self, src_tokens, src_lengths, tgt_speaker=None, return_all_hiddens=False
):
out = super().forward(src_tokens, src_lengths, return_all_hiddens)
if self.spk_emb_proj:
x = out["encoder_out"][0]
seq_len, bsz, _ = x.size()
tgt_speaker_emb = tgt_speaker.view(1, bsz, -1).expand(seq_len, bsz, -1)
x = self.spk_emb_proj(torch.cat([x, tgt_speaker_emb], dim=2))
out["encoder_out"][0] = x
return out
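# Illustrative sketch (not part of fairseq): the speaker-embedding fusion performed in
# S2STransformerEncoder.forward() above. Dimensions are toy values; `Linear` is the
# fairseq helper imported at the top of this file and stands in for `spk_emb_proj`.
def _speaker_fusion_sketch():
    seq_len, bsz, enc_dim, spk_dim = 7, 2, 16, 4
    x = torch.randn(seq_len, bsz, enc_dim)        # encoder output, T x B x C
    tgt_speaker = torch.randn(bsz, spk_dim)       # one embedding per target utterance
    proj = Linear(enc_dim + spk_dim, enc_dim)
    spk = tgt_speaker.view(1, bsz, -1).expand(seq_len, bsz, -1)
    return proj(torch.cat([x, spk], dim=2))       # back to T x B x C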
class TransformerUnitDecoder(TransformerDecoder):
"""Based on Transformer decoder, with support to decoding stacked units"""
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn, output_projection
)
self.n_frames_per_step = args.n_frames_per_step
self.out_proj_n_frames = (
Linear(
self.output_embed_dim,
self.output_embed_dim * self.n_frames_per_step,
bias=False,
)
if self.n_frames_per_step > 1
else None
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
bsz, seq_len, d = x.size()
if self.out_proj_n_frames:
x = self.out_proj_n_frames(x)
x = self.output_layer(x.view(bsz, seq_len, self.n_frames_per_step, d))
x = x.view(bsz, seq_len * self.n_frames_per_step, -1)
if (
incremental_state is None and self.n_frames_per_step > 1
): # teacher-forcing mode in training
x = x[
:, : -(self.n_frames_per_step - 1), :
] # remove extra frames after <eos>
return x, extra
def upgrade_state_dict_named(self, state_dict, name):
if self.n_frames_per_step > 1:
move_keys = [
(
f"{name}.project_in_dim.weight",
f"{name}.embed_tokens.project_in_dim.weight",
)
]
for from_k, to_k in move_keys:
if from_k in state_dict and to_k not in state_dict:
state_dict[to_k] = state_dict[from_k]
del state_dict[from_k]
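# Illustrative sketch (not part of fairseq): the stacked-unit reshaping done in
# TransformerUnitDecoder.forward() when n_frames_per_step > 1. The two Linear layers
# are stand-ins for `out_proj_n_frames` and the decoder's output projection.
def _stacked_unit_sketch(n_frames_per_step=2):
    bsz, seq_len, d, vocab = 2, 5, 8, 11
    x = torch.randn(bsz, seq_len, d)
    out_proj = Linear(d, d * n_frames_per_step, bias=False)
    output_layer = Linear(d, vocab, bias=False)
    x = out_proj(x)                                           # (B, T, d * n_frames)
    x = output_layer(x.view(bsz, seq_len, n_frames_per_step, d))
    return x.view(bsz, seq_len * n_frames_per_step, -1)       # (B, T * n_frames, vocab)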
class S2STransformerMultitaskModelBase(FairseqEncoderDecoderModel):
@classmethod
def build_encoder(cls, args):
encoder = S2STransformerEncoder(args)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@classmethod
def build_multitask_decoder(cls, args, tgt_dict, in_dim):
decoder_args = args.decoder_args
decoder_args.encoder_embed_dim = in_dim
if args.decoder_type == "transformer":
base_multitask_text_transformer_decoder_arch(decoder_args)
task_decoder = TransformerDecoder(
decoder_args,
tgt_dict,
embed_tokens=TransformerModelBase.build_embedding(
decoder_args,
tgt_dict,
decoder_args.decoder_embed_dim,
),
)
elif args.decoder_type == "ctc":
task_decoder = CTCDecoder(
dictionary=tgt_dict,
in_dim=in_dim,
)
else:
raise NotImplementedError(
"currently only support multitask decoder_type 'transformer', 'ctc'"
)
return task_decoder
@classmethod
def build_model(cls, args, task):
encoder = cls.build_encoder(args)
decoder = (
cls.build_decoder(args, task.target_dictionary)
if task.args.target_is_code
else cls.build_decoder(args)
)
base_model = cls(encoder, decoder)
# set up multitask decoders
base_model.multitask_decoders = {}
for task_name, task_obj in task.multitask_tasks.items():
in_dim = (
args.encoder_embed_dim
if task_obj.args.input_from == "encoder"
else args.decoder_embed_dim
)
task_decoder = cls.build_multitask_decoder(
task_obj.args, task_obj.target_dictionary, in_dim
)
setattr(base_model, f"{task_name}_decoder", task_decoder)
decoder_model_cls = (
FairseqEncoderModel
if task_obj.args.decoder_type == "ctc"
else FairseqLanguageModel
)
base_model.multitask_decoders[task_name] = decoder_model_cls(
getattr(base_model, f"{task_name}_decoder")
)
return base_model
def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs):
return self.encoder(
src_tokens, src_lengths=src_lengths, tgt_speaker=speaker, **kwargs
)
@register_model("s2ut_transformer")
class S2UTTransformerModel(S2STransformerMultitaskModelBase):
"""
Direct speech-to-speech translation model with S2T Transformer encoder + Transformer discrete unit decoder
https://arxiv.org/abs/2107.05604
"""
@staticmethod
def add_args(parser):
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
# speaker
parser.add_argument(
"--speaker-embed-dim",
type=int,
metavar="N",
help="speaker embedding dimension",
)
@classmethod
def build_decoder(cls, args, tgt_dict):
num_embeddings = len(tgt_dict)
padding_idx = tgt_dict.pad()
embed_tokens = StackedEmbedding(
num_embeddings,
args.decoder_embed_dim,
padding_idx,
num_stacked=args.n_frames_per_step,
)
return TransformerUnitDecoder(
args,
tgt_dict,
embed_tokens,
)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
tgt_speaker=None,
return_all_hiddens=False,
):
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
tgt_speaker=tgt_speaker,
return_all_hiddens=return_all_hiddens,
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"]
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
return decoder_out
@register_model("s2spect_transformer")
class S2SpecTTransformerModel(S2STransformerMultitaskModelBase):
"""
Speech-to-spectrogram model with S2T Transformer encoder + TTS Transformer decoder
"""
@staticmethod
def add_args(parser):
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
# speaker
parser.add_argument(
"--speaker-embed-dim",
type=int,
metavar="N",
help="speaker embedding dimension",
)
# decoder
parser.add_argument("--output-frame-dim", type=int)
# decoder prenet
parser.add_argument("--prenet-dropout", type=float)
parser.add_argument("--prenet-layers", type=int)
parser.add_argument("--prenet-dim", type=int)
# decoder postnet
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
# decoder transformer layers
parser.add_argument("--decoder-transformer-layers", type=int)
parser.add_argument("--decoder-embed-dim", type=int)
parser.add_argument("--decoder-ffn-embed-dim", type=int)
parser.add_argument("--decoder-normalize-before", action="store_true")
parser.add_argument("--decoder-attention-heads", type=int)
@classmethod
def build_decoder(cls, args):
return TTSTransformerDecoder(args, None, padding_idx=1)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
tgt_speaker=None,
incremental_state=None,
target_lengths=None,
speaker=None,
return_all_hiddens=False,
):
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
tgt_speaker=tgt_speaker,
return_all_hiddens=return_all_hiddens,
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
target_lengths=target_lengths,
speaker=speaker,
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"]
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
return decoder_out
def base_multitask_text_transformer_decoder_arch(args):
args.dropout = getattr(args, "dropout", 0.3)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
# decoder layer
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
def base_s2st_transformer_encoder_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
    args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
from .adt import ADT
from .adt import memo as ADTmemo
from .prelude import *
from . import atl_types as T
from .frontend import AST
from fractions import Fraction
from math import gcd as _gcd
def _lcm(x,y):
return (x*y)//_gcd(x,y)
# notes on symbols to use
# Note that BOOL is a type and Bool a formula constructor function
#from pysmt.shortcuts import (
# Symbol, BOOL, INT, REAL,
# Bool, Int, Real,
# TRUE, FALSE, And, Or, Not,
# GT, GE, LE, LT, Equals, NotEquals,
# Plus, Minus, Times, Div
#)
import pysmt
from pysmt import shortcuts as SMT
def _get_smt_solver():
factory = pysmt.factory.Factory(pysmt.shortcuts.get_env())
slvs = factory.all_solvers()
if len(slvs) == 0: raise OSError("Could not find any SMT solvers")
return pysmt.shortcuts.Solver(name=next(iter(slvs)))
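# Hedged illustration (not used by the checker): the solver returned above exposes the
# standard pysmt interface, e.g.
#
#   slv = _get_smt_solver()
#   x = SMT.Symbol("x", SMT.INT)
#   slv.add_assertion(SMT.GT(x, SMT.Int(3)))
#   assert slv.solve()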
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Description of Bounds-Checking Problem
BD = ADT("""
module BD {
sys = VarIntro ( sym name, sys cont, srcinfo srcinfo )
| RelIntro ( sym name, int n_args,
sys cont, srcinfo srcinfo )
| Guard ( pred pred, sys cont, srcinfo srcinfo )
| Both ( sys lhs, sys rhs )
| Check ( pred pred, sys cont, srcinfo srcinfo )
| NullSys ()
pred = GTZ ( affine expr )
| GEZ ( affine expr )
| EQZ ( affine expr )
| Rel ( sym name, sym* args )
| Conj ( pred lhs, pred rhs )
| Disj ( pred lhs, pred rhs )
affine = ( fraction offset, term* terms )
term = ( fraction coeff, sym var )
}
""", {
'sym': lambda x: type(x) is Sym,
'fraction': lambda x: type(x) is Fraction,
'srcinfo': lambda x: type(x) is SrcInfo,
})
ADTmemo(BD,['NullSys'])
BD.null = BD.NullSys()
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Operator Overloading to help construct affine expressions
def _alift_(obj):
if type(obj) is BD.affine:
return obj
elif type(obj) is BD.term:
return BD.affine( Fraction(0), [obj] )
elif type(obj) is Sym:
return BD.affine( Fraction(0), [ BD.term(Fraction(1),obj) ] )
elif type(obj) is int:
return BD.affine( Fraction(obj), [] )
elif type(obj) is Fraction:
return BD.affine( obj, [] )
else: assert False, f"unsupported affine lifting for type {type(obj)}"
@extclass(BD.affine)
def __add__(lhs,rhs):
rhs = _alift_(rhs)
off = lhs.offset + rhs.offset
terms = lhs.terms.copy()
terms.extend(rhs.terms)
return BD.affine( off, terms )
@extclass(BD.affine)
def __radd__(rhs,lhs): return _alift_(lhs) + rhs
@extclass(BD.affine)
def __neg__(arg):
off = -arg.offset
terms = [ BD.term( -t.coeff, t.var ) for t in arg.terms ]
return BD.affine( off, terms )
@extclass(BD.affine)
def __sub__(lhs,rhs): return lhs + (-_alift_(rhs))
@extclass(BD.affine)
def __rsub__(rhs,lhs): return _alift_(lhs) + (-rhs)
@extclass(BD.affine)
def __mul__(lhs,rhs):
assert type(rhs) is Fraction, "expected fraction to scale by"
off = lhs.offset * rhs
terms = [ BD.term( t.coeff * rhs, t.var ) for t in lhs.terms ]
return BD.affine( off, terms )
@extclass(BD.affine)
def __rmul__(rhs,lhs): return rhs * lhs
@extclass(BD.affine)
def __gt__(lhs,rhs):
return BD.GTZ( lhs - rhs )
@extclass(BD.affine)
def __ge__(lhs,rhs):
return BD.GEZ( lhs - rhs )
@extclass(BD.affine)
def __lt__(lhs,rhs):
return BD.GTZ( rhs - lhs )
@extclass(BD.affine)
def __le__(lhs,rhs):
return BD.GEZ( rhs - lhs )
@extclass(BD.affine)
def eq(lhs,rhs):
return BD.EQZ( lhs - rhs )
del __add__, __radd__, __neg__, __sub__, __rsub__, __mul__, __rmul__
del __gt__, __lt__, __ge__, __le__, eq
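# Small, hedged illustration (not used elsewhere): the overloads above let bounds
# predicates be written arithmetically. `Sym` is assumed to come from prelude, as in
# the validators of the BD ADT above.
def _affine_pred_example():
    i, N = Sym('i'), Sym('N')
    lo = _alift_(i) >= 0                  # BD.GEZ(i)
    hi = _alift_(i) < _alift_(N)          # BD.GTZ(N - i)
    return BD.Conj(lo, hi)                # 0 <= i < N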
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Extraction of Bounds-Checking Problem
class BoundsExtraction:
def __init__(self, f):
self._ctxt = Context()
# pack context with relation sizes
for rd in f.relations:
self._ctxt.set(rd.name, rd.sizes)
# construct the system corresponding to the expression body
sys = self.extract(f.body)
# and then wrap that in variable declarations for sizes/relations
for rd in reversed(f.relations):
sys = BD.RelIntro( rd.name, len(rd.sizes), sys, rd.srcinfo )
for sz in reversed(f.sizes):
sys = BD.VarIntro( sz.name, sys, sz.srcinfo )
self._sys = sys
def system(self): return self._sys
def _get_rel_sizes(self,rname):
szs = self._ctxt.get(rname)
assert szs is not None, "Expected all relation lookups to succeed"
return szs
def extract(self, e):
eclass = type(e)
# do not perform bounds-checks on code that was already checked.
if hasattr(e, 'func_call_sub'):
return BD.null
elif eclass is AST.Var or eclass is AST.Const:
return BD.null
elif eclass is AST.BinOp:
lhs = self.extract(e.lhs)
rhs = self.extract(e.rhs)
if lhs is BD.null: return rhs
elif rhs is BD.null: return lhs
else: return BD.Both(lhs,rhs)
elif eclass is AST.Tuple:
sys = BD.null
for a in e.args:
s = self.extract(a)
if sys is BD.null:
sys = s
elif s is not BD.null:
sys = BD.Both( s, sys )
return sys
elif eclass is AST.Proj:
return self.extract(e.arg)
elif eclass is AST.TensorLit:
sys = BD.null
for a in e.args:
s = self.extract(a)
if sys is BD.null:
sys = s
elif s is not BD.null:
sys = BD.Both( s, sys )
return sys
elif eclass is AST.Gen or eclass is AST.Sum:
# a sanity check; maybe not necessary, but if true
# this greatly simplifies naming issues, so let's assert for now
assert self._ctxt.get(e.name) is None, "Expected unique name symbols"
# build guard predicate (let x be e.name)
lo_bd = (_alift_(e.name) >= 0)
hi_bd = (_alift_(e.name) < e.range)
guard = BD.Conj( lo_bd, hi_bd )
# assemble system
body = self.extract(e.body)
if body is BD.null:
return body
else:
body = BD.Guard( guard, body, e.srcinfo )
return BD.VarIntro( e.name, body, e.srcinfo )
elif eclass is AST.Access:
            # check each access index against the corresponding dimension of the tensor type
sys = self.extract(e.base)
typ = e.base.type
for i_ast in e.idx:
i = self.index(i_ast)
rng = typ.range
typ = typ.type
lo_chk = (i >= 0)
hi_chk = (i < rng)
sys = BD.Check( BD.Conj(lo_chk, hi_chk), sys, e.srcinfo )
return sys
elif eclass is AST.BuiltIn:
# the built-in itself has no effect
sys = BD.null
for a in e.args:
s = self.extract(a)
if sys is BD.null:
sys = s
elif s is not BD.null:
sys = BD.Both( s, sys )
return sys
elif eclass is AST.Indicate:
# relational predicates may require introducing
# additional bounds checks on their arguments
guard, checks = self.pred(e.pred)
body = self.extract(e.body)
if body is not BD.null:
body = BD.Guard( guard, body, e.srcinfo )
# now wrap with any checks regardless of whether body is null
for name,eq,chk,srcinfo in checks:
body = BD.VarIntro( name,
BD.Guard( eq,
BD.Check(chk,body,srcinfo),
srcinfo ),
srcinfo )
return body
elif eclass is AST.Let:
sys = self.extract(e.ret)
for s in e.stmts:
rhs = self.extract(s.rhs)
if sys is BD.null:
sys = rhs
elif rhs is not BD.null:
sys = BD.Both( rhs, sys )
return sys
else: assert False, "unexpected case"
def index(self, e):
eclass = type(e)
if eclass is AST.IdxConst:
return _alift_(e.val)
elif eclass is AST.IdxVar or eclass is AST.IdxSize:
return _alift_(e.name)
elif eclass is AST.IdxAdd:
return self.index(e.lhs) + self.index(e.rhs)
elif eclass is AST.IdxSub:
return self.index(e.lhs) - self.index(e.rhs)
elif eclass is AST.IdxScale:
return e.coeff * self.index(e.idx)
else: assert False, "unexpected case"
# returns a second "checks" list of type
    # [(var_name, var_eq, var_bd_chk, srcinfo)]
def pred(self, p):
pclass = type(p)
if pclass is AST.Cmp:
lhs = self.index(p.lhs)
rhs = self.index(p.rhs)
if p.op == "<": return lhs < rhs, []
elif p.op == ">": return lhs > rhs, []
elif p.op == "<=": return lhs <= rhs, []
elif p.op == ">=": return lhs >= rhs, []
elif p.op == "==": return lhs.eq(rhs), []
else: assert False, f"Unrecognized Op {p.op}"
elif pclass is AST.Relation:
sizes = self._get_rel_sizes(p.name)
args, checks = [], []
for k,(i_arg,N) in enumerate(zip(p.args,sizes)):
i = self.index(i_arg)
v = Sym(f"{p.name}{k}")
def_eq = i.eq(v)
bd_chk = BD.Conj( i >= 0, i < N )
args.append(v)
checks.append( (v,def_eq,bd_chk,i_arg.srcinfo) )
return BD.Rel(p.name, args), checks
elif pclass is AST.Conj or pclass is AST.Disj:
lhs, lchk = self.pred(p.lhs)
rhs, rchk = self.pred(p.rhs)
lchk.extend(rchk)
ctr = BD.Conj if pclass is AST.Conj else BD.Disj
return ctr(lhs,rhs), lchk
else: assert False, "Impossible Case"
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Execution of Bounds-Checking Problem
class BoundsCheck:
def __init__(self,sys):
assert isinstance(sys, BD.sys), "Expected a bounds-system..."
self._slv = _get_smt_solver()
self._slv.reset_assertions()
self._ctxt = Context()
self._errors = []
self._slv.push()
self.check(sys)
self._slv.pop()
self.report_errors()
def report_errors(self):
if len(self._errors) == 0: return
errs = []
errs.append("Failed Bounds Checks:")
for srcinfo,msg in self._errors:
errs.append(f"{srcinfo}: {msg}")
raise TypeError("\n".join(errs))
def _err(self, node, msg):
self._errors.append((node.srcinfo, msg))
def _get_solution(self, pred):
smt_syms = [ smt_sym for nm,smt_sym in self._ctxt.items()
if smt_sym.get_type() == SMT.INT ]
self._slv.push()
self._slv.add_assertion(pred)
val_map = self._slv.get_py_values(smt_syms)
self._slv.pop()
mapping = []
for nm,smt_sym in self._ctxt.items():
if smt_sym.get_type() == SMT.INT:
mapping.append(f" {nm} = {val_map[smt_sym]}")
return "\n".join(mapping)
def check(self, sys):
styp = type(sys)
if styp is BD.VarIntro:
            smtsym = SMT.Symbol(repr(sys.name), SMT.INT)
from __future__ import print_function, division
import numpy as np
import os
import cv2
from PIL import Image
import random
from functools import partial
import tensorflow as tf
from keras.models import Model, Sequential, load_model
from keras.layers.merge import _Merge
from keras.layers import Input, Conv2D, MaxPooling2D, ZeroPadding2D, Conv2D, BatchNormalization, UpSampling2D, Activation
from keras.layers import Reshape, Dropout, Concatenate, Lambda, Multiply, Add, Flatten, Dense
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.optimizers import Adam
from keras import backend as K
import keras
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import cv2
from sklearn.utils import shuffle
import random
import datetime
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
import math
from skimage.measure import compare_psnr, compare_ssim
from keras.utils import multi_gpu_model
from scipy.stats import pearsonr
def load_confocal(input_shape=None, set=None, z_depth=None):
dir = '/home/sudong/confocal/' + set
lr_lq_set = []
hr_lq_set = []
lr_hq_set = []
hr_hq_set = []
for _, _, files in os.walk(dir+'/'+z_depth):
for file in files:
if int(file.split('_')[-1].split('.')[0]) < len(files) * 0.8:
img_lq = cv2.imread(dir+'/'+z_depth + '/' + file)
img = cv2.resize(img_lq, (input_shape[0], input_shape[1]))
lr_lq_set.append(img)
img = cv2.resize(img_lq, (input_shape[0]*4, input_shape[1]*4))
hr_lq_set.append(img)
file = 'Z7_' + file.split('_')[1]
img_hq = cv2.imread(dir+'/Z007' + '/' + file)
img = cv2.resize(img_hq, (input_shape[0]*4, input_shape[1]*4))
hr_hq_set.append(img)
img = cv2.resize(img_hq, (input_shape[0], input_shape[1]))
lr_hq_set.append(img)
# hrhq, lrhq, hrlq, lrlq = shuffle(hr_hq_set, lr_hq_set, hr_lq_set, lr_lq_set)
hrhq, lrhq, hrlq, lrlq = hr_hq_set, lr_hq_set, hr_lq_set, lr_lq_set
hrhq_train = hrhq
lrhq_train = lrhq
hrlq_train = hrlq
lrlq_train = lrlq
lr_lq_set = []
hr_lq_set = []
lr_hq_set = []
hr_hq_set = []
for _, _, files in os.walk(dir+'/'+z_depth):
for file in files:
if int(file.split('_')[-1].split('.')[0]) >= len(files) * 0.8:
img_lq = cv2.imread(dir+'/'+z_depth + '/' + file)
img = cv2.resize(img_lq, (input_shape[0], input_shape[1]))
lr_lq_set.append(img)
img = cv2.resize(img_lq, (input_shape[0]*4, input_shape[1]*4))
hr_lq_set.append(img)
file = 'Z7_' + file.split('_')[1]
img_hq = cv2.imread(dir+'/Z007' + '/' + file)
img = cv2.resize(img_hq, (input_shape[0]*4, input_shape[1]*4))
hr_hq_set.append(img)
img = cv2.resize(img_hq, (input_shape[0], input_shape[1]))
lr_hq_set.append(img)
# hrhq, lrhq, hrlq, lrlq = shuffle(hr_hq_set, lr_hq_set, hr_lq_set, lr_lq_set)
hrhq, lrhq, hrlq, lrlq = hr_hq_set, lr_hq_set, hr_lq_set, lr_lq_set
hrhq_test = hrhq
lrhq_test = lrhq
hrlq_test = hrlq
lrlq_test = lrlq
hrhq_train = np.array(hrhq_train)
# hrhq_train = hrhq_train.astype('float32') /127.5 - 1.
hrhq_train = hrhq_train.astype('float32') /127.5 - 1.
hrhq_test = np.array(hrhq_test)
hrhq_test = hrhq_test.astype('float32') /127.5 - 1.
lrhq_train = np.array(lrhq_train)
lrhq_train = lrhq_train.astype('float32') /127.5 - 1.
lrhq_test = np.array(lrhq_test)
lrhq_test = lrhq_test.astype('float32') /127.5 - 1.
hrlq_train = np.array(hrlq_train)
hrlq_train = hrlq_train.astype('float32') /127.5 - 1.
hrlq_test = np.array(hrlq_test)
hrlq_test = hrlq_test.astype('float32') /127.5 - 1.
lrlq_train = np.array(lrlq_train)
lrlq_train = lrlq_train.astype('float32') /127.5 - 1.
lrlq_test = np.array(lrlq_test)
lrlq_test = lrlq_test.astype('float32') /127.5 - 1.
print(hrhq_train.shape)
print(hrhq_test.shape)
return hrhq_train, hrhq_test, lrhq_train, lrhq_test, hrlq_train, hrlq_test, lrlq_train, lrlq_test
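# Hedged usage sketch for the loader above. The `set`/`z_depth` values and the data
# directory are placeholders; array shapes follow the resizing logic in load_confocal().
#
#   (hrhq_tr, hrhq_te, lrhq_tr, lrhq_te,
#    hrlq_tr, hrlq_te, lrlq_tr, lrlq_te) = load_confocal(input_shape=(128, 128, 3),
#                                                        set='some_set', z_depth='Z003')
#   # low-resolution arrays: (N, 128, 128, 3); high-resolution arrays: (N, 512, 512, 3),
#   # all scaled to the [-1, 1] range expected by the tanh generator below.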
class RandomWeightedAverage(_Merge):
"""Provides a (random) weighted average between real and generated image samples"""
def define_batch_size(self, bs):
self.bs = bs
def _merge_function(self, inputs):
alpha = K.random_uniform((self.bs, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
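# Hedged usage note: RandomWeightedAverage is the interpolation layer normally used for a
# WGAN-GP style gradient penalty. A hypothetical wiring (tensor names are placeholders):
#
#   avg = RandomWeightedAverage()
#   avg.define_batch_size(batch_size)               # must be set before building the graph
#   interpolated_img = avg([real_img, fake_img])
#   validity_interp = discriminator(interpolated_img)
#
# The StarGAN class below compiles its discriminators with plain 'mse' (LSGAN) losses, so
# the layer appears unused in the portion of the script shown here.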
class StarGAN(object):
def __init__(self):
# Model configuration.
self.channels = 3
# self.lr_height = 180 # Low resolution height
# self.lr_width = 320 # Low resolution width
self.lr_height = 128 # Low resolution height
self.lr_width = 128 # Low resolution width
self.lr_shape = (self.lr_height, self.lr_width, self.channels)
self.hr_height = self.lr_height*4 # High resolution height
self.hr_width = self.lr_width*4 # High resolution width
self.hr_shape = (self.hr_height, self.hr_width, self.channels)
self.n_residual_blocks = 9
optimizer = Adam(0.0001, 0.5, 0.99)
# We use a pre-trained VGG19 model to extract image features from the high resolution
# and the generated high resolution images and minimize the mse between them
self.vgg_hq = self.build_vgg_hr(name='vgg_hq')
self.vgg_hq.trainable = False
self.vgg_hq_m = multi_gpu_model(self.vgg_hq, gpus=3)
self.vgg_hq_m.compile(loss='mse',
optimizer=optimizer,
metrics=['accuracy'])
self.vgg_lq = self.build_vgg_hr(name='vgg_lq')
self.vgg_lq.trainable = False
self.vgg_lq_m = multi_gpu_model(self.vgg_lq, gpus=3)
self.vgg_lq_m.compile(loss='mse',
optimizer=optimizer,
metrics=['accuracy'])
# Calculate output shape of D (PatchGAN)
patch_hr_h = int(self.hr_height / 2 ** 4)
patch_hr_w = int(self.hr_width / 2 ** 4)
self.disc_patch_hr = (patch_hr_h, patch_hr_w, 1)
# Number of filters in the first layer of G and D
self.gf = 64
self.df = 64
self.discriminator_hq = self.build_discriminator(name='dis_hq')
self.discriminator_hq_m = multi_gpu_model(self.discriminator_hq, gpus=3)
self.discriminator_hq_m.compile(loss='mse',
optimizer=optimizer,
metrics=['accuracy'])
self.discriminator_lq = self.build_discriminator(name='dis_lq')
self.discriminator_lq_m = multi_gpu_model(self.discriminator_lq, gpus=3)
self.discriminator_lq_m.compile(loss='mse',
optimizer=optimizer,
metrics=['accuracy'])
# Build the generator
self.generator_lq2hq = self.build_generator(name='gen_lq2hq')
self.generator_hq2lq = self.build_generator(name='gen_hq2lq')
# High res. and low res. images
# img_hrhq = Input(shape=self.hr_shape)
img_lq = Input(shape=self.hr_shape)
img_hq = Input(shape=self.hr_shape)
fake_hq = self.generator_lq2hq(img_lq)
fake_lq = self.generator_hq2lq(img_hq)
reconstr_lq = self.generator_hq2lq(fake_hq)
reconstr_hq = self.generator_lq2hq(fake_lq)
img_lq_id = self.generator_hq2lq(img_lq)
img_hq_id = self.generator_lq2hq(img_hq)
fake_hq_features = self.vgg_hq(fake_hq)
fake_lq_features = self.vgg_lq(fake_lq)
reconstr_hq_features = self.vgg_hq(reconstr_hq)
reconstr_lq_features = self.vgg_lq(reconstr_lq)
self.discriminator_hq.trainable = False
self.discriminator_lq.trainable = False
validity_hq = self.discriminator_hq(fake_hq)
validity_lq = self.discriminator_lq(fake_lq)
validity_reconstr_hq = self.discriminator_hq(reconstr_hq)
validity_reconstr_lq = self.discriminator_lq(reconstr_lq)
self.combined_hq = Model([img_lq, img_hq], [validity_hq, validity_reconstr_lq,
fake_hq_features, reconstr_lq_features, img_lq_id])
self.combined_hq_m = multi_gpu_model(self.combined_hq, gpus=4)
self.combined_hq_m.compile(loss=['mse', 'mse', 'mse', 'mse', 'mse'],
loss_weights=[1e-3, 1e-3, 1, 1, 1],
optimizer=optimizer)
self.combined_lq = Model([img_lq, img_hq], [validity_lq, validity_reconstr_hq,
fake_lq_features, reconstr_hq_features, img_hq_id])
self.combined_lq_m = multi_gpu_model(self.combined_lq, gpus=4)
self.combined_lq_m.compile(loss=['mse', 'mse', 'mse', 'mse', 'mse'],
loss_weights=[1e-3, 1e-3, 1, 1, 1],
optimizer=optimizer)
def build_vgg_hr(self, name=None):
"""
Builds a pre-trained VGG19 model that outputs image features extracted at the
third block of the model
"""
# vgg = VGG16(include_top=False, weights="/home/amax/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5")
# vgg.outputs = [vgg.layers[8].output]
vgg = VGG19(include_top=False, weights="/home/sudong/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5")
vgg.outputs = [vgg.layers[9].output]
img = Input(shape=self.hr_shape)
# Extract image features
# with tf.device('/gpu:0') :
img_features = vgg(img)
model = Model(img, img_features, name=name)
model.summary()
return model
def build_generator(self, name=None):
def residual_block(layer_input, filters):
"""Residual block described in paper"""
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)
d = InstanceNormalization()(d)
d = Activation('relu')(d)
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
d = InstanceNormalization()(d)
d = Add()([d, layer_input])
return d
# Low resolution image input
img_lr = Input(shape=self.hr_shape)
# with tf.device('/gpu:0') :
# Pre-residual block
#c1 = Conv2D(64, kernel_size=9, strides=1, padding='same')(img_lr)
c1 = Conv2D(64, kernel_size=7, strides=1, padding='same')(img_lr)
c1 = InstanceNormalization()(c1)
c1 = Activation('relu')(c1)
n_downsampling = 2
for i in range(n_downsampling):
mult = 2 ** i
c1 = Conv2D(filters=64 * mult * 2, kernel_size=(3, 3), strides=2, padding='same')(c1)
c1 = InstanceNormalization()(c1)
c1 = Activation('relu')(c1)
        # Propagate through residual blocks
r = residual_block(c1, self.gf * (n_downsampling ** 2))
for _ in range(8):
r = residual_block(r, self.gf * (n_downsampling ** 2))
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
r = UpSampling2D()(r)
r = Conv2D(filters=int(64 * mult / 2), kernel_size=(3, 3), padding='same')(r)
r = InstanceNormalization()(r)
r = Activation('relu')(r)
# Post-residual block
c2 = Conv2D(self.channels, kernel_size=7, strides=1, padding='same')(r)
c2 = Activation('tanh')(c2)
c2 = Add()([c2, img_lr])
model = Model(img_lr, [c2], name=name)
model.summary()
return model
def build_discriminator(self, name=None):
n_layers, use_sigmoid = 3, False
inputs = Input(shape=self.hr_shape)
ndf=64
# with tf.device('/gpu:0'):
x = Conv2D(filters=ndf, kernel_size=(4, 4), strides=2, padding='same')(inputs)
x = LeakyReLU(0.2)(x)
nf_mult, nf_mult_prev = 1, 1
for n in range(n_layers):
nf_mult_prev, nf_mult = nf_mult, min(2 ** n, 8)
x = Conv2D(filters=ndf * nf_mult, kernel_size=(4, 4), strides=2, padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU(0.2)(x)
nf_mult_prev, nf_mult = nf_mult, min(2 ** n_layers, 8)
x = Conv2D(filters=ndf * nf_mult, kernel_size=(4, 4), strides=1, padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU(0.2)(x)
x = Conv2D(filters=1, kernel_size=(4, 4), strides=1, padding='same')(x)
if use_sigmoid:
x = Activation('sigmoid')(x)
# x = Flatten()(x)
x = Dense(1024, activation='tanh')(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=x, name=name)
model.summary()
return model
def build_discriminator_lr(self):
def d_block(layer_input, filters, strides=1, bn=True):
"""Discriminator layer"""
d = Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)
if bn:
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
return d
# with tf.device('/gpu:0'):
# Input img
d0 = Input(shape=self.lr_shape)
# d1 = d_block(d0, self.df, bn=False)
# d2 = d_block(d1, self.df, strides=2)
# d3 = d_block(d2, self.df * 2)
# d4 = d_block(d3, self.df * 2, strides=2)
# # d5 = d_block(d4, self.df * 4)
# # d6 = d_block(d5, self.df * 4, strides=2)
# # d7 = d_block(d6, self.df * 8)
# # d8 = d_block(d7, self.df * 8, strides=2)
d1 = d_block(d0, self.df, bn=False)
d2 = d_block(d1, self.df, strides=2)
# d3 = d_block(d2, self.df * 2)
d4 = d_block(d2, self.df * 2, strides=2)
d9 = Dense(self.df * 4)(d4)
d10 = LeakyReLU(alpha=0.2)(d9)
validity = Dense(1, activation='sigmoid')(d10)
model = Model(d0, validity)
model.summary()
return Model(d0, validity)
def train(self, model, epochs, batch_size, sample_interval, set=None, z_depth=None):
# input_shape = (180, 320, 3)
input_shape = (128, 128, 3)
start_time = datetime.datetime.now()
weigths_dir = model + '_weights'
img_dir = model + '_img'
log_dir = model + '_logs/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
if not os.path.exists(weigths_dir):
os.makedirs(weigths_dir)
if not os.path.exists(img_dir):
| |
# falcon_kit/run_support.py
from . import bash, functional
from .functional import cfg_tobool
from .io import NativeIO
from .util.system import (make_fofn_abs, make_dirs, cd)
import json
import logging
import logging.config
import os
import re
import io
import sys
import tempfile
import time
import uuid
logger = logging.getLogger(__name__)
from configparser import ConfigParser
def _prepend_env_paths(content, names):
"""
E.g.
    names = ['PATH', 'PYTHONPATH']
content =
echo hi
=>
export PATH=current:path:${PATH}
        export PYTHONPATH=current:path:${PYTHONPATH}
echo hi
"""
export_env_vars = ['export %(k)s=%(v)s:${%(k)s}' % dict(
k=name, v=os.environ.get(name, '')) for name in names]
return '\n'.join(export_env_vars + [content])
def update_env_in_script(fn, names):
"""Modify fn using on prepend_env_paths().
"""
with open(fn) as ifs:
content = ifs.read()
content = _prepend_env_paths(content, names)
with open(fn, 'w') as ofs:
ofs.write(content)
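# Hedged example (the file name is a placeholder): rewrite a generated task script so it
# inherits the caller's search paths before anything else runs.
#
#   update_env_in_script('run_falcon_task.sh', ['PATH', 'PYTHONPATH'])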
def use_tmpdir_for_files(basenames, src_dir, link_dir):
"""NOT USED. Kept only for reference. This will be done in pypeFLOW.
Generate script to copy db files to tmpdir (for speed).
- Choose tmp_dir, based on src_dir name.
- rsync basenames into tmp_dir # after 'flock', per file
- symlink from link_dir into tmp_dir.
Return list of script lines, sans linefeed.
"""
script = list()
unique = os.path.abspath(src_dir).replace('/', '_')
root = tempfile.gettempdir()
tmp_dir = os.path.join(root, 'falcon', unique)
script.append('mkdir -p %s' % tmp_dir)
for basename in basenames:
src = os.path.join(src_dir, basename)
dst = os.path.join(tmp_dir, basename)
rm_cmd = 'rm -f %s' % basename
# Wait on lock for up to 10 minutes, in case of very large files.
rsync_cmd = "flock -w 600 %s.lock -c 'rsync -av %s %s'" % (
dst, src, dst)
ln_cmd = 'ln -sf %s %s' % (dst, basename)
script.extend([rm_cmd, rsync_cmd, ln_cmd])
return script
def make_job_data(url, script_fn):
"""Choose defaults.
Run in same directory as script_fn.
Base job_name on script_fn.
"""
wd = os.path.dirname(script_fn)
job_name = '{0}-{1}-{2}'.format(
os.path.basename(script_fn),
url.split("/")[-1],
str(uuid.uuid4())[:8],
)
job_data = {"job_name": job_name,
"cwd": wd,
"script_fn": script_fn}
return job_data
def check_HPCdaligner_option(option):
msg = ''
if '-dal' in option:
msg += 'HPC.daligner option "-dal" has changed to "-B".\n'
if '-deg' in option:
msg += 'HPC.daligner option "-deg" has changed to "-D".\n'
if '-D' in option:
msg += 'HPC.daligner option "-D" is no longer valid.\n'
if msg:
raise Exception(msg)
def clean_falcon_options(fc):
"""Update some values in fc.
Replace _ with - in a couple places.
"""
keys = ('falcon_sense_option', 'overlap_filtering_setting', 'fc_ovlp_to_graph_option',
)
for key in keys:
update_dash_flags(fc, key)
for dk in ('pa_HPCdaligner_option', 'ovlp_HPCdaligner_option'):
if dk in fc:
check_HPCdaligner_option(fc[dk])
def get_config(config):
"""
This is only for the call from pbsmrtpipe:
      support.get_config(support.parse_config(fn))
We have changed parse_config() to return a dict.
So this is a no-op.
"""
cfg = dict(config) # already a dict now
return cfg
def dict2config(jdict, section):
config = ConfigParser()
if not config.has_section(section):
config.add_section(section)
for (k, v) in jdict.items():
config.set(section, k, str(v))
return config
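# Hedged example (values are placeholders): round-tripping a plain dict through
# dict2config() yields a ConfigParser whose options are the stringified values.
#
#   cfg = dict2config({'genome_size': 0, 'input_type': 'raw'}, 'General')
#   assert cfg.get('General', 'input_type') == 'raw'
#   assert cfg.get('General', 'genome_size') == '0'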
def parse_config(config_fn):
"""Deprecated.
Called from pbsmrtpipe, for now.
"""
return parse_cfg_file(config_fn)
def parse_cfg_file(config_fn):
"""Return as dict.
"""
with open(config_fn) as stream:
ext = os.path.splitext(config_fn)[1]
if ext in ('.json', '.js'):
config = json.loads(stream.read())
else:
# Parse sections (and case-sensitively), into sub-dicts.
config = parse_cfg_with_sections(stream)
update_defaults(config['General'])
# Copy General section to top, for now.
#for key, val in config['General'].items():
# config[key] = val
##cfg.update(config.get('General', {}))
check_config_sections(config) # Ensure that the right sections exist.
update_job_sections(config)
return config
def process_job_defaults(job_defaults):
key = 'use_tmpdir'
use_tmpdir = job_defaults.get(key, '')
if '/' in use_tmpdir:
tempfile.tempdir = use_tmpdir
os.environ['TMPDIR'] = use_tmpdir
else:
if use_tmpdir.lower().startswith('t'):
use_tmpdir = tempfile.gettempdir()
else:
use_tmpdir = False
job_defaults[key] = use_tmpdir
def update_job_defaults_section(config):
"""For backwards compatibility with stuff from 'General' section.
"""
General = config['General']
job_defaults = config['job.defaults']
if 'njobs' in General:
logger.warning('"njobs" belongs in the [job.defaults] section.')
if 'pwatcher_type' in General:
logger.warning('Please specify "pwatcher_type" only in the [job.defaults] section, not in [General].')
if 'job_type' in General:
logger.warning('Please specify "job_type" only in the [job.defaults] section, not in [General].')
if 'stop_all_jobs_on_failure' in General:
logger.warning('Please specify "stop_all_jobs_on_failure" only in the [job.defaults] section, not in [General].')
if 'use_tmpdir' in General:
logger.warning('Please specify "use_tmpdir" only in the [job.defaults] section, not in [General].')
if 'job_name_style' in General:
logger.warning('Please specify "job_name_style" only in the [job.defaults] section, not in [General].')
if 'job_queue' in General:
logger.warning('Please specify "JOB_QUEUE" only in the [job.defaults] section, not as "job_queue" in [General].')
if 'sge_option' in General:
logger.warning('Please specify "JOB_OPTS" in the [job.defaults] section, not as "sge_option" in [General].')
pwatcher_type = General.get('pwatcher_type', 'fs_based') #, config.get('pwatcher_type')))
job_type = job_defaults.get('job_type', General.get('job_type', '')).lower()
job_queue = General.get('job_queue', '')
sge_option = General.get('sge_option', '')
if 'pwatcher_type' not in job_defaults:
job_defaults['pwatcher_type'] = pwatcher_type
else:
pwatcher_type = job_defaults['pwatcher_type']
if 'submit' not in config['job.defaults']:
if 'blocking' == pwatcher_type:
if not job_queue or ' ' not in job_queue:
raise Exception('pwatcher_type=blocking, but "submit" is not in [job.defaults] section.')
config['job.defaults']['submit'] = job_queue
logger.warning('Please set "submit" in [job.defaults] section. (For now, we will use "job_queue" from [General], which was a hack.)')
elif 'fs_based' == pwatcher_type or 'network_based' == pwatcher_type:
if not job_type:
logger.error('job.defaults.submit is not set; pwatcher_type={}; but job_type is not set. Maybe try "job_type=local" first.'.format(pwatcher_type))
job_type = 'local'
job_defaults['job_type'] = job_type
allowed_job_types = ['sge', 'pbs', 'torque', 'slurm', 'lsf', 'local']
assert job_type in allowed_job_types, 'job_type={} not in {}'.format(
job_type, allowed_job_types)
if job_queue and 'JOB_QUEUE' not in config['job.defaults']:
job_defaults['JOB_QUEUE'] = job_queue
else:
raise Exception('Unknown pwatcher_type={}'.format(pwatcher_type))
#assert 'submit' in config['job.defaults'], repr(config)
if sge_option and 'JOB_OPTS' not in config['job.defaults']:
job_defaults['JOB_OPTS'] = sge_option
if 'njobs' not in job_defaults:
config['job.defaults']['njobs'] = int(General.get('default_concurrent_jobs', 8)) # GLOBAL DEFAULT CONCURRENCY
msg = 'Please supply a default for "njobs" (aka concurrency) in section [job.defaults]. For now, we will use {}'.format(
config['job.defaults']['njobs'])
logger.warning(msg)
def update_if_if(key):
if key not in job_defaults:
if key in General:
job_defaults[key] = General[key]
logger.warning('Found "{}" from [General] section; should be in [job.defaults] instead.'.format(key))
update_if_if('job_name_style')
update_if_if('stop_all_jobs_on_failure')
update_if_if('use_tmpdir')
legacy_names = [
'pwatcher_type', 'pwatcher_directory',
'job_type', 'job_queue', 'job_name_style',
'use_tmpdir',
]
def update_if_missing(name, sub_dict):
if General.get(name) and name not in sub_dict:
sub_dict[name] = General[name]
for name in legacy_names:
update_if_missing(name, config['job.defaults'])
process_job_defaults(job_defaults)
def update_job_sections(config):
"""More for backwards compatibility with stuff from 'General' section.
"""
update_job_defaults_section(config)
General = config['General']
# Update a few where the names change and the section is non-default.
def update_step_job_opts(name):
if General.get('sge_option_'+name) and 'JOB_OPTS' not in config['job.step.'+name]:
config['job.step.'+name]['JOB_OPTS'] = General['sge_option_'+name]
def update_step_njobs(name):
if General.get(name+'_concurrent_jobs') and 'njobs' not in config['job.step.'+name]:
config['job.step.'+name]['njobs'] = int(General[name+'_concurrent_jobs'])
for name in ['bd', 'da', 'la', 'pda', 'pla', 'cns', 'fc', 'asm']:
update_step_job_opts(name)
update_step_njobs(name)
# Prefer 'asm' to 'fc'.
asm = dict(config['job.step.asm'])
config['job.step.asm'] = config['job.step.fc']
del config['job.step.fc']
config['job.step.asm'].update(asm)
def parse_cfg_with_sections(stream):
"""Return as dict of dict of ...
"""
#Experimental:
"""
ConfigParser sections become sub-sub sections when separated by dots.
[foo.bar]
baz = 42
is equivalent to JSON
{"foo": {"bar": {"baz": 42}}}
"""
content = stream.read()
result = dict()
try:
jdict = json.loads(NativeIO(content).read())
return jdict
except ValueError:
pass #logger.exception('Could not parse stream as JSON.')
try:
config = ConfigParser(strict=False)
config.optionxform = str
config.read_file(NativeIO(content))
sections = config.sections()
for sec in sections:
result[sec] = dict(config.items(sec))
return result
except:
raise
def check_config_sections(cfg):
"""And ensure these all exist.
"""
allowed_sections = set(['General',
'job.step.dust',
'job.step.da', 'job.step.pda',
'job.step.la', 'job.step.pla',
'job.step.cns', 'job.step.fc',
'job.step.asm',
'job.defaults',
])
all_sections = set(k for k,v in list(cfg.items()) if isinstance(v, dict))
unexpected = all_sections - allowed_sections
if unexpected:
msg = 'You have {} unexpected cfg sections: {}'.format(
len(unexpected), unexpected)
raise Exception(msg)
# Guarantee they all exist.
for sec in allowed_sections:
if sec not in cfg:
cfg[sec] = dict()
def update_dash_flags(cfg, key):
if key not in cfg:
return
val = cfg[key]
cfg[key] = new_val = functional.dash_flags(cfg[key])
if val != new_val:
msg = '''\
Option contains flags with "_":
"{key}={val}". Those should be "-", as in
"{key}={new_val}". Auto-replaced.'''.format(**locals())
logger.warning(msg)
TEXT_FILE_BUSY = 'avoid_text_file_busy'
def update_defaults(cfg):
"""cfg is probably the General sub-dict.
"""
def set_default(key, val):
if key not in cfg:
cfg[key] = val
set_default('input_type', 'raw')
set_default('overlap_filtering_setting', '--max-diff 1000 --max-cov 1000 --min-cov 2')
#set_default('pa_daligner_option', '-e.70 -s100 -t16') # TODO: -t is a dumb default
#set_default('ovlp_daligner_option', '-e.96 -s1000 -h60 -t32') # TODO: -t is a dumb default
set_default('pa_HPCdaligner_option', '-v')
set_default('ovlp_HPCdaligner_option', '-v -l500')
set_default('pa_HPCTANmask_option', '-l500') # daligner defaults to -l1000
#set_default('ovlp_HPCTANmask_option', '-l500')
set_default('pa_REPmask_code', '0,300/0,300/0,300')
set_default('pa_DBsplit_option', '-x500 -s200 -a')
set_default('skip_checks', False)
set_default('pa_DBdust_option', '') # Gene recommends the defaults. I have tried -w128 -t2.5 -m20
set_default('pa_fasta_filter_option', 'streamed-internal-median')
set_default('pa_subsample_coverage', 0)
set_default('pa_subsample_strategy', 'random')
set_default('pa_subsample_random_seed', 12345)
set_default('dazcon', False)
set_default('pa_dazcon_option', '-j 4 -x -l 500')
set_default('ovlp_DBdust_option', '')
set_default('ovlp_DBsplit_option', '-x500 -s200 -a')
set_default('falcon_sense_option', '--output-multi --min-idt 0.70 --min-cov 2 --max-n-read 1800')
set_default('falcon_sense_skip_contained', False)
set_default('falcon_sense_greedy', False)
set_default('LA4Falcon_preload', '')
set_default('fc_ovlp_to_graph_option', '')
set_default('genome_size', 0)
set_default('seed_coverage', 20)
| |
# cgi_ui/cgi-bin/strf_web.py
# -*- coding: utf-8 -*-
# !C:\tools\Python-3.6.2_64\pythonw.exe
# !/usr/local/bin/python3.6
import datetime
import math
import os
import sys
from pathlib import Path
from typing import List, Dict, Union, Tuple
from xml.etree.ElementTree import ParseError
from gemmi.cif import Style
from structurefinder.misc.exporter import cif_data_to_document
###########################################################
### Configure the web server here: #####################
host = "127.0.0.1"
port = "8080"
dbfilename = "structuredb.sqlite"
download_button = False
###########################################################
site_ip = host + ':' + port
try: # Adding local path to PATH
sys.path.insert(0, os.path.abspath('./'))
except(KeyError, ValueError):
print('Unable to set PATH properly. strf_web.py might not work.')
pyver = sys.version_info
if pyver[0] == 3 and pyver[1] < 4:
# Python 2 creates a syntax error anyway.
print("You need Python 3.4 and up in oder to run this program!")
sys.exit()
from shutil import which
from structurefinder.searcher.constants import centering_letter_2_num, centering_num_2_letter
from structurefinder.ccdc.query import get_cccsd_path, search_csd, parse_results
from cgi_ui.bottle import Bottle, static_file, template, redirect, request, response, HTTPResponse
from structurefinder.displaymol.mol_file_writer import MolFile
from structurefinder.displaymol.sdm import SDM
from structurefinder.pymatgen.core import lattice
from structurefinder.searcher.database_handler import StructureTable
from structurefinder.searcher.misc import is_valid_cell, get_list_of_elements, vol_unitcell, is_a_nonzero_file, \
format_sum_formula, \
combine_results, more_results_parameters, regular_results_parameters
app = application = Bottle()
@app.get('/all')
def structures_list_data():
"""
The content of the structures list.
"""
structures = StructureTable(dbfilename)
return get_structures_json(structures, show_all=True)
@app.get('/')
def main():
"""
The main web site with html template.
"""
response.set_header('Set-Cookie', 'str_id=')
response.content_type = 'text/html; charset=UTF-8'
data = {"my_ip" : site_ip,
"title" : 'StructureFinder',
'host' : host,
'download_link': r"""<p><a href="http://{}/dbfile.sqlite" download="structurefinder.sqlite"
type="application/*">Download
database file</a></p>""".format(site_ip) if download_button else ''
}
output = template('./cgi_ui/views/strf_web', data)
return output
@app.get('/dbfile.sqlite')
def get_dbfile():
return Path(dbfilename).read_bytes()
@app.get("/cellsrch")
def cellsrch():
cell_search = request.GET.cell_search
more_results = (request.GET.more == "true")
sublattice = (request.GET.supercell == "true")
cell = is_valid_cell(cell_search)
print("Cell search:", cell)
structures = StructureTable(dbfilename)
if cell:
ids = find_cell(structures, cell, more_results=more_results, sublattice=sublattice)
print("--> Got {} structures from cell search.".format(len(ids)))
return get_structures_json(structures, ids)
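# Hedged illustration (host/port depend on the settings at the top of this file): a unit
# cell search is a plain GET against this route, e.g.
#
#   http://127.0.0.1:8080/cellsrch?cell_search=10.5 10.5 10.5 90 90 90&more=false&supercell=false
#
# (spaces URL-encoded by the browser); the response is the same JSON table payload that
# the /all route returns.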
@app.get("/txtsrch")
def txtsrch():
structures = StructureTable(dbfilename)
text_search = request.GET.text_search
print("Text search:", text_search)
ids = search_text(structures, text_search)
return get_structures_json(structures, ids)
@app.get("/adv_srch")
def adv():
elincl = request.GET.elements_in
elexcl = request.GET.elements_out
date1 = request.GET.date1
date2 = request.GET.date2
cell_search = request.GET.cell_search
txt_in = request.GET.text_in
txt_out = request.GET.text_out
if len(txt_in) >= 2 and "*" not in txt_in:
txt_in = '*' + txt_in + '*'
if len(txt_out) >= 2 and "*" not in txt_out:
txt_out = '*' + txt_out + '*'
more_results = (request.GET.more == "true")
sublattice = (request.GET.supercell == "true")
onlyelem = (request.GET.onlyelem == "true")
it_num = request.GET.it_num
r1val = request.GET.r1val
ccdc_num = request.GET.ccdc_num
structures = StructureTable(dbfilename)
print("Advanced search: elin:", elincl, 'elout:', elexcl, date1, '|', date2, '|', cell_search, 'txin:', txt_in,
'txout:', txt_out, '|', 'more:', more_results, 'Sublatt:', sublattice, 'It-num:', it_num, 'only:', onlyelem,
'CCDC:', ccdc_num)
ids = advanced_search(cellstr=cell_search, elincl=elincl, elexcl=elexcl, txt=txt_in, txt_ex=txt_out,
sublattice=sublattice, more_results=more_results, date1=date1, date2=date2,
structures=structures, it_num=it_num, onlythese=onlyelem, r1val=r1val, ccdc_num=ccdc_num)
print("--> Got {} structures from Advanced search.".format(len(ids)))
return get_structures_json(structures, ids)
@app.post('/molecule')
def jsmol_request():
"""
A request for atom data from jsmol.
"""
str_id = request.POST.id
print("Molecule id:", str_id)
structures = StructureTable(dbfilename)
if str_id:
cell = structures.get_cell_by_id(str_id)
if request.POST.grow == 'true':
symmcards = [x.split(',') for x in structures.get_row_as_dict(str_id)
['_space_group_symop_operation_xyz'].replace("'", "").replace(" ", "").split("\n")]
atoms = structures.get_atoms_table(str_id, cartesian=False, as_list=True)
if atoms:
sdm = SDM(atoms, symmcards, cell)
needsymm = sdm.calc_sdm()
atoms = sdm.packer(sdm, needsymm)
else:
atoms = structures.get_atoms_table(str_id, cartesian=True, as_list=False)
try:
m = MolFile(atoms)
return m.make_mol()
except(KeyError, TypeError) as e:
print('Exception in jsmol_request: {}'.format(e))
return ''
@app.post('/residuals')
def post_request():
"""
Handle POST requests.
"""
cif_dic = {}
str_id = request.POST.id
response.set_header('Set-Cookie', 'str_id=' + str_id)
resid1 = request.POST.residuals1 == 'true'
resid2 = request.POST.residuals2 == 'true'
all_cif = request.POST.all == 'true'
unitcell = request.POST.unitcell
structures = StructureTable(dbfilename)
print("Structure id:", str_id)
if str_id:
cif_dic = structures.get_row_as_dict(str_id)
if str_id and unitcell and not (resid1 or resid2 or all_cif):
try:
return get_cell_parameters(structures, str_id)
except ValueError as e:
print("Exception raised:")
print(e)
return ''
if str_id and resid1:
return get_residuals_table1(structures, cif_dic, str_id)
if str_id and resid2:
return get_residuals_table2(cif_dic)
if str_id and all_cif:
return get_all_cif_val_table(structures, str_id)
# noinspection PyUnresolvedReferences
@app.route('/static/<filepath:path>')
def server_static(filepath):
"""
Static files such as images or CSS files are not served automatically.
The static_file() function is a helper to serve files in a safe and convenient way (see Static Files).
This example is limited to files directly within the /path/to/your/static/files directory because the
<filename> wildcard won’t match a path with a slash in it. To serve files in subdirectories, change
the wildcard to use the path filter:
"""
response = static_file(filepath, root='./cgi_ui/static/')
response.set_header("Cache-Control", "public, max-age=240")
return response
@app.route('/version')
def version():
from structurefinder.misc.version import VERSION
return 'version ' + str(VERSION)
@app.get('/cellcheck')
def cellsearch():
if sys.platform == 'win32':
if not get_cccsd_path():
return 'false'
else:
return 'true'
else:
        try:
            if which('ccdc_searcher') or \
                    Path('/opt/CCDC/CellCheckCSD/bin/ccdc_searcher').exists():
                print('CellCheckCSD found')
                return 'true'
        except TypeError:
            return 'false'
        # Explicitly report the negative case instead of falling through and
        # returning None when the searcher executable is not found.
        return 'false'
@app.route('/favicon.ico')
def redirect_to_favicon():
redirect('/static/favicon.ico')
@app.route('/current-cif/<structure_id:int>')
def download_currently_selected_cif(structure_id):
if not download_button:
return 'Downloading a CIF was turned off by the administrator.'
headers = dict()
structures = StructureTable(dbfilename)
cif_data = structures.get_cif_export_data(structure_id)
doc = cif_data_to_document(cif_data)
file = doc.as_string(style=Style.Indent35)
headers['Content-Type'] = 'text/plain'
headers['Content-Encoding'] = 'ascii'
headers['Content-Length'] = len(file)
now = datetime.datetime.now()
lm = now.strftime("%a, %d %b %Y %H:%M:%S GMT")
headers['Last-Modified'] = lm
return HTTPResponse(file, **headers)
@app.get('/csd')
def show_cellcheck():
"""
Shows the CellcheckCSD web page
"""
structures = StructureTable(dbfilename)
str_id = request.get_cookie('str_id')
centering = ''
if str_id:
cell = structures.get_cell_by_id(str_id)
cif_dic = structures.get_row_as_dict(str_id)
try:
centering = cif_dic['_space_group_centring_type']
except KeyError:
centering = ''
cellstr = '{:>8.3f} {:>8.3f} {:>8.3f} {:>8.3f} {:>8.3f} {:>8.3f}'.format(*cell)
else:
cellstr = ''
if centering:
try:
cent = centering_letter_2_num[centering]
except KeyError: # mostly value of '?'
cent = 0
else:
cent = 0
response.content_type = 'text/html; charset=UTF-8'
data = {"my_ip" : site_ip,
"title" : 'StructureFinder',
'cellstr': cellstr,
'strid' : str_id,
'cent' : cent,
'host' : host, }
output = template('./cgi_ui/views/cellcheckcsd', data)
return output
@app.post('/csd-list')
def search_cellcheck_csd():
"""
Search with CellcheckCSD.
"""
cmd = request.POST.cmd
cell = request.POST.cell
str_id = request.POST.str_id
if not cell:
return {}
cent = request.POST.centering
if len(cell) < 6:
return {}
if cmd == 'get-records' and len(cell.split()) == 6:
xml = search_csd(cell.split(), centering=centering_num_2_letter[int(cent)])
# print(xml)
try:
results = parse_results(xml) # results in a dictionary
except ParseError as e:
print(e)
return
# print(results)
if str_id:
structures = StructureTable(dbfilename)
print(len(results), 'Structures found...')
return {"total": len(results), "records": results, "status": "success"}
else:
return {}
@app.error(404)
def error404(error):
"""
Redefine 404 message.
"""
return '''<div style="text-align: center;">
<b>Nothing here, sorry.</b><br>
<p>
<a href="http://{}{}/">Back to main page</a>
</p>
</div>
'''.format(host, ':' + port)
def is_ajax():
if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
return True
else:
return False
def get_structures_json(structures: StructureTable, ids: (list, tuple) = None, show_all: bool = False) -> dict:
"""
    Returns the next package of table rows for continuous scrolling.
"""
if not ids and not show_all:
return {}
dic = structures.get_all_structures_as_dict(ids)
number = len(dic)
print("--> Got {} structures from actual search.".format(number))
if number == 0:
return {}
return {"total": number, "records": dic, "status": "success"}
def get_cell_parameters(structures: StructureTable, strid: str) -> str:
"""
    Returns the unit cell parameters as an HTML-formatted string.
"""
c = structures.get_cell_by_id(strid)
cstr = """<b>Unit Cell:</b>
<i>a</i> = {0:>8.3f} Å,
<i>b</i> = {1:>8.3f} Å,
<i>c</i> = {2:>8.3f} Å,
<i>α</i> = {3:>8.3f}°,
<i>β</i> = {4:>8.3f}°,
<i>γ</i> = {5:>8.3f}°,
<i>V</i> = {6} Å<sup>3</sup>
<div style="font-size:0pt" id='hidden-cell'>{0} {1} {2} {3} {4} {5}</div>
""".format(c[0], c[1], c[2], c[3], c[4], c[5], round(c[6], 2))
return cstr
def get_residuals_table1(structures: StructureTable, cif_dic: dict, structure_id: int) -> str:
"""
Returns a table with the most important residuals of a structure.
"""
try:
rsigma = " / {}".format(cif_dic['_diffrn_reflns_av_unetI_netI'])
except (TypeError, ValueError):
rsigma = " "
if not cif_dic:
return ""
if cif_dic['_refine_diff_density_max']:
peakhole = "{} / {}".format(cif_dic['_refine_diff_density_max'], cif_dic['_refine_diff_density_min'])
else:
peakhole = " "
try:
sumform = format_sum_formula(structures.get_calc_sum_formula(structure_id), break_after=99)
except KeyError:
sumform = ''
if sumform == '':
# Display this as last resort:
sumform = cif_dic['_chemical_formula_sum']
table1 = """
<table class="table table-bordered table-condensed" id='resitable1'>
<tbody>
<tr><td style='width: 40%'><b>Space Group</b></td> <td>{0}</td></tr>
<tr><td><b>Z</b></td> <td>{1}</td></tr>
<tr><td><b>Sum Formula</b></td> <td>{2}</td></tr>
<tr><td><b>Temperature [K]</b></td> <td>{3}</td></tr>
<tr><td><b><i>wR</i><sub>2</sub></b></td> <td>{4}</td></tr>
    <tr><td><b><i>R</i><sub>1</sub></b></td>                 <td>{5}</td></tr>
<tr><td><b>Goof</b></td> <td>{6}</td></tr>
<tr><td><b>Max Shift/esd</b></td> <td>{7}</td></tr>
<tr><td><b>Peak / Hole [eÅ<sup>−3</sup>]</b></td> <td>{8}</td></tr>
    <tr><td><b><i>R</i><sub>int</sub> / <i>R</i><sub>σ</sub></b></td>  <td>{9}{10} </td></tr>
<tr><td><b>Wavelength [Å]</b></td> <td>{11}</td></tr>
</tbody>
</table>
""".format(cif_dic['_space_group_name_H_M_alt'],
cif_dic['_cell_formula_units_Z'],
sumform,
cif_dic['_diffrn_ambient_temperature'],
cif_dic['_refine_ls_wR_factor_ref'] if cif_dic['_refine_ls_wR_factor_ref'] else cif_dic[
'_refine_ls_wR_factor_gt'],
cif_dic['_refine_ls_R_factor_gt'] if cif_dic['_refine_ls_R_factor_gt'] else cif_dic[
'_refine_ls_R_factor_all'],
cif_dic['_refine_ls_goodness_of_fit_ref'],
cif_dic['_refine_ls_shift_su_max'],
peakhole,
cif_dic['_diffrn_reflns_av_R_equivalents'],
rsigma,
cif_dic['_diffrn_radiation_wavelength']
)
return table1
def get_residuals_table2(cif_dic: dict) -> str:
"""
Returns a table with the most important residuals of a structure.
"""
if not cif_dic:
return ""
wavelen = cif_dic['_diffrn_radiation_wavelength']
thetamax = cif_dic['_diffrn_reflns_theta_max']
thetafull = cif_dic['_diffrn_reflns_theta_full']
# d = lambda/2sin(theta):
try:
d = wavelen / (2 * math.sin(math.radians(thetamax)))
except(ZeroDivisionError, TypeError):
d = 0.0
try:
compl = cif_dic['_diffrn_measured_fraction_theta_max'] * 100
| |
found in another datagroup it may be changed.
This will be used to navigate data_file and find the
correct final image. (Default: 'StitchedImage')
mode: str
Mode determines what color, quality and
how many images are saved.
Possible values for mode: save_ubyte, save_float,
        save_rgb. If another or no value is given, the image
        is saved as is and also as a low-quality copy
        (pixel depth 8 bits). (Default: 'both')
"""
# Save the results:
if mode == 'save_ubyte':
inout.save_image(data_file, hyb_nr, gene, pre_proc_level, 'final_image_ubyte', location_image + '_byte')
elif mode == 'save_float':
inout.save_image(data_file, hyb_nr, gene, pre_proc_level, 'final_image', location_image)
elif mode == 'save_rgb':
inout.save_image(data_file, hyb_nr, gene, pre_proc_level, 'final_image_rgb', location_image + '_rgb')
else:
inout.save_image(data_file, hyb_nr, gene, pre_proc_level, 'final_image_ubyte', location_image + '_byte')
inout.save_image(data_file, hyb_nr, gene, pre_proc_level, 'final_image', location_image)
def plot_final_image(im_file_name, joining, hyb_nr = 1,
gene = 'Nuclei', fig_name = "final image",
shrink_image = False, block = True):
"""Displays the high quality final image in a plot window.
Takes a lot of working memory for full sized images.
When plt_available is false this function does nothing and returns
None.
Parameters:
-----------
im_file_name: str
Filename of the hdf5 file, containing the final image.
fig_name: str
Name of the plotting window (default: "final image").
shrink_image: bool
Turn on shrink_image to reduce display quality and memory usage. (Default: False)
block: bool
        Plot blocks the running program until
        the plotting window is closed if true. Turn off
        block to make the code continue until the next call
        of plt.show(block=True) before displaying the
        image. (default: True)
"""
if plt_available:
if isinstance(im_file_name, str):
# Load the image from file
im_file = h5py.File(im_file_name + '_Hybridization' +
str(hyb_nr) + '.sf.hdf5', 'r')
for_display = im_file['final_image']
else:
# Load the image from file
for_display = im_file_name[gene] \
['StitchedImage']['final_image']
# Shrink the image if necessary
if shrink_image:
display_size = np.array(joining['final_image_shape'],
dtype = int)/10
logger.debug("display size pixels: {}".format(display_size))
for_display = smtf.resize(for_display, tuple(display_size))
# Plot the image
if for_display.ndim == 3:
inout.plot_3D(for_display)
else:
plt.figure(fig_name)
plt.imshow(for_display, 'gray', interpolation = 'none')
plt.show(block = False)
# Load the image from file
if isinstance(im_file_name, str):
# Load the image from file
im_file = h5py.File(im_file_name + '.hdf5', 'r')
for_display = im_file['temp_mask']
else:
for_display = im_file_name['Hybridization' + str(hyb_nr)][gene] \
['StitchedImage']['temp_mask']
# Shrink the image if necessary
if shrink_image:
display_size = np.array(joining.final_image_shape, dtype=int) / 10
logger.debug("display size pixels: {}".format(display_size))
for_display = smtf.resize(for_display, tuple(display_size))
# Plot the image
plt.figure(fig_name + ' mask')
plt.imshow(for_display, 'gray', interpolation='none')
plt.show(block = block)
else:
return None
def get_pairwise_input_npy(image_properties,converted_positions, hybridization,
est_overlap, y_flip = False, nr_dim = 2):
"""Get the information necessary to do the pairwise allignment
Modified version of the get_pairwise_input functions that work on .npy
files and not on hdf5
Find the pairwise pairs for an unknown stitching.
Parameters:
-----------
image_properties: dict
Dictionary with the image details parsed from the Experimental_metadata.yaml file
converted_positions: dict
Dictionary with the coords of the images for all hybridization
The coords are a list of floats
hybridization: str
Hybridization that will be processed (Ex. Hybridization2)
est_overlap: float
The fraction of two neighbours that should
overlap, this is used to estimate the shape of the
tile set and then overwritten by the actual average
overlap according to the microscope coordinates.
(default: 0.1)
y_flip: bool
The y_flip variable is designed for the cases where the
microscope sequence is inverted in the y-direction. When
set to True the y-coordinates will also be inverted
before determining the tile set. (Default: False)
nr_dim: int
If 3, the code will assume three dimensional data
for the tile, where z is the first dimension and y and x
the second and third. For any other value 2-dimensional data
is assumed. (Default: 2)
Returns:
--------
tiles: np.array
        Array of ints with the tile numbers; -1 indicates an empty tile
contig_tuples: list
List of tuples. Each tuple is a tile pair.
        Tuples contain two tile indexes denoting that these
        tiles are contiguous to each other.
nr_pixels: int
Height and length of the tile in pixels, tile is assumed to be square.
z_count: int
The number of layers in one tile (size of
the z-axis). Is 1 when nr_dim is not 3.
micData: object
MicroscopeData object. Contains coordinates of
the tile corners as taken from the microscope.
"""
# Get coordinate data for this hybridization
coord_data = converted_positions[hybridization]
# Read the number of pixels, z-count and pixel size from the yaml
# file.
try:
nr_pixels = image_properties['HybImageSize']['rows']
except KeyError as err:
logger.info(("Number of pixels not found in experimental "
+ "metadata file.\nPlease add "
+ "the number of pixels in an image "
+ "to the experimental "
+ "metadata file under ImageProperties "
+ "--> HybImageSize --> rows.\n"
+ "KeyError: {}").format(err))
raise
if nr_dim == 2:
z_count = 1
else:
try:
z_count = image_properties['HybImageSize']['zcount']
except KeyError as err:
logger.info(("Number of pixels not found in experimental "
+ "metadata file.\nPlease add "
+ "the number of slices in the z-stack "
+ "to the experimental "
+ "metadata file under ImageProperties "
+ "--> HybImageSize --> zcount.\n"
+ "KeyError: {}")
.format(err))
raise
try:
pixel_size = image_properties['PixelSize']
except KeyError as err:
logger.info(("ImageProperties['PixelSize'] not found in "
+ "experimental metadata file.\nPlease add the "
+ "size of a pixel in um in the experimental "
+ "metadata file under ImageProperties "
+ "--> PixelSize.\nKeyError: {}").format(err))
raise
# Estimate the overlap in pixels with the overlap that the user
# provided, default is 10%
est_x_tol = nr_pixels * (1 - est_overlap)
logger.info("Estimating overlap at {}%, that is {} pixels"
.format(est_overlap * 100, est_x_tol))
logger.debug("Number of pixels: {}".format(nr_pixels))
logger.debug("Number of slices in z-stack: {}".format(z_count))
# Organize the microscope data and determine tile set
micData = MicroscopeData(coord_data, y_flip, nr_dim)
micData.normalize_coords(pixel_size)
micData.make_tile_set(est_x_tol, nr_pixels = nr_pixels)
# Make a list of image numbers, matching with the numbers in the
# image files
flat_tile_set = micData.tile_set.flat[:]
image_list = [micData.tile_nr[ind] if ind >= 0 else -1 for ind in flat_tile_set]
image_list = np.ma.masked_equal(image_list, -1)
logger.info("Getting references for: {}".format(image_list))
# Make a list of the image names (-1 is a missing tile)
tiles = image_list.data
# Produce an undirected graph of the tiles, tiles that are
# neighbours to each other are connected in this graph.
# noinspection PyPep8Naming
C = np.asarray(sklim.grid_to_graph(*micData.tile_set.shape).todense())
np.fill_diagonal(C, 0)
# noinspection PyPep8Naming
C = np.triu( C )
# Extract the neighbour pairs from the graph
contig_tuples =list(zip( *np.where( C ) ))
logger.info(("Length contingency tuples: {} \n"
+ "Contingency tuples: {}")
.format(len(contig_tuples), contig_tuples))
return(tiles, contig_tuples, nr_pixels, z_count, micData)
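# --- Illustrative sketch (not part of the original module) -------------------
# How the neighbour pairs are derived for a tiny 2 x 2 tile set; `sklim` above is
# presumed to be sklearn.feature_extraction.image.
def _example_neighbour_pairs():
    import numpy as np
    from sklearn.feature_extraction import image as sklim
    C = np.asarray(sklim.grid_to_graph(2, 2).todense())  # 4 tiles, 4-connectivity
    np.fill_diagonal(C, 0)   # a tile is not its own neighbour
    C = np.triu(C)           # keep each unordered pair only once
    return list(zip(*np.where(C)))  # pairs such as (0, 1), (0, 2), (1, 3), (2, 3)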
def get_place_tile_input_apply_npy(hyb_dir,stitched_reference_files_dir,data_name,image_properties,nr_dim=2):
"""
Modified version of the get_place_tile_input_apply
Get the data needed to apply stitching to another gene
Parameters:
-----------
hyb_dir: str
String representing the path of the folder containing
the tile file, the stitching data file the yaml metadata file.
stitched_reference_files_dir: str
String representing the path of the folder containing the registered data.
data_name: str
Name of the file containing the pickled stitching data.
image_properties: dict
Dictionary with the image details parsed from the Experimental_metadata.yaml file
nr_dim: int
If 3, the code will assume three dimensional data
for the tile, where z is the first dimension and y and x
the second and third. For any other value 2-dimensional data
is assumed. (Default: 2)
Returns:
--------
joining: dict
Taken from the stitching data file.
Contains keys corner_list and final_image_shape.
Corner_list is a list of list, each list is a pair
of an image number (int) and it's coordinates (numpy
array containing floats).
Final_image_shape is a tuple of size 2 or 3
        depending on the number of dimensions and contains
ints.
tiles: list
        List of strings. List of references to the tiles in the hdf5 file tile_file.
nr_pixels: int
Height and length of the tile in pixels, tile is assumed to be square.
z_count: int
The number of layers in one tile (size of the z-axis). Is 1 when nr_dim is not 3.
micData: object
MicroscopeData object. Taken from the | |
<filename>sem_seg/get_info.py
import os
import re
import sys
import time
import copy
import math
import argparse
import numpy as np
import scipy as scp
import open3d as o3d
from natsort import natsorted
import matplotlib.pyplot as plt
from scipy.spatial import distance
from plyfile import PlyData, PlyElement
from mpl_toolkits.mplot3d import Axes3D
from skimage.morphology import skeletonize
'''
- python get_info.py --path_projections data/ --path_models valve_targets/ --path_cls 4.txt
'''
def info_to_ply(info, path_out):
info_pipes_list = info[0]
info_connexions_list = info[1]
info_valves_list = info[2]
pipe_ply = list() # X Y Z R G B A
startend_ply = list() # X Y Z R G B A
elbow_ply = list() # X Y Z R G B A
vector1_ply = list() # X Y Z R G B A
vector2_ply = list() # V1 V2
connexion_ply = list() # X Y Z R G B A
valve_ply = list() # X Y Z R G B A
for i, pipe_info in enumerate(info_pipes_list):
pipe_list = list(pipe_info[0])
pipe_list.pop(0)
pipe_list.pop(-1)
pipe_ply = pipe_ply + pipe_list
startend_ply.append(pipe_info[0][0])
startend_ply.append(pipe_info[0][-1])
elbow_ply = elbow_ply + pipe_info[1]
if len(pipe_info[1]) == 0:
point1 = pipe_info[0][0]
point2 = pipe_info[0][0]+pipe_info[2][0]
vector1_ply.append(point1)
vector1_ply.append(point2)
else:
point1 = pipe_info[0][0]
point2 = pipe_info[0][0]+pipe_info[2][0]
vector1_ply.append(point1)
vector1_ply.append(point2)
for i, elbow in enumerate(pipe_info[1]):
point1 = elbow
point2 = elbow + pipe_info[2][i+1]
vector1_ply.append(point1)
vector1_ply.append(point2)
for i, connexion_info in enumerate(info_connexions_list):
connexion_ply.append(connexion_info[0])
for i, valve_info in enumerate(info_valves_list):
valve_ply.append(valve_info[0])
point1 = valve_info[0]-(valve_info[2]/2)
point2 = valve_info[0]+(valve_info[2]/2)
vector1_ply.append(point1)
vector1_ply.append(point2)
pipe_ply_np = np.round(np.array(pipe_ply), 5)
pipe_color = np.array([[0, 255, 0],]*pipe_ply_np.shape[0])
pipe_ply_np_color = np.hstack((pipe_ply_np, pipe_color))
startend_ply_np = np.round(np.array(startend_ply), 5)
startend_color = np.array([[0, 150, 0],]*startend_ply_np.shape[0])
startend_ply_np_color = np.hstack((startend_ply_np, startend_color))
elbow_ply_np = np.round(np.array(elbow_ply), 2)
elbow_color = np.array([[255, 0, 0],]*elbow_ply_np.shape[0])
elbow_ply_np_color = np.hstack((elbow_ply_np, elbow_color))
connexion_ply_np= np.round(np.array(connexion_ply), 2)
connexion_color = np.array([[0, 0, 0],]*connexion_ply_np.shape[0])
connexion_ply_np_color = np.hstack((connexion_ply_np, connexion_color))
valve_ply_np = np.round(np.array(valve_ply), 5)
valve_color = np.array([[0, 0, 255],]*valve_ply_np.shape[0])
valve_ply_np_color = np.hstack((valve_ply_np, valve_color))
vector1_ply_np = np.round(np.array(vector1_ply), 5)
vector1_color = np.array([[150, 150, 150],]*vector1_ply_np.shape[0])
vector1_ply_np_color = np.hstack((vector1_ply_np, vector1_color))
pipe_ply = list(pipe_ply_np_color)
startend_ply = list(startend_ply_np_color)
elbow_ply = list(elbow_ply_np_color)
connexion_ply = list(connexion_ply_np_color)
valve_ply = list(valve_ply_np_color)
vector1_ply = list(vector1_ply_np_color)
vertex = pipe_ply + startend_ply + elbow_ply + connexion_ply + valve_ply + vector1_ply
vertex_np = np.array(vertex)
disscount = vector1_ply_np.shape[0]-1
last_idx = vertex_np.shape[0]-1
for i in range(int(vector1_ply_np.shape[0]/2)):
vector_idxs = np.array([last_idx-disscount,last_idx-disscount+1])
vector2_ply.append(vector_idxs)
disscount -=2
vector2_ply_np = np.array(vector2_ply)
f = open(path_out, 'w')
f.write("ply" + '\n')
f.write("format ascii 1.0" + '\n')
f.write("comment VCGLIB generated" + '\n')
f.write("element vertex " + str(vertex_np.shape[0]) + '\n')
f.write("property float x" + '\n')
f.write("property float y" + '\n')
f.write("property float z" + '\n')
f.write("property uchar red" + '\n')
f.write("property uchar green" + '\n')
f.write("property uchar blue" + '\n')
f.write("element face 0" + '\n')
f.write("property list uchar int vertex_indices" + '\n')
f.write("element edge " + str(vector2_ply_np.shape[0]) + '\n')
f.write("property int vertex1" + '\n')
f.write("property int vertex2" + '\n')
f.write("end_header" + '\n')
for row in range(vertex_np.shape[0]):
line = ' '.join(map(str, vertex_np[row, :-3])) + ' ' + str(int(vertex_np[row, 3]))+ ' ' + str(int(vertex_np[row, 4])) + ' ' + str(int(vertex_np[row, 5])) +'\n'
f.write(line)
for row in range(vector2_ply_np.shape[0]):
line = str(int(vector2_ply_np[row, 0]))+ ' ' + str(int(vector2_ply_np[row, 1])) +'\n'
f.write(line)
f.close()
def info_to_array(info):
info_pipes_list = info[0]
pipe_inst_list = info[3]
info_connexions_list = info[1]
info_valves_list = info[2]
inst = 0
info_list = list()
for i, pipe_info in enumerate(info_pipes_list):
skeleton = pipe_info[0]
pipe_color = np.array([[0, 255, 0],]*skeleton.shape[0])
skeleton = np.hstack((skeleton, pipe_color))
skeleton = np.insert(skeleton, 6, values=0, axis=1) # insert type 0 - skeleton
skeleton = np.insert(skeleton, 7, values=0, axis=1) # insert info 0 - nothing
if len(pipe_info[1]) > 0:
elbows = np.array(pipe_info[1])
elbows = np.round(elbows, 2)
elbow_color = np.array([[255, 0, 0],]*elbows.shape[0])
elbows = np.hstack((elbows, elbow_color))
elbows = np.insert(elbows, 6, values=1, axis=1) # insert type 1 - elbow
elbows = np.insert(elbows, 7, values=0, axis=1) # insert info 0 - nothing
vector_list = list()
vp1 = pipe_info[0][0]
vp2 = pipe_info[0][0]+pipe_info[2][0]
vector_list.append(vp1)
vector_list.append(vp2)
for i, elbow in enumerate(pipe_info[1]):
vp1 = elbow
vp2 = elbow + pipe_info[2][i+1]
vector_list.append(vp1)
vector_list.append(vp2)
vectors = np.array(vector_list)
vector_color = np.array([[30, 30, 30],]*vectors.shape[0])
vectors = np.hstack((vectors, vector_color))
vectors = np.insert(vectors, 6, values=2, axis=1) # insert type 2 - vector
vectors = np.insert(vectors, 7, values=0, axis=1) # insert info 0 - nothing
belonging_insts_list = list()
for i, belonging_inst_idx in enumerate(pipe_info[3]):
belonging_inst = np.append(pipe_info[0][0], [0, 255, 0, 7, belonging_inst_idx]) # insert color, type 7 - belonging inst and info - belonging inst idx
belonging_insts_list.append(belonging_inst)
belonging_insts = np.array(belonging_insts_list)
if len(pipe_info[1]) > 0:
pipe = np.vstack((skeleton,elbows,vectors,belonging_insts))
else:
pipe = np.vstack((skeleton,vectors,belonging_insts))
pipe = np.insert(pipe, 8, values=0, axis=1) # insert class 0 - pipe
pipe = np.insert(pipe, 9, values=inst, axis=1) # insert inst
info_list.append(pipe)
inst += 1
for i, pipe_inst in enumerate(pipe_inst_list):
data = pipe_inst[:,0:3]
inst_color = np.array([[0, 150, 0],]*data.shape[0])
data = np.hstack((data, inst_color))
data = np.insert(data, 6, values=6, axis=1) # insert type 6 - inst data
data = np.insert(data, 7, values=i, axis=1) # insert info i - instance number
data = np.insert(data, 8, values=0, axis=1) # insert class 0 - pipe
data = np.insert(data, 9, values=inst, axis=1) # insert inst
info_list.append(data)
inst += 1
for i, valve_info in enumerate(info_valves_list):
central = np.append(valve_info[0], [0, 0, 255, 3, 0]) # insert color, type 3 - central point and info 0 - nothing
vp1 = valve_info[0]-(valve_info[2]/2)
vp1 = np.append(vp1, [127,127,127, 2, 0]) # insert color, type 2 - vector and info 0 - nothing
vp2 = valve_info[0]+(valve_info[2]/2)
vp2 = np.append(vp2, [127,127,127, 2, 0]) # insert color, type 2 - vector and info 0 - nothing
max_id = np.append(valve_info[0], [0, 0, 255, 5, valve_info[3]]) # insert color, type 5 - max_id and info - max id
if len(valve_info[5]) > 0:
near_pipes_list = list()
for i, near_pipe_idx in enumerate(valve_info[5]):
near_pipe = np.append(valve_info[0], [0, 0, 255, 4, near_pipe_idx]) # insert color, type 4 - near pipe and info - near_pipe_idx
near_pipes_list.append(near_pipe)
near_pipes = np.array(near_pipes_list)
valve = np.vstack((central,vp1,vp2,max_id,near_pipes))
else:
valve = np.vstack((central,vp1,vp2,max_id))
valve = np.insert(valve, 8, values=1, axis=1) # insert class 1 - valve
valve = np.insert(valve, 9, values=inst, axis=1) # insert inst
info_list.append(valve)
inst += 1
for i, connexion_info in enumerate(info_connexions_list):
central = np.append(connexion_info[0], [0, 0, 0, 3, 0]) # insert color, type 3 - central point and info - nothing
near_pipes_list = list()
for i, near_pipe_idx in enumerate(connexion_info[1]):
near_pipe = np.append(connexion_info[0], [0, 0, 0, 4, near_pipe_idx]) # insert color, type 4 - near pipe and info - near_pipe_idx
near_pipes_list.append(near_pipe)
near_pipes = np.array(near_pipes_list)
connexion = np.vstack((central,near_pipes))
connexion = np.insert(connexion, 8, values=2, axis=1) # insert class 2 - connexion
connexion = np.insert(connexion, 9, values=inst, axis=1) # insert inst
info_list.append(connexion)
inst += 1
info_array = np.array(info_list)
info_array = np.vstack(info_array)
return info_array
def get_info_classes(cls_path):
classes = []
colors = []
for line in open(cls_path):
data = line.split()
classes.append(data[0])
colors.append([int(data[1]), int(data[2]), int(data[3])])
labels = {cls: i for i, cls in enumerate(classes)}
label2color = {classes.index(cls): colors[classes.index(cls)] for cls in classes}
return classes, labels, label2color
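# Note (assumption): the class file passed via --path_cls appears to contain one
# class per line in the form "<name> <R> <G> <B>", e.g. for a 4-class file:
#   pipe 0 255 0
#   valve 0 0 255
#   connexion 0 0 0
#   background 200 200 200
# get_info_classes('4.txt') would then return the class list, a name->index dict
# and an index->[R, G, B] dict.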
def angle_between_vectors(v1, v2):
v1_u = v1/np.linalg.norm(v1)
v2_u = v2/np.linalg.norm(v2)
angle = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
return np.degrees(angle)
def get_distance(p1, p2, dim):
if dim == 2:
d = math.sqrt(((p2[0]-p1[0])**2)+((p2[1]-p1[1])**2))
if dim == 3:
d = math.sqrt(((p2[0]-p1[0])**2)+((p2[1]-p1[1])**2)+((p2[2]-p1[2])**2))
return d
def numpy_unique(arr):
_, index = np.unique(arr, axis=0,return_index=True)
arr_unique = arr[np.sort(index)]
return arr_unique
def read_ply(filename, type):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
if type == "proj":
pc_array = np.array([[x, y, z, r, g, b, c, i] for x,y,z,r,g,b,c,i in pc])
if type == "model":
pc_array = np.array([[x, y, z, nx, ny ,nz, r, g, b] for x,y,z,nx,ny,nz,r,g,b in pc])
return pc_array
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp])
def print_o3d(pc):
pc_temp = copy.deepcopy(pc)
o3d.visualization.draw_geometries([pc_temp])
def preprocess_point_cloud(pcd, radius_feature):
#print(":: Compute FPFH feature with search radius %.3f." % radius_feature)
#print("--fpfh")
pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
pcd,
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
return pcd, pcd_fpfh
def execute_global_registration(source, target, source_fpfh,
target_fpfh, distance_threshold):
#print(":: RANSAC registration on downsampled point clouds.")
#print(" we use a liberal distance threshold %.3f." % distance_threshold)
result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
source, target, source_fpfh, target_fpfh, True,
distance_threshold,
o3d.pipelines.registration.TransformationEstimationPointToPoint(False),
3, [
o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(
0.9),
o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(
distance_threshold)
], o3d.pipelines.registration.RANSACConvergenceCriteria(100000, 0.999))
return result
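# --- Illustrative usage sketch (not part of the original script) -------------
# File names, voxel size and thresholds below are assumptions for illustration.
def _example_global_registration(source_path="model.ply", target_path="scene.ply",
                                 voxel_size=0.01):
    source = o3d.io.read_point_cloud(source_path).voxel_down_sample(voxel_size)
    target = o3d.io.read_point_cloud(target_path).voxel_down_sample(voxel_size)
    for pc in (source, target):
        # FPFH features require normals on the downsampled clouds.
        pc.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
    source, source_fpfh = preprocess_point_cloud(source, radius_feature=voxel_size * 5)
    target, target_fpfh = preprocess_point_cloud(target, radius_feature=voxel_size * 5)
    result = execute_global_registration(source, target, source_fpfh, target_fpfh,
                                          distance_threshold=voxel_size * 1.5)
    draw_registration_result(source, target, result.transformation)
    return result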
def match(source, target):
| |
dose 50
Bioassay, Embryo Infective Dose 50
"""
zero029 = CodeSystemConcept(
{
"code": "0029",
"definition": "Bioassay, Embryo Lethal Dose 50",
"display": "Embryo lethal dose 50",
}
)
"""
Embryo lethal dose 50
Bioassay, Embryo Lethal Dose 50
"""
zero030 = CodeSystemConcept(
{
"code": "0030",
"definition": "Bioassay, Mouse intercerebral inoculation",
"display": "Mouse intercerebral inoculation",
}
)
"""
Mouse intercerebral inoculation
Bioassay, Mouse intercerebral inoculation
"""
zero031 = CodeSystemConcept(
{
"code": "0031",
"definition": "Bioassay, qualitative",
"display": "Bioassay, qualitative",
}
)
"""
Bioassay, qualitative
Bioassay, qualitative
"""
zero032 = CodeSystemConcept(
{
"code": "0032",
"definition": "Bioassay, quantitative",
"display": "Bioassay, quantitative",
}
)
"""
Bioassay, quantitative
Bioassay, quantitative
"""
zero033 = CodeSystemConcept(
{"code": "0033", "definition": "Chemical", "display": "Chemical method"}
)
"""
Chemical method
Chemical
"""
zero034 = CodeSystemConcept(
{
"code": "0034",
"definition": "Chemical, Differential light absorption",
"display": "Differential light absorption chemical test",
}
)
"""
Differential light absorption chemical test
Chemical, Differential light absorption
"""
zero035 = CodeSystemConcept(
{"code": "0035", "definition": "Chemical, Dipstick", "display": "Dipstick"}
)
"""
Dipstick
Chemical, Dipstick
"""
zero036 = CodeSystemConcept(
{
"code": "0036",
"definition": "Chemical, Dipstick colorimetric laboratory test",
"display": "Dipstick colorimetric laboratory test",
}
)
"""
Dipstick colorimetric laboratory test
Chemical, Dipstick colorimetric laboratory test
"""
zero037 = CodeSystemConcept(
{"code": "0037", "definition": "Chemical, Test strip", "display": "Test strip"}
)
"""
Test strip
Chemical, Test strip
"""
zero038 = CodeSystemConcept(
{"code": "0038", "definition": "Chromatography", "display": "Chromatography"}
)
"""
Chromatography
Chromatography
"""
zero039 = CodeSystemConcept(
{
"code": "0039",
"definition": "Chromatography, Affinity",
"display": "Affinity chromatography",
}
)
"""
Affinity chromatography
Chromatography, Affinity
"""
zero040 = CodeSystemConcept(
{
"code": "0040",
"definition": "Chromatography, Gas liquid",
"display": "Gas liquid chromatography",
}
)
"""
Gas liquid chromatography
Chromatography, Gas liquid
"""
zero041 = CodeSystemConcept(
{
"code": "0041",
"definition": "Chromatography, High performance liquid",
"display": "High performance liquid chromatography",
}
)
"""
High performance liquid chromatography
Chromatography, High performance liquid
"""
zero042 = CodeSystemConcept(
{
"code": "0042",
"definition": "Chromatography, Liquid",
"display": "Liquid Chromatography",
}
)
"""
Liquid Chromatography
Chromatography, Liquid
"""
zero043 = CodeSystemConcept(
{
"code": "0043",
"definition": "Chromatography, Protein A affinity",
"display": "Protein A affinity chromatography",
}
)
"""
Protein A affinity chromatography
Chromatography, Protein A affinity
"""
zero044 = CodeSystemConcept(
{"code": "0044", "definition": "Coagulation", "display": "Coagulation"}
)
"""
Coagulation
Coagulation
"""
zero045 = CodeSystemConcept(
{
"code": "0045",
"definition": "Coagulation, Tilt tube",
"display": "Tilt tube coagulation time",
}
)
"""
Tilt tube coagulation time
Coagulation, Tilt tube
"""
zero046 = CodeSystemConcept(
{
"code": "0046",
"definition": "Coagulation, Tilt tube reptilase induced",
"display": "Tilt tube reptilase induced coagulation",
}
)
"""
Tilt tube reptilase induced coagulation
Coagulation, Tilt tube reptilase induced
"""
zero047 = CodeSystemConcept(
{"code": "0047", "definition": "Count, Automated", "display": "Automated count"}
)
"""
Automated count
Count, Automated
"""
zero048 = CodeSystemConcept(
{"code": "0048", "definition": "Count, Manual", "display": "Manual cell count"}
)
"""
Manual cell count
Count, Manual
"""
zero049 = CodeSystemConcept(
{
"code": "0049",
"definition": "Count, Platelet, Rees-Ecker",
"display": "Platelet count, Rees-Ecker",
}
)
"""
Platelet count, Rees-Ecker
Count, Platelet, Rees-Ecker
"""
zero050 = CodeSystemConcept(
{"code": "0050", "definition": "Culture, Aerobic", "display": "Aerobic Culture"}
)
"""
Aerobic Culture
Culture, Aerobic
"""
zero051 = CodeSystemConcept(
{
"code": "0051",
"definition": "Culture, Anaerobic",
"display": "Anaerobic Culture",
}
)
"""
Anaerobic Culture
Culture, Anaerobic
"""
zero052 = CodeSystemConcept(
{
"code": "0052",
"definition": "Culture, Chicken Embryo",
"display": "Chicken embryo culture",
}
)
"""
Chicken embryo culture
Culture, Chicken Embryo
"""
zero053 = CodeSystemConcept(
{
"code": "0053",
"definition": "Culture, Delayed secondary enrichment",
"display": "Delayed secondary enrichment",
}
)
"""
Delayed secondary enrichment
Culture, Delayed secondary enrichment
"""
zero054 = CodeSystemConcept(
{
"code": "0054",
"definition": "Culture, Microaerophilic",
"display": "Microaerophilic Culture",
}
)
"""
Microaerophilic Culture
Culture, Microaerophilic
"""
zero055 = CodeSystemConcept(
{
"code": "0055",
"definition": "Culture, Quantitative microbial, cup",
"display": "Quantitative microbial culture, cup",
}
)
"""
Quantitative microbial culture, cup
Culture, Quantitative microbial, cup
"""
zero056 = CodeSystemConcept(
{
"code": "0056",
"definition": "Culture, Quantitative microbial, droplet",
"display": "Quantitative microbial culture, droplet",
}
)
"""
Quantitative microbial culture, droplet
Culture, Quantitative microbial, droplet
"""
zero057 = CodeSystemConcept(
{
"code": "0057",
"definition": "Culture, Quantitative microbial, filter paper",
"display": "Quantitative microbial culture, filter paper",
}
)
"""
Quantitative microbial culture, filter paper
Culture, Quantitative microbial, filter paper
"""
zero058 = CodeSystemConcept(
{
"code": "0058",
"definition": "Culture, Quantitative microbial, pad",
"display": "Quantitative microbial culture, pad culture",
}
)
"""
Quantitative microbial culture, pad culture
Culture, Quantitative microbial, pad
"""
zero059 = CodeSystemConcept(
{
"code": "0059",
"definition": "Culture, Quantitative microbial, pour plate",
"display": "Quantitative microbial culture, pour plate",
}
)
"""
Quantitative microbial culture, pour plate
Culture, Quantitative microbial, pour plate
"""
zero060 = CodeSystemConcept(
{
"code": "0060",
"definition": "Culture, Quantitative microbial, surface streak",
"display": "Quantitative microbial culture, surface streak",
}
)
"""
Quantitative microbial culture, surface streak
Culture, Quantitative microbial, surface streak
"""
zero061 = CodeSystemConcept(
{
"code": "0061",
"definition": "Culture, Somatic Cell",
"display": "Somatic Cell culture",
}
)
"""
Somatic Cell culture
Culture, Somatic Cell
"""
zero062 = CodeSystemConcept(
{"code": "0062", "definition": "Diffusion, Agar", "display": "Agar diffusion"}
)
"""
Agar diffusion
Diffusion, Agar
"""
zero063 = CodeSystemConcept(
{
"code": "0063",
"definition": "Diffusion, Agar Gel Immunodiffusion",
"display": "Agar Gel Immunodiffusion",
}
)
"""
Agar Gel Immunodiffusion
Diffusion, Agar Gel Immunodiffusion
"""
zero064 = CodeSystemConcept(
{"code": "0064", "definition": "Electrophoresis", "display": "Electrophoresis"}
)
"""
Electrophoresis
Electrophoresis
"""
zero065 = CodeSystemConcept(
{
"code": "0065",
"definition": "Electrophoresis, Agaorse gel",
"display": "Agaorse gel electrophoresis",
}
)
"""
Agaorse gel electrophoresis
Electrophoresis, Agaorse gel
"""
zero066 = CodeSystemConcept(
{
"code": "0066",
"definition": "Electrophoresis, citrate agar",
"display": "Electrophoresis, citrate agar",
}
)
"""
Electrophoresis, citrate agar
Electrophoresis, citrate agar
"""
zero067 = CodeSystemConcept(
{
"code": "0067",
"definition": "Electrophoresis, Immuno",
"display": "Immunoelectrophoresis",
}
)
"""
Immunoelectrophoresis
Electrophoresis, Immuno
"""
zero068 = CodeSystemConcept(
{
"code": "0068",
"definition": "Electrophoresis, Polyacrylamide gel",
"display": "Polyacrylamide gel electrophoresis",
}
)
"""
Polyacrylamide gel electrophoresis
Electrophoresis, Polyacrylamide gel
"""
zero069 = CodeSystemConcept(
{
"code": "0069",
"definition": "Electrophoresis, Starch gel",
"display": "Starch gel electrophoresis",
}
)
"""
Starch gel electrophoresis
Electrophoresis, Starch gel
"""
zero070 = CodeSystemConcept(
{"code": "0070", "definition": "ELISA", "display": "ELISA"}
)
"""
ELISA
ELISA
"""
zero071 = CodeSystemConcept(
{
"code": "0071",
"definition": "ELISA, antigen capture",
"display": "ELISA, antigen capture",
}
)
"""
ELISA, antigen capture
ELISA, antigen capture
"""
zero072 = CodeSystemConcept(
{
"code": "0072",
"definition": "ELISA, avidin biotin peroxidase complex",
"display": "ELISA, avidin biotin peroxidase complex",
}
)
"""
ELISA, avidin biotin peroxidase complex
ELISA, avidin biotin peroxidase complex
"""
zero073 = CodeSystemConcept(
{"code": "0073", "definition": "ELISA, Kinetic", "display": "Kinetic ELISA"}
)
"""
Kinetic ELISA
ELISA, Kinetic
"""
zero074 = CodeSystemConcept(
{
"code": "0074",
"definition": "ELISA, peroxidase-antiperoxidase",
"display": "ELISA, peroxidase-antiperoxidase",
}
)
"""
ELISA, peroxidase-antiperoxidase
ELISA, peroxidase-antiperoxidase
"""
zero075 = CodeSystemConcept(
{
"code": "0075",
"definition": "Identification, API 20 Strep",
"display": "API 20 Strep",
}
)
"""
API 20 Strep
Identification, API 20 Strep
"""
zero076 = CodeSystemConcept(
{"code": "0076", "definition": "Identification, API 20A", "display": "API 20A"}
)
"""
API 20A
Identification, API 20A
"""
zero077 = CodeSystemConcept(
{
"code": "0077",
"definition": "Identification, API 20C AUX",
"display": "API 20C AUX",
}
)
"""
API 20C AUX
Identification, API 20C AUX
"""
zero078 = CodeSystemConcept(
{"code": "0078", "definition": "Identification, API 20E", "display": "API 20E"}
)
"""
API 20E
Identification, API 20E
"""
zero079 = CodeSystemConcept(
{
"code": "0079",
"definition": "Identification, API 20NE",
"display": "API 20NE",
}
)
"""
API 20NE
Identification, API 20NE
"""
zero080 = CodeSystemConcept(
{
"code": "0080",
"definition": "Identification, API 50 CH",
"display": "API 50 CH",
}
)
"""
API 50 CH
Identification, API 50 CH
"""
zero081 = CodeSystemConcept(
{
"code": "0081",
"definition": "Identification, API An-IDENT",
"display": "API An-IDENT",
}
)
"""
API An-IDENT
Identification, API An-IDENT
"""
zero082 = CodeSystemConcept(
{
"code": "0082",
"definition": "Identification, API Coryne",
"display": "API Coryne",
}
)
"""
API Coryne
Identification, API Coryne
"""
zero083 = CodeSystemConcept(
{
"code": "0083",
"definition": "Identification, API Rapid 20E",
"display": "API Rapid 20E",
}
)
"""
| |
<gh_stars>0
from io import StringIO
import pandas as pd
import networkx as nx
import wntr
import math
import os
import numpy as np
import geopandas as gpd
import infrarisk.src.physical.interdependencies as interdependencies
import infrarisk.src.physical.water.water_network_model as water
import infrarisk.src.physical.power.power_system_model as power
import infrarisk.src.physical.transportation.network as transpo
import infrarisk.src.plots as model_plots
import infrarisk.src.repair_crews as repair_crews
class IntegratedNetwork:
"""An integrated infrastructure network class"""
def __init__(
self,
name,
water_folder=None,
power_folder=None,
transp_folder=None,
power_sim_type=None,
water_sim_type=None,
):
"""Initiates the IntegratedNetwork object.
:param name: The name of the network.
:type name: string
:param water_folder: The directory that consists of required water network files, defaults to None
:type water_folder: pathlib.Path object, optional
:param power_folder: The directory that consists of required power network files, defaults to None
:type power_folder: pathlib.Path object, optional
:param transp_folder: The directory that consists of required traffic network files, defaults to None
:type transp_folder: pathlib.Path object, optional
:param power_sim_type: Power simulation type ("1ph" for single phase networks, "3ph" for three phase networks), defaults to "1ph"
:type power_sim_type: string, optional
:param water_sim_type: Type of water simulation: 'PDA' for pressure-dependent driven analysis, 'DDA' for demand driven analysis
:type water_sim_type: string
"""
self.name = name
if water_folder is None:
self.wn = None
else:
self.load_water_network(water_folder, water_sim_type=water_sim_type)
if power_folder is None:
self.pn = None
else:
self.load_power_network(power_folder, power_sim_type=power_sim_type)
if transp_folder is None:
self.tn = None
else:
self.load_transpo_network(transp_folder)
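    # --- Illustrative usage (assumption; not part of the original module) ----
    # from pathlib import Path
    # net = IntegratedNetwork(
    #     name="example",
    #     water_folder=Path("networks/water"),     # expects water.inp (+ optional service_area/)
    #     power_folder=Path("networks/power"),     # expects power.json
    #     transp_folder=Path("networks/transpo"),  # expects transpo_net/trips/node .tntp files
    #     power_sim_type="1ph",
    #     water_sim_type="PDA",
    # )
    # net.generate_integrated_graph(basemap=False)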
def load_networks(
self,
water_folder,
power_folder,
transp_folder,
power_sim_type="1ph",
water_sim_type="PDA",
):
"""Loads the water, power and transportation networks.
:param water_folder: The directory that consists of required water network files, defaults to None
:type water_folder: pathlib.Path object, optional
:param power_folder: The directory that consists of required power network files, defaults to None
:type power_folder: pathlib.Path object, optional
:param transp_folder: The directory that consists of required traffic network files, defaults to None
:type transp_folder: pathlib.Path object, optional
:param power_sim_type: Power simulation type ("1ph" for single phase networks, "3ph" for three phase networks), defaults to "1ph"
:type power_sim_type: string, optional
:param water_sim_type: Type of water simulation: 'PDA' for pressure-dependent driven analysis, 'DDA' for demand driven analysis
:type water_sim_type: string
"""
# load water_network model
if water_folder is not None:
self.load_water_network(water_folder, water_sim_type)
# load power systems network
if power_folder is not None:
self.load_power_network(power_folder, power_sim_type)
# load static traffic assignment network
if transp_folder is not None:
self.load_transpo_network(transp_folder)
def load_power_network(self, power_folder, power_sim_type):
"""Loads the power network.
        :param power_folder: The directory that contains the power systems file (power.json)
        :type power_folder: pathlib.Path object
        :param power_sim_type: Power simulation type ("1ph" for single phase networks, "3ph" for three phase networks)
        :type power_sim_type: string
        The service area shapefile and the line-to-switch map are loaded automatically if present in the folder.
"""
try:
pn = power.load_power_network(
power_folder / "power.json", sim_type=power_sim_type
)
power.run_power_simulation(pn)
self.pn = pn
self.power_sim_time = power_sim_type
self.base_power_supply = power.generate_base_supply(pn)
except UserWarning:
print(
"Error: The power systems file does not exist. No such file or directory: ",
power_folder / "power.json",
)
if os.path.exists(power_folder / "line_to_switch_map.csv"):
line_switch_df = pd.read_csv(
power_folder / "line_to_switch_map.csv", sep=","
)
self.line_switch_dict = dict()
for _, row in line_switch_df.iterrows():
line = row["line"]
switch_list = [x for x in row[1:] if x is not np.nan]
self.line_switch_dict[line] = switch_list
if os.path.exists(power_folder / "service_area/service_area.shp"):
print("Loading power service area details...")
pn.service_area = gpd.read_file(
power_folder / "service_area/service_area.shp",
crs={"init": "epsg:4326"},
)
pn.service_area = pn.service_area.to_crs({"init": "epsg:3857"})
pn.service_area.Power_Node = "P_LO" + pn.service_area.Power_Node.astype(str)
pn.service_area.Id = pn.service_area.index
def load_water_network(self, water_folder, water_sim_type):
"""Loads the water network.
:param water_folder: The directory that consists of required water network files
:type water_folder: pathlib.Path object
:param water_sim_type: Type of water simulation: 'PDA' for pressure-dependent driven analysis, 'DDA' for demand driven analysis
:type water_sim_type: string
"""
initial_sim_step = 60
self.wn = water.load_water_network(
f"{water_folder}/water.inp", water_sim_type, initial_sim_step
)
self.water_sim_type = water_sim_type
if water_sim_type == "DDA":
if not os.path.exists(water_folder / "base_water_node_supply.csv"):
print("Generating base water supply values...")
water.generate_base_supply(self.wn, water_folder)
self.base_water_node_supply = pd.read_csv(
water_folder / "base_water_node_supply.csv"
)
self.base_water_link_flow = pd.read_csv(
water_folder / "base_water_link_flow.csv"
)
elif self.water_sim_type == "PDA":
if not os.path.exists(water_folder / "base_water_node_supply_pda.csv"):
print("Generating base water supply values...")
water.generate_base_supply_pda(self.wn, water_folder)
self.base_water_node_supply = pd.read_csv(
water_folder / "base_water_node_supply_pda.csv"
)
self.base_water_link_flow = pd.read_csv(
water_folder / "base_water_link_flow_pda.csv"
)
if os.path.exists(water_folder / "pipe_to_valve_map.csv"):
pipe_valve_df = pd.read_csv(water_folder / "pipe_to_valve_map.csv", sep=",")
self.pipe_valve_dict = dict()
for _, row in pipe_valve_df.iterrows():
pipe = row["pipe"]
valve_list = [x for x in row[1:] if x is not np.nan]
self.pipe_valve_dict[pipe] = valve_list
if os.path.exists(water_folder / "service_area/service_area.shp"):
print("Loading water service area details...")
self.wn.service_area = gpd.read_file(
water_folder / "service_area/service_area.shp",
crs={"init": "epsg:4326"},
)
self.wn.service_area = self.wn.service_area.to_crs({"init": "epsg:3857"})
self.wn.service_area.Water_Node = (
"W_J" + self.wn.service_area.Water_Node.astype(str)
)
self.wn.service_area.Id = self.wn.service_area.index
def load_transpo_network(self, transp_folder):
"""Loads the transportation network.
:param transp_folder: The directory that consists of required transportation network files
:type transp_folder: string
"""
try:
tn = transpo.Network(
f"{transp_folder}/transpo_net.tntp",
f"{transp_folder}/transpo_trips.tntp",
f"{transp_folder}/transpo_node.tntp",
)
print(
f"Transportation network successfully loaded from {transp_folder}. Static traffic assignment method will be used to calculate travel times."
)
# tn.userEquilibrium("FW", 400, 1e-4, tn.averageExcessCost)
self.tn = tn
self.base_transpo_flow = tn
except FileNotFoundError:
print(
f"Error: The transportation network folder does not exist. No such directory: {transp_folder}."
)
except AttributeError:
print("Error: Some required network files not found.")
def generate_integrated_graph(self, basemap=False):
"""Generates the integrated network as a Networkx graph."""
self.power_graph = self.generate_power_networkx_graph()
print("Successfully added power network to the integrated graph...")
self.water_graph = self.generate_water_networkx_graph()
print("Successfully added water network to the integrated graph...")
self.transpo_graph = self.generate_transpo_networkx_graph()
print("Successfully added transportation network to the integrated graph...")
G = nx.compose(
self.power_graph, nx.compose(self.water_graph, self.transpo_graph)
)
self.integrated_graph = G
self.set_map_extends()
print("Integrated graph successffully created.")
title = f"{self.name} integrated network"
self.generate_betweenness_centrality()
model_plots.plot_bokeh_from_integrated_graph(
G, title=title, extent=self.map_extends, basemap=basemap
)
def generate_betweenness_centrality(self):
"""Generates the betweenness centrality of the integrated graph."""
print("Generating betweenness centrality...")
pn_nx = self.power_graph
wn_nx = self.water_graph
tn_nx = self.transpo_graph
self.pn_nodebc = nx.betweenness_centrality(pn_nx, normalized=True)
self.pn_edgebc = nx.edge_betweenness_centrality(pn_nx, normalized=True)
self.wn_nodebc = nx.betweenness_centrality(wn_nx, normalized=True)
self.wn_edgebc = nx.edge_betweenness_centrality(wn_nx, normalized=True)
self.tn_nodebc = nx.betweenness_centrality(tn_nx, normalized=True)
self.tn_edgebc = nx.edge_betweenness_centrality(tn_nx, normalized=True)
def set_map_extends(self):
"""Sets the extents of the map in the format ((xmin, ymin), (xmax, ymax))."""
x, y = [], []
for node in self.integrated_graph.nodes:
x_coord, y_coord = self.integrated_graph.nodes[node]["coord"]
x.append(x_coord)
y.append(y_coord)
xdiff = math.floor(max(x)) - math.floor(min(x))
ydiff = math.floor(max(y)) - math.floor(min(y))
tol = 0.2
self.map_extends = [
(math.floor(min(x)) - tol * xdiff, math.floor(min(y)) - tol * ydiff),
(math.floor(max(x)) + tol * xdiff, math.floor(max(y)) + tol * ydiff),
]
def get_map_extends(self):
"""Returns the extents of the map in the format ((xmin, ymin), (xmax, ymax)).
:return: The extent of the integrated graph (coordinates)
:rtype: list of tuples
"""
return self.map_extends
def generate_power_networkx_graph(self, plot=False):
"""Generates the power network as a networkx object.
:param plot: To generate the network plot, defaults to False.
:type plot: bool, optional
:return: The power network as a networkx object.
:rtype: Networkx object
"""
G_power = nx.Graph()
# power network nodes
power_nodes = pd.DataFrame(
columns=["id", "node_type", "node_category", "x", "y"]
)
for index, row in self.pn.bus.iterrows():
power_nodes = power_nodes.append(
{
"id": row["name"],
"node_type": "power_node",
"node_category": "Bus",
"x": self.pn.bus_geodata.x[index],
"y": self.pn.bus_geodata.y[index],
},
ignore_index=True,
)
# power network links
power_links = pd.DataFrame(
columns=["id", "link_type", "link_category", "from", "to"]
)
for _, row in self.pn.line.iterrows():
power_links = power_links.append(
{
"id": row["name"],
"link_type": "Power",
"link_category": "Power line",
"from": self.pn.bus.name.values[row["from_bus"]],
"to": self.pn.bus.name.values[row["to_bus"]],
},
ignore_index=True,
)
for _, row in self.pn.trafo.iterrows():
power_links = power_links.append(
{
"id": row["name"],
"link_type": "Power",
"link_category": "Transformer",
"from": self.pn.bus.name.values[row["hv_bus"]],
"to": self.pn.bus.name.values[row["lv_bus"]],
},
ignore_index=True,
)
for _, row in self.pn.switch[self.pn.switch.et == "b"].iterrows():
power_links = power_links.append(
{
"id": row["name"],
"link_type": "Power",
"link_category": "Switch",
"from": self.pn.bus.name.values[row["bus"]],
"to": self.pn.bus.name.values[row["element"]],
},
ignore_index=True,
)
G_power = nx.from_pandas_edgelist(
power_links,
source="from",
target="to",
edge_attr=True,
)
for _, row in power_nodes.iterrows():
G_power.nodes[row["id"]]["node_type"] = row["node_type"]
G_power.nodes[row["id"]]["node_category"] = row["node_category"]
G_power.nodes[row["id"]]["coord"] = (row["x"], row["y"])
for graph in [G_power]:
for _, link in enumerate(graph.edges.keys()):
start_node, end_node = link
start_coords = graph.nodes[link[0]]["coord"]
end_coords = graph.nodes[link[1]]["coord"]
graph.edges[link]["length"] = round(
math.sqrt(
((start_coords[0] - end_coords[0]) ** 2)
+ ((start_coords[1] - end_coords[1]) ** 2)
),
3,
)
if plot == True:
pos = {node: G_power.nodes[node]["coord"] for node in power_nodes.id}
nx.draw(G_power, pos, node_size=1)
return G_power
def generate_water_networkx_graph(self, plot=False):
"""Generates the water network as a networkx object.
        :param plot: To generate the network plot, defaults to False.
:type plot: bool, optional
:return: The water network as a networkx object.
:rtype: Networkx object
"""
G_water = nx.Graph()
# water network nodes
| |
import h5py
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
import math
from numpy.random import RandomState
import pandas as pd
import matplotlib.pyplot as plt
import time
import random
import pickle
def load_dataset(data, frac, list_of_inputs, list_of_outputs, consistent = False, seed = 4):
"""
    Loads the dataset, splits it into train/test sets with the given proportion,
    and returns the inputs and outputs of each set
Arguments:
data -- string with name of datafile (datafile of shape (samples, features))
frac -- the split for the training set
list_of_inputs -- list with strings of input columns names
list_of_outputs -- list with strings of output columns names
    consistent -- if False, the train/test split is random; if True, the given seed is used for a reproducible split
    seed -- value of the random seed, used only if consistent is set to True
Returns:
train_set_x -- inputs of training set of shape (inputs, number of samples)
train_set_y -- outputs of training set (labels) of shape (output, number of samples)
test_set_x -- inputs of test set of shape (inputs, number of samples)
test_set_y -- outputs of test set (labels) of shape (output, number of samples)
"""
if consistent == True:
df = pd.read_csv(data)
print("Dataset shape:", df.shape)
rng = RandomState(seed)
else:
df = pd.read_csv(data)
print("Dataset shape:", df.shape)
rng = RandomState()
train_set = df.sample(frac=frac, random_state=rng)
test_set = df.loc[~df.index.isin(train_set.index)]
train_set_x = np.array(train_set[list_of_inputs][:])
train_set_y = np.array(train_set[list_of_outputs][:])
test_set_x = np.array(test_set[list_of_inputs][:])
test_set_y = np.array(test_set[list_of_outputs][:])
train_set_y = train_set_y.reshape((1, train_set_y.shape[0]))
test_set_y = test_set_y.reshape((1, test_set_y.shape[0]))
return train_set_x.T, train_set_y, test_set_x.T, test_set_y
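# --- Illustrative sketch (not part of the original module) -------------------
# Example call of load_dataset; the CSV name and column names are assumptions.
def _example_load_dataset():
    train_x, train_y, test_x, test_y = load_dataset(
        "data.csv", frac=0.8,
        list_of_inputs=["x1", "x2", "x3"], list_of_outputs=["y"],
        consistent=True, seed=4)
    # All arrays follow the (features, samples) convention used in this module.
    return train_x, train_y, test_x, test_y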
def separate_dataset(data, list_of_inputs, list_of_outputs):
"""
    Separates the dataset into inputs and outputs
Arguments:
data -- string with name of datafile
list_of_inputs -- list with strings of input columns names
list_of_outputs -- list with strings of output columns names
Returns:
x -- inputs
y -- outputs(labels)
"""
df = data
print("Dataset shape:", df.shape)
x = np.array(df[list_of_inputs][:])
y = np.array(df[list_of_outputs][:])
y = y.reshape((1, y.shape[0]))
return x.T, y
def normalize_inputs(train, test, name_mean = "normalizations/mean_value.pkl", name_std = "normalizations/std_value.pkl"):
"""
Arguments:
    train -- the training set inputs
    test -- the test set inputs
    name_mean -- name of the file where the mean value is stored
    name_std -- name of the file where the standard deviation value is stored
    Returns:
    train_norm -- the normalized training set inputs
    test_norm -- the normalized test set inputs
"""
mean = []
std = []
for i in range(train.shape[0]):
if np.std(train[i,:]) == 0:
mean.append([0])
std.append([1])
else:
mean.append([np.mean(train[i,:])])
std.append([np.std(train[i,:])])
train_norm = (train - np.array(mean)) / np.array(std)
test_norm = (test - np.array(mean)) / np.array(std)
f = open(name_mean, 'wb')
pickle.dump(mean, f)
f.close()
f = open(name_std, 'wb')
pickle.dump(std, f)
f.close()
return train_norm, test_norm
def normalize_with_existing(train, test, name_mean, name_std ):
"""
Arguments:
train -- the training set inputs
test -- the test set inputs
name_mean -- name of the file where the mean value is stored
name_std -- name of the file where the standard deviation value is stored
Returns:
train_norm -- the normalized training set inputs
test_norm -- the normalized test set inputs
"""
    with open(name_mean, "rb") as a_file:
        mean = pickle.load(a_file)
    with open(name_std, "rb") as a_file:
        std = pickle.load(a_file)
train_norm = (train - np.array(mean)) / np.array(std)
test_norm = (test - np.array(mean)) / np.array(std)
return train_norm, test_norm
def normalize_with_existing_wholedf(df, name_mean, name_std ):
"""
Arguments:
    df -- DataFrame (or array) of inputs to normalize
    name_mean -- name of the file where the per-feature mean values are stored
    name_std -- name of the file where the per-feature standard deviation values are stored
    Returns:
    df_norm -- the normalized inputs
"""
    with open(name_mean, "rb") as a_file:
        mean = pickle.load(a_file)
    with open(name_std, "rb") as a_file:
        std = pickle.load(a_file)
df_norm = (df - np.array(mean)) / np.array(std)
return df_norm
def create_placeholders(n_x, n_y, m = 0):
"""
Creates the placeholders for the tensorflow session.
Arguments:
    n_x -- scalar, size of input
    n_y -- scalar, size of output
    m -- number of samples if known; the default of 0 leaves the sample dimension as None
    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"
    Tips:
    - Using None for the sample dimension keeps the placeholders flexible in the number of examples,
      which typically differs between training and testing.
"""
if m == 0:
X = tf.compat.v1.placeholder("float", [n_x, None])
Y = tf.compat.v1.placeholder("float", [n_y, None])
else:
X = tf.compat.v1.placeholder("float", [n_x, m])
Y = tf.compat.v1.placeholder("float", [n_y, m])
return X, Y
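# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# create_placeholders relies on TF1-style graph mode, so eager execution has to
# be disabled first. The sizes below are illustrative only.
def _example_create_placeholders():
    tf.compat.v1.disable_eager_execution()
    X, Y = create_placeholders(n_x=12, n_y=1)
    print(X.shape, Y.shape)  # (12, None) and (1, None)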
def initialize_parameters(W, n_x):
"""
Initializes parameters to build a neural network with tensorflow. The shapes are:
    W1 : [W[0], n_x]
    b1 : [W[0], 1]
    W2 : [W[1], W[0]]
    b2 : [W[1], 1]
    ...
    ...
    Arguments:
    W -- list with number of neurons per layer, the layer being the index of the list
    n_x -- number of inputs
    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, ... for every layer
"""
#tf.set_random_seed(1) # so that your "random" numbers are consistent
parameters = {}
parameters["W1"] = tf.compat.v1.get_variable("W1", [W[0],n_x], initializer = tf.keras.initializers.GlorotNormal())
#parameters["W1"] = tf.compat.v1.get_variable("W1", [W[0],n_x], initializer = tf.random_normal_initializer())
parameters["b1"] = tf.compat.v1.get_variable("b1", [W[0],1], initializer = tf.zeros_initializer())
for i in range(1,len(W)):
parameters["W" + str(i+1)] = tf.compat.v1.get_variable("W" + str(i+1), [W[i],W[i-1]], initializer = tf.keras.initializers.GlorotNormal())
#parameters["W" + str(i+1)] = tf.compat.v1.get_variable("W" + str(i+1), [W[i],W[i-1]], initializer = tf.random_normal_initializer())
parameters["b" + str(i+1)] = tf.compat.v1.get_variable("b" + str(i+1), [W[i],1], initializer = tf.zeros_initializer())
return parameters
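# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Builds the parameters of a hypothetical 12 -> 8 -> 4 -> 1 network and prints
# the variable shapes. A variable scope with AUTO_REUSE avoids name clashes if
# the sketch is run more than once in the same graph.
def _example_initialize_parameters():
    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.variable_scope("example", reuse=tf.compat.v1.AUTO_REUSE):
        parameters = initialize_parameters(W=[8, 4, 1], n_x=12)
    for name in sorted(parameters):
        print(name, parameters[name].shape)  # e.g. W1 (8, 12), b1 (8, 1), ...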
def forward_propagation(X, parameters, activations, dropout_layers = [], dropout_rates= []):
"""
Implements the forward propagation for the model: LINEAR -> activation -> LINEAR -> activation .... -> LINEAR
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"....
the shapes are given in initialize_parameters
activations -- a list containing the activations for each hidden layer in order
dropout_layers -- list whose elements indicate the number of the layer on which dropout should be applied. Example [1,3,4]
    dropout_rates -- list whose elements define the rate of the dropout applied to the layer with the same index in dropout_layers. Example [0.3, 0.5, 0.7]
Returns:
Z_final -- the output of the last LINEAR unit (number of outputs, number of samples (m))
"""
if len(activations) == (len(parameters)/2 - 1):
if len(activations) >= 2:
equations = {}
Z1 = tf.add(tf.matmul(parameters['W1'], X), parameters['b1']) #Z1 = np.dot(parameters['W1'], X) + parameters['b1']
if activations[0] == "relu":
#print("Relu activated")
A1 = tf.nn.relu(Z1)
elif activations[0] == "sigmoid":
#print("Sigmoid activated")
A1 = tf.nn.sigmoid(Z1)
elif activations[0] == "tanh":
#print("Tanh activated")
A1 = tf.nn.tanh(Z1)
elif activations[0] == "none":
#print("No Activation")
A1 = Z1
            if 1 in dropout_layers:
                # apply dropout to the activations of layer 1
                A1 = tf.nn.dropout(A1, dropout_rates[dropout_layers.index(1)])
                print("applied dropout on layer 1 with rate of", dropout_rates[dropout_layers.index(1)])
equations["Z1"] = Z1
equations["A1"] = A1
for i in range(2, (len(activations) + 1)):
x = "Z" + str(i)
y = "A" + str(i)
equations["Z" + str(i)] = tf.add(tf.matmul(parameters['W' + str(i)], equations['A' + str(i-1)]), parameters['b' + str(i)])
if activations[i-1] == "relu":
#print("Relu activated.")
equations["A" + str(i)] = tf.nn.relu(equations["Z" + str(i)])
elif activations[i-1] == "sigmoid":
#print("Sigmoid activated.")
equations["A" + str(i)] = tf.nn.sigmoid(equations["Z" + str(i)])
elif activations[i-1] == "tanh":
#print("Tanh activated.")
equations["A" + str(i)] = tf.nn.tanh(equations["Z" + str(i)])
elif activations[i-1] == "none":
#print("No Activation")
equations["A" + str(i)] = equations["Z" + str(i)]
                if i in dropout_layers:
                    # apply dropout to the activations of layer i
                    equations["A" + str(i)] = tf.nn.dropout(equations["A" + str(i)], dropout_rates[dropout_layers.index(i)])
                    print("applied dropout on layer", i, "with rate of", dropout_rates[dropout_layers.index(i)])
Z_final = tf.add(tf.matmul(parameters['W' + str(i+1)], equations["A" + str(i)]), parameters['b' + str(i+1)])
#Z_final = tf.nn.relu(Z_final)
return Z_final
elif len(activations) == 1:
print("only one activation")
equations = {}
Z1 = tf.add(tf.matmul(parameters['W1'], X), parameters['b1']) #Z1 = np.dot(parameters['W1'], X) + parameters['b1']
            if activations[0] == "relu":
                #print("Relu activated")
                A1 = tf.nn.relu(Z1)
            elif activations[0] == "sigmoid":
                #print("Sigmoid activated")
                A1 = tf.nn.sigmoid(Z1)
            elif activations[0] == "tanh":
                #print("Tanh activated")
                A1 = tf.nn.tanh(Z1)
            elif activations[0] == "none":
                #print("No Activation")
                A1 = Z1
if 1 in dropout_layers: | |
import logging
import os
import shutil
import warnings
from types import TracebackType
from typing import Any, Callable, Dict, List, Optional, Text, Type, Collection
import rasa.core.utils
import rasa.utils.io
from rasa.cli import utils
from rasa.cli.utils import bcolors
from rasa.constants import (
DEFAULT_LOG_LEVEL,
DEFAULT_LOG_LEVEL_LIBRARIES,
ENV_LOG_LEVEL,
ENV_LOG_LEVEL_LIBRARIES,
GLOBAL_USER_CONFIG_PATH,
NEXT_MAJOR_VERSION_FOR_DEPRECATIONS,
)
logger = logging.getLogger(__name__)
class TempDirectoryPath(str):
"""Represents a path to an temporary directory. When used as a context
manager, it erases the contents of the directory on exit.
"""
def __enter__(self) -> "TempDirectoryPath":
return self
def __exit__(
self,
_exc: Optional[Type[BaseException]],
_value: Optional[Exception],
_tb: Optional[TracebackType],
    ) -> None:
if os.path.exists(self):
shutil.rmtree(self)
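# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# TempDirectoryPath behaves like a plain str, but used as a context manager it
# removes the directory on exit. The tempfile call below is only an example.
def _example_temp_directory_path() -> None:
    import tempfile
    with TempDirectoryPath(tempfile.mkdtemp()) as path:
        print(os.path.exists(path))  # True inside the block
    print(os.path.exists(path))      # False afterwards, the directory was removed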
def arguments_of(func: Callable) -> List[Text]:
"""Return the parameters of the function `func` as a list of names."""
import inspect
return list(inspect.signature(func).parameters.keys())
def read_global_config() -> Dict[Text, Any]:
"""Read global Rasa configuration."""
# noinspection PyBroadException
try:
return rasa.utils.io.read_config_file(GLOBAL_USER_CONFIG_PATH)
except Exception:
# if things go south we pretend there is no config
return {}
def set_log_level(log_level: Optional[int] = None):
"""Set log level of Rasa and Tensorflow either to the provided log level or
to the log level specified in the environment variable 'LOG_LEVEL'. If none is set
a default log level will be used."""
import logging
if not log_level:
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
log_level = logging.getLevelName(log_level)
logging.getLogger("rasa").setLevel(log_level)
update_tensorflow_log_level()
update_asyncio_log_level()
update_apscheduler_log_level()
update_socketio_log_level()
os.environ[ENV_LOG_LEVEL] = logging.getLevelName(log_level)
def update_apscheduler_log_level() -> None:
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
apscheduler_loggers = [
"apscheduler",
"apscheduler.scheduler",
"apscheduler.executors",
"apscheduler.executors.default",
]
for logger_name in apscheduler_loggers:
logging.getLogger(logger_name).setLevel(log_level)
logging.getLogger(logger_name).propagate = False
def update_socketio_log_level() -> None:
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
socketio_loggers = ["websockets.protocol", "engineio.server", "socketio.server"]
for logger_name in socketio_loggers:
logging.getLogger(logger_name).setLevel(log_level)
logging.getLogger(logger_name).propagate = False
def update_tensorflow_log_level() -> None:
"""Set the log level of Tensorflow to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'."""
    # Disables libnvinfer, TensorRT, CUDA, AVX2 and FMA warnings (CPU support). This variable needs to be set before the
# first import since some warnings are raised on the first import.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
if log_level == "DEBUG":
tf_log_level = tf.compat.v1.logging.DEBUG
elif log_level == "INFO":
tf_log_level = tf.compat.v1.logging.INFO
elif log_level == "WARNING":
tf_log_level = tf.compat.v1.logging.WARN
else:
tf_log_level = tf.compat.v1.logging.ERROR
tf.compat.v1.logging.set_verbosity(tf_log_level)
logging.getLogger("tensorflow").propagate = False
def update_sanic_log_level(log_file: Optional[Text] = None):
"""Set the log level of sanic loggers to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'."""
from sanic.log import logger, error_logger, access_logger
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logger.setLevel(log_level)
error_logger.setLevel(log_level)
access_logger.setLevel(log_level)
logger.propagate = False
error_logger.propagate = False
access_logger.propagate = False
if log_file is not None:
formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
error_logger.addHandler(file_handler)
access_logger.addHandler(file_handler)
def update_asyncio_log_level() -> None:
"""Set the log level of asyncio to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'."""
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logging.getLogger("asyncio").setLevel(log_level)
def set_log_and_warnings_filters() -> None:
"""
Set log filters on the root logger, and duplicate filters for warnings.
Filters only propagate on handlers, not loggers.
"""
for handler in logging.getLogger().handlers:
handler.addFilter(RepeatedLogFilter())
warnings.filterwarnings("once", category=UserWarning)
def obtain_verbosity() -> int:
"""Returns a verbosity level according to the set log level."""
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
verbosity = 0
if log_level == "DEBUG":
verbosity = 2
if log_level == "INFO":
verbosity = 1
return verbosity
def is_logging_disabled() -> bool:
"""Returns true, if log level is set to WARNING or ERROR, false otherwise."""
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
return log_level == "ERROR" or log_level == "WARNING"
def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]:
"""Sorts a list of dictionaries by their first key."""
return sorted(dicts, key=lambda d: list(d.keys())[0])
def transform_collection_to_sentence(collection: Collection[Text]) -> Text:
"""Transforms e.g. a list like ['A', 'B', 'C'] into a sentence 'A, B and C'."""
x = list(collection)
if len(x) >= 2:
return ", ".join(map(str, x[:-1])) + " and " + x[-1]
return "".join(collection)
# noinspection PyUnresolvedReferences
def class_from_module_path(
module_path: Text, lookup_path: Optional[Text] = None
) -> Any:
"""Given the module name and path of a class, tries to retrieve the class.
The loaded class can be used to instantiate new objects. """
import importlib
# load the module, will raise ImportError if module cannot be loaded
if "." in module_path:
module_name, _, class_name = module_path.rpartition(".")
m = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
return getattr(m, class_name)
else:
module = globals().get(module_path, locals().get(module_path))
if module is not None:
return module
if lookup_path:
# last resort: try to import the class from the lookup path
m = importlib.import_module(lookup_path)
return getattr(m, module_path)
else:
raise ImportError(f"Cannot retrieve class from path {module_path}.")
def minimal_kwargs(
kwargs: Dict[Text, Any], func: Callable, excluded_keys: Optional[List] = None
) -> Dict[Text, Any]:
"""Returns only the kwargs which are required by a function. Keys, contained in
the exception list, are not included.
Args:
kwargs: All available kwargs.
func: The function which should be called.
excluded_keys: Keys to exclude from the result.
Returns:
Subset of kwargs which are accepted by `func`.
"""
excluded_keys = excluded_keys or []
possible_arguments = arguments_of(func)
return {
k: v
for k, v in kwargs.items()
if k in possible_arguments and k not in excluded_keys
}
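# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# The hypothetical _train function accepts only `epochs` and `batch_size`, so
# minimal_kwargs drops the unknown key and honours the exclusion list.
def _example_minimal_kwargs() -> None:
    def _train(epochs: int, batch_size: int = 32):
        return epochs, batch_size
    kwargs = {"epochs": 10, "batch_size": 64, "unknown_flag": True}
    print(minimal_kwargs(kwargs, _train))              # {'epochs': 10, 'batch_size': 64}
    print(minimal_kwargs(kwargs, _train, ["epochs"]))  # {'batch_size': 64}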
def write_global_config_value(name: Text, value: Any) -> None:
"""Read global Rasa configuration."""
try:
os.makedirs(os.path.dirname(GLOBAL_USER_CONFIG_PATH), exist_ok=True)
c = read_global_config()
c[name] = value
rasa.core.utils.dump_obj_as_yaml_to_file(GLOBAL_USER_CONFIG_PATH, c)
except Exception as e:
logger.warning(f"Failed to write global config. Error: {e}. Skipping.")
def read_global_config_value(name: Text, unavailable_ok: bool = True) -> Any:
"""Read a value from the global Rasa configuration."""
def not_found():
if unavailable_ok:
return None
else:
raise ValueError(f"Configuration '{name}' key not found.")
if not os.path.exists(GLOBAL_USER_CONFIG_PATH):
return not_found()
c = read_global_config()
if name in c:
return c[name]
else:
return not_found()
def mark_as_experimental_feature(feature_name: Text) -> None:
"""Warns users that they are using an experimental feature."""
logger.warning(
f"The {feature_name} is currently experimental and might change or be "
"removed in the future 🔬 Please share your feedback on it in the "
"forum (https://forum.rasa.com) to help us make this feature "
"ready for production."
)
def update_existing_keys(
original: Dict[Any, Any], updates: Dict[Any, Any]
) -> Dict[Any, Any]:
"""Iterate through all the updates and update a value in the original dictionary.
If the updates contain a key that is not present in the original dict, it will
be ignored."""
updated = original.copy()
for k, v in updates.items():
if k in updated:
updated[k] = v
return updated
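# Illustrative example (added for clarity, not part of the original module):
#   update_existing_keys({"a": 1, "b": 2}, {"b": 3, "c": 4}) returns {"a": 1, "b": 3};
#   the key "c" is ignored because it is not present in the original dict.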
def lazy_property(function: Callable) -> Any:
"""Allows to avoid recomputing a property over and over.
The result gets stored in a local var. Computation of the property
will happen once, on the first call of the property. All
succeeding calls will use the value stored in the private property."""
attr_name = "_lazy_" + function.__name__
@property
def _lazyprop(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, function(self))
return getattr(self, attr_name)
return _lazyprop
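# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# The decorated property is computed on first access and cached afterwards.
def _example_lazy_property() -> None:
    class Dataset:
        @lazy_property
        def statistics(self):
            print("computing once")
            return {"mean": 0.0}
    d = Dataset()
    d.statistics  # prints "computing once" and caches the result
    d.statistics  # served from the cache, nothing is printed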
def raise_warning(
message: Text,
category: Optional[Type[Warning]] = None,
docs: Optional[Text] = None,
**kwargs: Any,
) -> None:
"""Emit a `warnings.warn` with sensible defaults and a colored warning msg."""
original_formatter = warnings.formatwarning
def should_show_source_line() -> bool:
if "stacklevel" not in kwargs:
if category == UserWarning or category is None:
return False
if category == FutureWarning:
return False
return True
def formatwarning(
message: Text,
category: Optional[Type[Warning]],
filename: Text,
lineno: Optional[int],
line: Optional[Text] = None,
):
"""Function to format a warning the standard way."""
if not should_show_source_line():
if docs:
line = f"More info at {docs}"
else:
line = ""
formatted_message = original_formatter(
message, category, filename, lineno, line
)
return utils.wrap_with_color(formatted_message, color=bcolors.WARNING)
if "stacklevel" not in kwargs:
# try to set useful defaults for the most common warning categories
if category == DeprecationWarning:
kwargs["stacklevel"] = 3
elif category in (UserWarning, FutureWarning):
kwargs["stacklevel"] = 2
warnings.formatwarning = formatwarning
warnings.warn(message, category=category, **kwargs)
warnings.formatwarning = original_formatter
def raise_deprecation_warning(
message: Text,
warn_until_version: Text = NEXT_MAJOR_VERSION_FOR_DEPRECATIONS,
docs: Optional[Text] = None,
**kwargs: Any,
) -> None:
"""
Thin wrapper around `raise_warning()` to raise a deprecation warning. It requires
a version until which we'll warn, and after which the support for the feature will
be removed.
"""
if warn_until_version not in message:
message = f"{message} (will be removed in {warn_until_version})"
# need the correct stacklevel now
kwargs.setdefault("stacklevel", 3)
# we're raising a `FutureWarning` instead of a `DeprecationWarning` because
# we want these warnings to be visible in the terminal of our users
# https://docs.python.org/3/library/warnings.html#warning-categories
raise_warning(message, FutureWarning, docs, **kwargs)
class RepeatedLogFilter(logging.Filter):
"""Filter repeated log records."""
last_log = None
def filter(self, record):
current_log = (
record.levelno,
record.pathname,
record.lineno,
record.msg,
            record.args,
        )
        if current_log != self.last_log:
            self.last_log = current_log
            return True
        return False
| |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import copy
import time
from test.intelliflow.core.signal_processing.dimension_constructs.test_dimension_spec import TestDimensionSpec
from test.intelliflow.core.signal_processing.routing_runtime_constructs import create_incoming_signal
from test.intelliflow.core.signal_processing.signal.test_signal_link_node import signal_dimension_tuple
import pytest
from intelliflow.core.platform.constructs import RoutingHookInterface
from intelliflow.core.serialization import dumps, loads
from intelliflow.core.signal_processing.definitions.dimension_defs import Type
from intelliflow.core.signal_processing.routing_runtime_constructs import *
from intelliflow.core.signal_processing.signal import *
from intelliflow.core.signal_processing.signal_source import InternalDatasetSignalSourceAccessSpec
from intelliflow.core.signal_processing.slot import SlotType
def _create_hook(code: str = "pass") -> Slot:
return Slot(SlotType.SYNC_INLINED, dumps(code), None, None, None, None)
class TestRoute:
@classmethod
def _route_1_basic(cls):
from test.intelliflow.core.signal_processing.test_slot import TestSlot
from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
from test.intelliflow.core.signal_processing.signal.test_signal_link_node import TestSignalLinkNode
signal_link_node = copy.deepcopy(TestSignalLinkNode.signal_link_node_1)
output_spec = DimensionSpec.load_from_pretty({"output_dim": {type: Type.LONG}})
output_dim_link_matrix = [
SignalDimensionLink(
signal_dimension_tuple(None, "output_dim"), lambda x: x, signal_dimension_tuple(TestSignal.signal_internal_1, "dim_1_1")
)
]
output_filter = signal_link_node.get_output_filter(
output_spec,
            # Logical equivalent -> output_dim = signal_internal_1('dim_1_1')
output_dim_link_matrix,
)
output_signal = Signal(
TestSignal.signal_internal_1.type,
InternalDatasetSignalSourceAccessSpec("sample_data", output_spec, **{}),
SignalDomainSpec(output_spec, output_filter, TestSignal.signal_internal_1.domain_spec.integrity_check_protocol),
"sample_data",
)
return Route(
f"InternalDataNode-{output_signal.alias}",
signal_link_node,
output_signal,
output_dim_link_matrix,
[TestSlot.slot_batch_compute_basic],
False,
)
@classmethod
def _route_2_two_inputs_linked(cls):
from test.intelliflow.core.signal_processing.test_slot import TestSlot
from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
from test.intelliflow.core.signal_processing.signal.test_signal_link_node import TestSignalLinkNode
signal_link_node = copy.deepcopy(TestSignalLinkNode.signal_link_node_2)
output_spec = DimensionSpec.load_from_pretty({"output_dim": {type: Type.LONG}})
output_dim_link_matrix = [
SignalDimensionLink(
signal_dimension_tuple(None, "output_dim"), lambda x: x, signal_dimension_tuple(TestSignal.signal_internal_1, "dim_1_1")
)
]
output_filter = signal_link_node.get_output_filter(output_spec, output_dim_link_matrix)
output_signal = Signal(
TestSignal.signal_internal_1.type,
InternalDatasetSignalSourceAccessSpec("sample_data_2", output_spec, **{}),
SignalDomainSpec(output_spec, output_filter, TestSignal.signal_internal_1.domain_spec.integrity_check_protocol),
"sample_data_2",
)
return Route(
f"InternalDataNode-{output_signal.alias}",
signal_link_node,
output_signal,
output_dim_link_matrix,
[TestSlot.slot_batch_compute_basic],
False,
)
@classmethod
def _route_3_three_inputs_unlinked(cls):
from test.intelliflow.core.signal_processing.test_slot import TestSlot
from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
from test.intelliflow.core.signal_processing.signal.test_signal_link_node import TestSignalLinkNode
signal_link_node = copy.deepcopy(TestSignalLinkNode.signal_link_node_3_complex)
# create sample expected output
output_spec = DimensionSpec.load_from_pretty(
{"output_dim_1": {type: Type.LONG, "output_dim_2": {type: Type.LONG, "output_dim_3": {type: Type.LONG}}}}
)
output_dim_link_matrix = [
SignalDimensionLink(
signal_dimension_tuple(None, "output_dim_1"),
lambda x: x,
signal_dimension_tuple(TestSignal.signal_internal_complex_1, "dim_1_1"),
),
SignalDimensionLink(
signal_dimension_tuple(None, "output_dim_2"),
# input's sub dimension is of type String, convert it.
# because output spec expects it to be of type Long.
lambda x: ord(x),
signal_dimension_tuple(TestSignal.signal_internal_complex_1, "dim_1_2"),
),
SignalDimensionLink(
signal_dimension_tuple(None, "output_dim_3"),
# and this one is from the 3rd input (which has only one dim 'dim_1_1')
lambda x: x,
signal_dimension_tuple(TestSignal.signal_s3_1, "dim_1_1"),
),
]
output_filter = signal_link_node.get_output_filter(output_spec, output_dim_link_matrix)
output_signal = Signal(
SignalType.INTERNAL_PARTITION_CREATION,
InternalDatasetSignalSourceAccessSpec("sample_data_3", output_spec, **{}),
SignalDomainSpec(output_spec, output_filter, TestSignal.signal_internal_complex_1.domain_spec.integrity_check_protocol),
"sample_data_3",
)
return Route(
f"InternalDataNode-{output_signal.alias}",
signal_link_node,
output_signal,
output_dim_link_matrix,
[TestSlot.slot_batch_compute_basic],
False,
)
@classmethod
def _route_3_three_inputs_linked(cls):
from test.intelliflow.core.signal_processing.test_slot import TestSlot
from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
from test.intelliflow.core.signal_processing.signal.test_signal_link_node import TestSignalLinkNode
signal_link_node = copy.deepcopy(TestSignalLinkNode.signal_link_node_3_complex)
        # add links (since the dimension names are the same, use the auto-linking of dimensions),
        # so that:
        #   signal_internal_complex_1['dim_1_1'] == signal_s3_1['dim_1_1'], etc
signal_link_node.compensate_missing_links()
# create sample expected output
output_spec = DimensionSpec.load_from_pretty(
{
"output_dim_1": {
type: Type.LONG,
"output_dim_2": {
type: Type.LONG,
},
}
}
)
output_dim_link_matrix = [
SignalDimensionLink(
signal_dimension_tuple(None, "output_dim_1"),
# from the second dimension of the first/second inputs (convert to Long)
lambda x: ord(x),
signal_dimension_tuple(TestSignal.signal_internal_complex_1, "dim_1_2"),
),
SignalDimensionLink(
signal_dimension_tuple(None, "output_dim_2"),
# and this one is from the 3rd input (which has only one dim 'dim_1_1')
lambda x: x,
signal_dimension_tuple(TestSignal.signal_s3_1, "dim_1_1"),
),
]
output_filter = signal_link_node.get_output_filter(output_spec, output_dim_link_matrix)
output_signal = Signal(
SignalType.INTERNAL_PARTITION_CREATION,
InternalDatasetSignalSourceAccessSpec("sample_data_4", output_spec, **{}),
SignalDomainSpec(output_spec, output_filter, TestSignal.signal_internal_complex_1.domain_spec.integrity_check_protocol),
"sample_data_4",
)
return Route(
f"InternalDataNode-{output_signal.alias}",
signal_link_node,
output_signal,
output_dim_link_matrix,
[TestSlot.slot_batch_compute_basic],
False,
)
def test_route_init(self):
assert self._route_1_basic()
def test_route_init_with_hooks(self):
route = self._route_1_basic()
Route(
route.route_id,
route.link_node,
route.output,
route._output_dim_matrix,
route.slots,
False,
RouteExecutionHook(
on_exec_begin=_create_hook(),
on_exec_skipped=_create_hook(),
on_compute_success=_create_hook(),
on_compute_failure=_create_hook(),
on_success=_create_hook(),
on_failure=_create_hook(),
checkpoints=[RouteCheckpoint(5, _create_hook())],
),
30 * 24 * 60 * 60,
RoutePendingNodeHook(on_pending_node_created=_create_hook(), on_expiration=_create_hook(), checkpoints=None),
)
# check another instantiation case + checkpoint sorting
assert (
Route(
route.route_id,
route.link_node,
route.output,
route._output_dim_matrix,
route.slots,
False,
RouteExecutionHook(
on_exec_begin=_create_hook(),
on_exec_skipped=_create_hook(),
on_compute_success=_create_hook(),
on_compute_failure=_create_hook(),
on_success=_create_hook(),
on_failure=_create_hook(),
checkpoints=[],
),
None,
RoutePendingNodeHook(
on_pending_node_created=_create_hook(),
on_expiration=None,
checkpoints=[RouteCheckpoint(2, _create_hook()), RouteCheckpoint(1, _create_hook())],
),
)
.pending_node_hook.checkpoints[0]
.checkpoint_in_secs
== 1
)
def test_route_init_with_hook_chain(self):
route = self._route_1_basic()
callback1_var = None
callback1_var_expected = 1
def _callback1(*args, **kwargs):
nonlocal callback1_var
callback1_var = callback1_var_expected
callback2_var = None
callback2_var_expected = 2
def _callback2(*args, **kwargs):
nonlocal callback2_var
callback2_var = callback2_var_expected
hook1 = RouteExecutionHook(
on_exec_begin=_create_hook(),
on_exec_skipped=_callback1,
on_compute_success=_create_hook(),
on_compute_failure=_create_hook(),
on_success=_create_hook(),
on_failure=_create_hook(),
checkpoints=[RouteCheckpoint(5, _create_hook())],
)
hook2 = RouteExecutionHook(
on_exec_begin=_create_hook(),
on_exec_skipped=_callback2,
on_compute_success=_create_hook(),
on_compute_failure=_create_hook(),
on_success=_create_hook(),
on_failure=_create_hook(),
checkpoints=[RouteCheckpoint(10, _create_hook())],
)
exec_hook_chain = hook1.chain(hook2)
pending_hook1 = RoutePendingNodeHook(
on_pending_node_created=_create_hook(), on_expiration=_create_hook(), checkpoints=[RouteCheckpoint(5, _create_hook())]
)
pending_hook2 = RoutePendingNodeHook(
on_pending_node_created=_create_hook(), on_expiration=_create_hook(), checkpoints=[RouteCheckpoint(10, _create_hook())]
)
pending_hook3 = RoutePendingNodeHook(
on_pending_node_created=_create_hook(), on_expiration=_create_hook(), checkpoints=[RouteCheckpoint(13, _create_hook())]
)
pending_hook_chain = pending_hook1.chain(pending_hook2, pending_hook3)
pending_hook_chain_2 = pending_hook1.chain(pending_hook2).chain(pending_hook3)
Route(
route.route_id,
route.link_node,
route.output,
route._output_dim_matrix,
route.slots,
False,
exec_hook_chain,
24 * 60 * 60,
pending_hook_chain,
)
assert len(exec_hook_chain.checkpoints) == 2
assert len(pending_hook_chain.checkpoints) == 3
assert len(pending_hook_chain_2.checkpoints) == 3
exec_hook_chain.on_exec_begin()
pending_hook_chain.on_pending_node_created()
pending_hook_chain_2.on_expiration()
exec_hook_chain.on_exec_skipped()
assert callback1_var == callback1_var_expected
assert callback2_var == callback2_var_expected
def test_route_equality(self):
assert self._route_1_basic() == self._route_1_basic()
assert Route("test", None, None, [], [], False) == Route("test", None, None, [], [], False)
assert Route("test", None, None, [], [], False) != Route("test2", None, None, [], [], False)
assert self._route_1_basic() == self._route_1_basic().clone()
def test_route_check_integrity(self):
route = self._route_1_basic()
assert route.check_integrity(self._route_1_basic())
route2 = self._route_2_two_inputs_linked()
        # Route is very strict about an integrity check against a different Route. This is critical
        # for the whole Routing module and should not occur; it is a safeguard against a high-level (e.g. RoutingTable) bug.
with pytest.raises(ValueError):
assert route.check_integrity(route2)
        # make the ids equal so that the check moves on to the other fields
route2._id = route.route_id
assert not route.check_integrity(route2)
assert route.check_integrity(Route(route.route_id, route.link_node, route.output, route._output_dim_matrix, route.slots, False))
assert not route.check_integrity(
Route(route.route_id, route2.link_node, route.output, route._output_dim_matrix, route.slots, False)
)
assert not route.check_integrity(
Route(route.route_id, route.link_node, route2.output, route._output_dim_matrix, route.slots, False)
)
assert not route.check_integrity(Route(route.route_id, route.link_node, route.output, [], route.slots, False))
assert not route.check_integrity(Route(route.route_id, route.link_node, route.output, route._output_dim_matrix, [], False))
def test_route_check_integrity_noops(self):
"""show that some type of changes in route should not invalidate the integrity"""
route = self._route_3_three_inputs_linked()
# dim matrix ordering should not alter the semantics of route
new_route = copy.deepcopy(route)
new_route.link_node.link_matrix.reverse()
new_route.output_dim_matrix.reverse()
        # TODO evaluate slot order? currently impacting integrity but not as critical as the dim matrices
assert route.check_integrity(new_route)
@pytest.mark.parametrize(
"execution_hook_1, pending_node_ttl_1, pending_hook_1, execution_hook_2, pending_node_ttl_2, pending_hook_2, result",
[
(None, 30 * 24 * 60 * 60, None, None, 24 * 60 * 60, None, False),
(
RouteExecutionHook(
on_exec_begin=_create_hook(),
on_exec_skipped=_create_hook(),
on_compute_success=_create_hook(),
on_compute_failure=_create_hook(),
on_success=_create_hook(),
on_failure=_create_hook(),
checkpoints=[RouteCheckpoint(checkpoint_in_secs=5, slot=_create_hook())],
),
30 * 24 * 60 * 60,
RoutePendingNodeHook(
on_pending_node_created=_create_hook(),
on_expiration=_create_hook(),
checkpoints=[RouteCheckpoint(checkpoint_in_secs=1, slot=_create_hook()), RouteCheckpoint(2, _create_hook())],
),
RouteExecutionHook(
on_exec_begin=_create_hook(),
on_exec_skipped=_create_hook(),
on_compute_success=_create_hook(),
on_compute_failure=_create_hook(),
on_success=_create_hook(),
on_failure=_create_hook(),
checkpoints=[RouteCheckpoint(5, _create_hook())],
),
30 * 24 * 60 * 60,
RoutePendingNodeHook(
on_pending_node_created=_create_hook(),
on_expiration=_create_hook(),
                    # also test that checkpoint order should not matter as long as the values are the same
checkpoints=[RouteCheckpoint(2, _create_hook()), RouteCheckpoint(1, _create_hook())],
),
True,
),
(
RouteExecutionHook(on_exec_begin=_create_hook()),
30 * 24 * 60 * 60,
RoutePendingNodeHook(),
RouteExecutionHook(on_exec_begin=_create_hook()),
30 * 24 * 60 * 60,
RoutePendingNodeHook(),
True,
),
(
RouteExecutionHook(on_exec_begin=_create_hook("print('diff')")),
30 * 24 * 60 * 60,
RoutePendingNodeHook(),
RouteExecutionHook(on_exec_begin=_create_hook()),
30 * 24 * 60 * 60,
RoutePendingNodeHook(),
False,
),
(None, None, None, None, None, None, True),
(
RouteExecutionHook(on_exec_begin=None, on_exec_skipped=None),
None,
None,
RouteExecutionHook(on_exec_begin=None, on_exec_skipped=_create_hook()),
None,
None,
False,
),
(
RouteExecutionHook(on_exec_begin=None, on_exec_skipped=_create_hook()),
None,
None,
RouteExecutionHook(on_exec_begin=None, on_exec_skipped=None),
None,
None,
False,
),
(
RouteExecutionHook(
on_exec_begin=None,
on_exec_skipped=None,
on_compute_success=None,
on_compute_failure=None,
on_success=None,
on_failure=None,
checkpoints=[RouteCheckpoint(1, _create_hook())],
),
None,
RoutePendingNodeHook(),
RouteExecutionHook(
on_exec_begin=None,
on_exec_skipped=None,
on_compute_success=None,
on_compute_failure=None,
on_success=None,
on_failure=None,
# change the value of first checkpoint
checkpoints=[RouteCheckpoint(5, _create_hook())],
),
None,
RoutePendingNodeHook(),
False,
),
(
RouteExecutionHook(),
None,
RoutePendingNodeHook(
on_pending_node_created=_create_hook(), on_expiration=None, checkpoints=[RouteCheckpoint(2, _create_hook())]
),
RouteExecutionHook(),
None,
RoutePendingNodeHook(
on_pending_node_created=_create_hook(),
on_expiration=None,
                    # also test that checkpoint order should not matter as long as the values are the same
checkpoints=[RouteCheckpoint(1, _create_hook())],
),
False,
),
(
None,
None,
RoutePendingNodeHook(on_pending_node_created=None, on_expiration=None, checkpoints=[RouteCheckpoint(1, _create_hook())]),
None,
None,
RoutePendingNodeHook(
on_pending_node_created=None,
on_expiration=None,
                    # also test that checkpoint order should not matter as long as the values are the same
checkpoints=[RouteCheckpoint(1, _create_hook("print('diff 2')"))],
),
False,
),
],
)
def test_route_check_auxiliary_integrity(
self, execution_hook_1, pending_node_ttl_1, pending_hook_1, execution_hook_2, pending_node_ttl_2, pending_hook_2, result
):
route = self._route_1_basic()
assert (
Route(
route.route_id,
route.link_node,
route.output,
route._output_dim_matrix,
route.slots,
False,
execution_hook_1,
pending_node_ttl_1,
pending_hook_1,
).check_auxiliary_data_integrity(
Route(
route.route_id,
route.link_node,
route.output,
route._output_dim_matrix,
route.slots,
False,
execution_hook_2,
pending_node_ttl_2,
pending_hook_2,
)
)
== result
)
def test_route_serialization(self):
route = self._route_1_basic()
assert route == loads(dumps(route))
def test_route_receive_basic(self):
from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
route = self._route_1_basic()
# route will reject incompatible signal
assert not route.receive(create_incoming_signal(TestSignal.signal_s3_1, [1]))
assert not route._pending_nodes
# successful trigger # 1
response: Optional[Route.Response] = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [1]))
assert response
assert len(response.new_execution_contexts) == 1
assert response.new_execution_contexts[0].slots
assert DimensionFilter.check_equivalence(
response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({1: {}})
)
# since the node completed immediately (since it has only one input),
# also removed from the internal pending nodes.
assert not route._pending_nodes
# successful trigger # 2
response: Optional[Route.Response] = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [2]))
assert DimensionFilter.check_equivalence(
response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({2: {}})
)
# since the node completed immediately (since it has only one input),
        # also removed from the internal pending nodes.
| |
# Repository: pgniewko/deep-toxin
# -*- coding: utf-8 -*-
"""
##############################################################################
The calculation of molecular constitutional indices based on the topological
structure of a molecule. You can get 30 molecular constitutional descriptors. You can freely
use and distribute it. If you have any problem, please contact us promptly!
Authors: <NAME> and <NAME>.
Date: 2012.09.18
Email: <EMAIL>
##############################################################################
"""
from rdkit import Chem
# from rdkit.Chem import rdchem
from rdkit.Chem import Lipinski as LPK
# import math
Version = 1.0
#############################################################
def CalculateMolWeight(mol):
"""
#################################################################
Calculation of molecular weight
Note that not including H
---->Weight
Usage:
result=CalculateMolWeight(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
MolWeight = 0
for atom in mol.GetAtoms():
MolWeight = MolWeight + atom.GetMass()
return MolWeight
def CalculateAverageMolWeight(mol):
"""
#################################################################
    Calculation of average molecular weight
Note that not including H
---->AWeight
Usage:
result=CalculateAverageMolWeight(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
MolWeight = 0
for atom in mol.GetAtoms():
MolWeight = MolWeight + atom.GetMass()
return MolWeight / mol.GetNumAtoms()
def CalculateHydrogenNumber(mol):
"""
#################################################################
Calculation of Number of Hydrogen in a molecule
---->nhyd
Usage:
result=CalculateHydrogenNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
i = 0
Hmol = Chem.AddHs(mol)
for atom in Hmol.GetAtoms():
if atom.GetAtomicNum() == 1:
i = i + 1
return i
def CalculateHalogenNumber(mol):
"""
#################################################################
Calculation of Halogen counts in a molecule
---->nhal
Usage:
result=CalculateHalogenNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
i = 0
for atom in mol.GetAtoms():
if (
atom.GetAtomicNum() == 9
or atom.GetAtomicNum() == 17
or atom.GetAtomicNum() == 35
or atom.GetAtomicNum() == 53
):
i = i + 1
return i
def CalculateHeteroNumber(mol):
"""
#################################################################
Calculation of Hetero counts in a molecule
---->nhet
Usage:
result=CalculateHeteroNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
i = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 6 or atom.GetAtomicNum() == 1:
i = i + 1
return mol.GetNumAtoms() - i
def CalculateHeavyAtomNumber(mol):
"""
#################################################################
Calculation of Heavy atom counts in a molecule
---->nhev
Usage:
result=CalculateHeavyAtomNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
    return mol.GetNumHeavyAtoms()
def _CalculateElementNumber(mol, AtomicNumber=6):
"""
#################################################################
**Internal used only**
Calculation of element counts with atomic number equal to n in a molecule
#################################################################
"""
i = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == AtomicNumber:
i = i + 1
return i
def CalculateFluorinNumber(mol):
"""
#################################################################
    Calculation of Fluorine counts in a molecule
---->ncof
Usage:
result=CalculateFluorinNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementNumber(mol, AtomicNumber=9)
def CalculateChlorinNumber(mol):
"""
#################################################################
    Calculation of Chlorine counts in a molecule
---->ncocl
Usage:
result=CalculateChlorinNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementNumber(mol, AtomicNumber=17)
def CalculateBromineNumber(mol):
"""
#################################################################
Calculation of Bromine counts in a molecule
---->ncobr
Usage:
result=CalculateBromineNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementNumber(mol, AtomicNumber=35)
def CalculateIodineNumber(mol):
"""
#################################################################
Calculation of Iodine counts in a molecule
---->ncoi
Usage:
result=CalculateIodineNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementNumber(mol, AtomicNumber=53)
def CalculateCarbonNumber(mol):
"""
#################################################################
Calculation of Carbon number in a molecule
---->ncarb
Usage:
result=CalculateCarbonNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementNumber(mol, AtomicNumber=6)
def CalculatePhosphorNumber(mol):
"""
#################################################################
    Calculation of Phosphorus counts in a molecule
---->nphos
Usage:
result=CalculatePhosphorNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementNumber(mol, AtomicNumber=15)
def CalculateSulfurNumber(mol):
"""
#################################################################
Calculation of Sulfur counts in a molecule
---->nsulph
Usage:
result=CalculateSulfurNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementNumber(mol, AtomicNumber=16)
def CalculateOxygenNumber(mol):
"""
#################################################################
Calculation of Oxygen counts in a molecule
---->noxy
Usage:
result=CalculateOxygenNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementNumber(mol, AtomicNumber=8)
def CalculateNitrogenNumber(mol):
"""
#################################################################
Calculation of Nitrogen counts in a molecule
---->nnitro
Usage:
result=CalculateNitrogenNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementNumber(mol, AtomicNumber=7)
def CalculateRingNumber(mol):
"""
#################################################################
Calculation of ring counts in a molecule
---->nring
Usage:
result=CalculateRingNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return Chem.GetSSSR(mol)
def CalculateRotationBondNumber(mol):
"""
#################################################################
    Calculation of rotatable bond counts in a molecule
    ---->nrot
    Note that this uses Lipinski's rotatable bond definition, which is not
    simply the number of single bonds in the molecule.
Usage:
result=CalculateRotationBondNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return LPK.NumRotatableBonds(mol)
def CalculateHdonorNumber(mol):
"""
#################################################################
    Calculation of Hydrogen bond donor counts in a molecule
---->ndonr
Usage:
result=CalculateHdonorNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return LPK.NumHDonors(mol)
def CalculateHacceptorNumber(mol):
"""
#################################################################
Calculation of Hydrogen bond acceptor counts in a molecule
---->naccr
Usage:
result=CalculateHacceptorNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return LPK.NumHAcceptors(mol)
def CalculateSingleBondNumber(mol):
"""
#################################################################
Calculation of single bond counts in a molecule
---->nsb
Usage:
result=CalculateSingleBondNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
i = 0
for bond in mol.GetBonds():
if bond.GetBondType().name == "SINGLE":
i = i + 1
return i
def CalculateDoubleBondNumber(mol):
"""
#################################################################
Calculation of double bond counts in a molecule
---->ndb
Usage:
result=CalculateDoubleBondNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
i = 0
for bond in mol.GetBonds():
if bond.GetBondType().name == "DOUBLE":
i = i + 1
return i
def CalculateTripleBondNumber(mol):
"""
#################################################################
Calculation of triple bond counts in a molecule
---->ntb
Usage:
result=CalculateTripleBondNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
i = 0
for bond in mol.GetBonds():
if bond.GetBondType().name == "TRIPLE":
i = i + 1
return i
def CalculateAromaticBondNumber(mol):
"""
#################################################################
Calculation of aromatic bond counts in a molecule
---->naro
Usage:
result=CalculateAromaticBondNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
i = 0
for bond in mol.GetBonds():
if bond.GetBondType().name == "AROMATIC":
i = i + 1
return i
def CalculateAllAtomNumber(mol):
"""
#################################################################
Calculation of all atom counts in a molecule
---->nta
Usage:
result=CalculateAllAtomNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return Chem.AddHs(mol).GetNumAtoms()
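# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Computes a few of the constitutional descriptors above for ethanol; the
# SMILES string is only an example.
def _example_constitutional_descriptors():
    mol = Chem.MolFromSmiles("CCO")
    print(CalculateCarbonNumber(mol))    # 2
    print(CalculateOxygenNumber(mol))    # 1
    print(CalculateHydrogenNumber(mol))  # 6
    print(CalculateHdonorNumber(mol))    # 1 (the hydroxyl group)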
def _CalculatePathN(mol, PathLength=2):
"""
#################################################################
*Internal Use Only*
Calculation of the counts of path length N for a molecule
---->PC1-PC6
Usage:
    result=_CalculatePathN(mol,PathLength)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return len(Chem.FindAllPathsOfLengthN(mol, PathLength, useBonds=1))
def CalculatePath1(mol):
"""
#################################################################
Calculation of the counts of path length 1 for a molecule
#################################################################
"""
return _CalculatePathN(mol, 1)
def CalculatePath2(mol):
"""
#################################################################
Calculation of the counts of path length 2 for a molecule
#################################################################
"""
return _CalculatePathN(mol, 2)
def CalculatePath3(mol):
"""
#################################################################
Calculation of the counts of path length 3 for a molecule
#################################################################
"""
return _CalculatePathN(mol, 3)
def CalculatePath4(mol):
"""
#################################################################
Calculation of the counts of path length 4 for a molecule
#################################################################
"""
return _CalculatePathN(mol, 4)
def CalculatePath5(mol):
"""
#################################################################
Calculation of the counts of path length 5 for a molecule
#################################################################
"""
return _CalculatePathN(mol, 5)
def CalculatePath6(mol):
"""
#################################################################
Calculation of the counts of path length 6 for a molecule
#################################################################
"""
    return _CalculatePathN(mol, 6)
| |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import cPickle as pickle
from uuid import UUID
class HFindTwoApplyElementsWithTraceLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HFindTwoApplyElementsWithTraceLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HFindTwoApplyElementsWithTraceLHS, self).__init__(name='HFindTwoApplyElementsWithTraceLHS', num_nodes=7, edges=[])
# Add the edges
self.add_edges([(3, 0), (0, 5), (6, 1), (1, 4), (5, 2), (2, 4)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_pre__GM2AUTOSAR_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_constraint__"] = """if PreNode('4')['classtype'] == PreNode('3')['classtype']:
if len([i for i in graph.neighbors(PreNode('4').index) if graph.vs[i]['mm__'] == 'apply_contains']) == 0:
return True
return False
"""
self["name"] = """"""
self["GUID__"] = UUID('02cd9831-fcc7-4958-9de6-3053378bf1c6')
# Set the node attributes
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_label__"] = """10"""
self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[0]["mm__"] = """MT_pre__apply_contains"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["GUID__"] = UUID('119659ed-dfaa-4d7c-99e0-46613f599969')
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_pre__type"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """12"""
self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[1]["mm__"] = """MT_pre__backward_link"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["GUID__"] = UUID('5c3a2b43-0013-4803-b4b3-836c1e4ce7fb')
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_label__"] = """11"""
self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[2]["mm__"] = """MT_pre__trace_link"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["GUID__"] = UUID('d8fb8a92-95ff-4430-89e7-2065538da51b')
self.vs[3]["MT_subtypeMatching__"] = False
self.vs[3]["MT_label__"] = """1"""
self.vs[3]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[3]["mm__"] = """MT_pre__ApplyModel"""
self.vs[3]["MT_dirty__"] = False
self.vs[3]["GUID__"] = UUID('d6405da9-989d-41f9-8fb6-d06bfe674080')
self.vs[4]["MT_subtypeMatching__"] = True
self.vs[4]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_pivotIn__"] = """element1"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__VirtualDevice'
p2
aS'MT_pre__Distributable'
p3
aS'MT_pre__Signal'
p4
aS'MT_pre__ExecFrame'
p5
aS'MT_pre__ECU'
p6
a.""")
self.vs[4]["mm__"] = """MT_pre__MetaModelElement_S"""
self.vs[4]["MT_dirty__"] = False
self.vs[4]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["GUID__"] = UUID('090c34e2-8e32-4255-acd6-0b50a42b7ff0')
self.vs[5]["MT_pivotOut__"] = """element1"""
self.vs[5]["MT_subtypeMatching__"] = True
self.vs[5]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[5]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[5]["MT_label__"] = """3"""
self.vs[5]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__EcuInstance'
p2
aS'MT_pre__System'
p3
aS'MT_pre__SystemMapping'
p4
aS'MT_pre__ComponentPrototype'
p5
aS'MT_pre__SwCompToEcuMapping_component'
p6
aS'MT_pre__CompositionType'
p7
aS'MT_pre__PPortPrototype'
p8
aS'MT_pre__SwcToEcuMapping'
p9
aS'MT_pre__SoftwareComposition'
p10
aS'MT_pre__RPortPrototype'
p11
aS'MT_pre__PortPrototype'
p12
aS'MT_pre__ComponentType'
p13
a.""")
self.vs[5]["mm__"] = """MT_pre__MetaModelElement_T"""
self.vs[5]["MT_dirty__"] = False
self.vs[5]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[5]["GUID__"] = UUID('a219eb52-212c-4e1e-a21f-08509bdfa3dc')
self.vs[6]["MT_pivotOut__"] = """element2"""
self.vs[6]["MT_subtypeMatching__"] = True
self.vs[6]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[6]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[6]["MT_label__"] = """4"""
self.vs[6]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__EcuInstance'
p2
aS'MT_pre__System'
p3
aS'MT_pre__SystemMapping'
p4
aS'MT_pre__ComponentPrototype'
p5
aS'MT_pre__SwCompToEcuMapping_component'
p6
aS'MT_pre__CompositionType'
p7
aS'MT_pre__PPortPrototype'
p8
aS'MT_pre__SwcToEcuMapping'
p9
aS'MT_pre__SoftwareComposition'
p10
aS'MT_pre__RPortPrototype'
p11
aS'MT_pre__PortPrototype'
p12
aS'MT_pre__ComponentType'
p13
a.""")
self.vs[6]["mm__"] = """MT_pre__MetaModelElement_T"""
self.vs[6]["MT_dirty__"] = False
self.vs[6]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[6]["GUID__"] = UUID('b8c48f5b-5f5f-4b0b-95ee-03bfdc065909')
def eval_type12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype4(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
Provides a sparse representation of volumetric and/or surface data
The data can be either defined per voxel/vertex (:class:`DenseCifti`) or per parcel (:class:`ParcelCifti`).
The data can be read from NIFTI, GIFTI, or CIFTI files.
Non-sparse volumetric or surface representations can be extracted.
"""
from nibabel.cifti2 import cifti2_axes
from typing import Sequence, Optional, Union
import numpy as np
from fsl.data import image
import nibabel as nib
from fsl.utils.path import addExt
dense_extensions = {
cifti2_axes.BrainModelAxis: '.dconn.nii',
cifti2_axes.ParcelsAxis: '.dpconn.nii',
cifti2_axes.SeriesAxis: '.dtseries.nii',
cifti2_axes.ScalarAxis: '.dscalar.nii',
cifti2_axes.LabelAxis: '.dlabel.nii',
}
parcel_extensions = {
cifti2_axes.BrainModelAxis: '.pdconn.nii',
cifti2_axes.ParcelsAxis: '.pconn.nii',
cifti2_axes.SeriesAxis: '.ptseries.nii',
cifti2_axes.ScalarAxis: '.pscalar.nii',
cifti2_axes.LabelAxis: '.plabel.nii',
}
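# Illustrative sketch (not part of the module API; the helper name is
# hypothetical): how the extension tables above are used.  The suffix is
# chosen from the type of the axis describing the non-greyordinate /
# non-parcel dimension, which is what the ``extension`` properties below
# look up via ``type(self.axes[-2])``.
def _example_default_extension(axes, dense=True):
    """Return the default CIFTI suffix for a sequence of axes (illustration only)."""
    table = dense_extensions if dense else parcel_extensions
    if len(axes) == 1:
        # 1D data is written as a scalar map ('.dscalar.nii' / '.pscalar.nii')
        return table[cifti2_axes.ScalarAxis]
    return table[type(axes[-2])]
# e.g. _example_default_extension([cifti2_axes.SeriesAxis(0, 1, 10), bm_axis]) == '.dtseries.nii'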
class Cifti:
"""
Parent class for the two types of CIFTI files.
The type of the CIFTI file is determined by the last axis, which can be one of:
- :py:class:`BrainModelAxis <cifti2_axes.BrainModelAxis>`
- :py:class:`ParcelsAxis <cifti2_axes.ParcelsAxis>`
"""
def __init__(self, arr: np.ndarray, axes: Sequence[Optional[cifti2_axes.Axis]]):
"""
Defines a new dataset in greyordinate space
:param arr: (..., N) array for N greyordinates or parcels
:param axes: sequence of CIFTI axes describing the data along each dimension; can contain Nones for undefined axes
"""
self.arr = arr
axes = tuple(axes)
while self.arr.ndim > len(axes):
axes = (None, ) + axes
self.axes = axes
if not all(ax is None or len(ax) == sz for ax, sz in zip(axes, self.arr.shape)):
raise ValueError(f"Shape of axes {tuple(-1 if ax is None else len(ax) for ax in axes)} does not "
f"match shape of array {self.arr.shape}")
def to_cifti(self, default_axis=None):
"""
Create a CIFTI image from the data
:param default_axis: What to use as an axis along any undefined dimensions
- By default an error is raised
- if set to "scalar" a ScalarAxis is used with names of "default {index}"
- if set to "series" a SeriesAxis is used
:return: nibabel CIFTI image
"""
if any(ax is None for ax in self.axes):
if default_axis is None:
raise ValueError("Can not store to CIFTI without defining what is stored along each dimension")
elif default_axis == 'scalar':
def get_axis(n: int):
return cifti2_axes.ScalarAxis([f'default {idx + 1}' for idx in range(n)])
elif default_axis == 'series':
def get_axis(n: int):
return cifti2_axes.SeriesAxis(0, 1, n)
else:
raise ValueError(f"default_axis should be set to None, 'scalar', or 'series', not {default_axis}")
new_axes = [
get_axis(sz) if ax is None else ax
for ax, sz in zip(self.axes, self.arr.shape)
]
else:
new_axes = list(self.axes)
data = self.arr
if data.ndim == 1:
# CIFTI axes are always at least 2D
data = data[None, :]
new_axes.insert(0, cifti2_axes.ScalarAxis(['default']))
return nib.Cifti2Image(data, header=new_axes)
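# Illustrative usage of ``to_cifti`` (comment-only sketch; ``dense`` is a
# hypothetical DenseCifti instance whose leading axis was left undefined):
#
#     img = dense.to_cifti(default_axis='scalar')   # nibabel Cifti2Image
#     img.to_filename('example.dscalar.nii')
#
# Without ``default_axis``, any axis left as None raises a ValueError.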
@classmethod
def from_cifti(cls, filename, writable=False):
"""
Creates a new greyordinate object from a CIFTI file (dense or parcellated)
:param filename: CIFTI filename or :class:`nib.Cifti2Image` object
:param writable: if True, opens data array in writable mode
"""
if isinstance(filename, str):
img = nib.load(filename)
else:
img = filename
if not isinstance(img, nib.Cifti2Image):
raise ValueError(f"Input {filename} should be CIFTI filename or nibabel Cifti2Image")
if writable:
data = np.memmap(filename, img.dataobj.dtype, mode='r+',
offset=img.dataobj.offset, shape=img.shape, order='F')
else:
data = np.asanyarray(img.dataobj)
axes = [img.header.get_axis(idx) for idx in range(data.ndim)]
if isinstance(axes[-1], cifti2_axes.BrainModelAxis):
return DenseCifti(data, axes)
elif isinstance(axes[-1], cifti2_axes.ParcelsAxis):
return ParcelCifti(data, axes)
raise ValueError("Last axis of CIFTI object should be a BrainModelAxis or ParcelsAxis")
def save(self, cifti_filename, default_axis=None):
"""
Writes this sparse representation to a CIFTI file
:param cifti_filename: output filename
:param default_axis: What to use as an axis along any undefined dimensions
- By default an error is raised
- if set to "scalar" a ScalarAxis is used with names of "default {index}"
- if set to "series" a SeriesAxis is used
:return:
"""
self.to_cifti(default_axis).to_filename(addExt(cifti_filename, defaultExt=self.extension, mustExist=False))
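# Illustrative usage of ``save`` (comment-only sketch; filenames are
# hypothetical).  A missing CIFTI suffix is filled in from ``self.extension``,
# e.g. a dense time series gets '.dtseries.nii':
#
#     dense.save('my_timeseries')                 # -> my_timeseries.dtseries.nii
#     dense.save('my_map', default_axis='scalar') # supply an axis for undefined dimensions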
@classmethod
def from_gifti(cls, filename, mask_values=(0, np.nan)):
"""
Creates a new greyordinate object from a GIFTI file
:param filename: GIFTI filename
:param mask_values: values to mask out
:return: greyordinate object representing the unmasked vertices
"""
if isinstance(filename, str):
img = nib.load(filename)
else:
img = filename
datasets = [darr.data for darr in img.darrays]
if len(datasets) == 1:
data = datasets[0]
else:
data = np.concatenate(
[np.atleast_2d(d) for d in datasets], axis=0
)
mask = np.ones(data.shape, dtype='bool')
for value in mask_values:
if value is np.nan:
mask &= ~np.isnan(data)
else:
mask &= ~(data == value)
while mask.ndim > 1:
mask = mask.any(0)
anatomy = BrainStructure.from_gifti(img)
bm_axes = cifti2_axes.BrainModelAxis.from_mask(mask, name=anatomy.cifti)
return DenseCifti(data[..., mask], [bm_axes])
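# Illustrative usage of ``from_gifti`` (comment-only sketch; the filename is
# hypothetical).  Vertices that equal one of ``mask_values`` in every data
# array are dropped; the remaining vertices form a BrainModelAxis named after
# the GIFTI's anatomical structure:
#
#     dense = Cifti.from_gifti('lh.thickness.shape.gii', mask_values=(0, np.nan))
#     dense.brain_model_axis.nvertices   # e.g. {'CIFTI_STRUCTURE_CORTEX_LEFT': 32492}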
@classmethod
def from_image(cls, input, mask_values=(np.nan, 0)):
"""
Creates a new greyordinate object from a NIFTI file
:param input: NIFTI filename or FSL :class:`image.Image` object
:param mask_values: which values to mask out
:return: greyordinate object representing the unmasked voxels
"""
img = image.Image(input)
mask = np.ones(img.data.shape, dtype='bool')
for value in mask_values:
if value is np.nan:
mask &= ~np.isnan(img.data)
else:
mask &= ~(img.data == value)
while mask.ndim > 3:
mask = mask.any(-1)
if np.sum(mask) == 0:
raise ValueError("No unmasked voxels found in NIFTI image")
inverted_data = np.transpose(img.data[mask], tuple(range(1, img.data.ndim - 2)) + (0, ))
bm_axes = cifti2_axes.BrainModelAxis.from_mask(mask, affine=img.nibImage.affine)
return DenseCifti(inverted_data, [bm_axes])
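# Illustrative round trip between a NIFTI volume and the sparse representation
# (a minimal sketch, not part of the module API; the function name and
# filename are hypothetical).
def _example_nifti_round_trip(nifti_filename='example.nii.gz'):
    """Load a NIFTI file, keep the unmasked voxels, and expand back to a volume."""
    dense = Cifti.from_image(nifti_filename, mask_values=(np.nan, 0))
    assert isinstance(dense, DenseCifti)
    # Voxels that were masked out come back as ``fill`` in the expanded volume.
    return dense.to_image(fill=0)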
class DenseCifti(Cifti):
"""
Represents sparse data defined for a subset of voxels and vertices (i.e., greyordinates)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(self.brain_model_axis, cifti2_axes.BrainModelAxis):
raise ValueError(f"DenseCifti expects a BrainModelAxis as last axes object, not {type(self.brain_model_axis)}")
@property
def brain_model_axis(self, ) -> cifti2_axes.BrainModelAxis:
return self.axes[-1]
@property
def extension(self, ):
if self.arr.ndim == 1:
return dense_extensions[cifti2_axes.ScalarAxis]
return dense_extensions[type(self.axes[-2])]
def to_image(self, fill=0) -> image.Image:
"""
Get the volumetric data as an :class:`image.Image`
"""
if self.brain_model_axis.volume_mask.sum() == 0:
raise ValueError(f"Can not create volume without voxels in {self}")
data = np.full(self.brain_model_axis.volume_shape + self.arr.shape[:-1], fill,
dtype=self.arr.dtype)
voxels = self.brain_model_axis.voxel[self.brain_model_axis.volume_mask]
data[tuple(voxels.T)] = np.transpose(self.arr, (-1,) + tuple(range(self.arr.ndim - 1)))[
self.brain_model_axis.volume_mask]
return image.Image(data, xform=self.brain_model_axis.affine)
def surface(self, anatomy, fill=np.nan, partial=False):
"""
Gets a specific surface
If `partial` is True a view of the data rather than a copy is returned.
:param anatomy: BrainStructure or string like 'CortexLeft' or 'CortexRight'
:param fill: which value to fill the array with if not all vertices are defined
:param partial: only return the part of the surface defined in the greyordinate file (ignores `fill` if set)
:return:
- if not partial: (..., n_vertices) array
- if partial: tuple with (N, ) int array with indices on the surface included in (..., N) array
"""
if isinstance(anatomy, str):
anatomy = BrainStructure.from_string(anatomy, issurface=True)
if anatomy.cifti not in self.brain_model_axis.name:
raise ValueError(f"No surface data for {anatomy.cifti} found")
slc, bm = None, None
arr = np.full(self.arr.shape[:-1] + (self.brain_model_axis.nvertices[anatomy.cifti],), fill,
dtype=self.arr.dtype)
for name, slc_try, bm_try in self.brain_model_axis.iter_structures():
if name == anatomy.cifti:
if partial:
if bm is not None:
raise ValueError(f"Surface {anatomy} does not form a contiguous block")
slc, bm = slc_try, bm_try
else:
arr[..., bm_try.vertex] = self.arr[..., slc_try]
if not partial:
return arr
else:
return bm.vertex, self.arr[..., slc]
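# Illustrative surface extraction from a DenseCifti (a minimal sketch; the
# function name is hypothetical and ``dense`` is assumed to contain
# left-cortex vertices).
def _example_left_cortex(dense):
    """Return full and partial left-cortex arrays from a DenseCifti (illustration only)."""
    full = dense.surface('CortexLeft', fill=np.nan)          # (..., n_vertices), NaN where undefined
    vertices, part = dense.surface('CortexLeft', partial=True)
    # ``part`` is a view into ``dense.arr``; ``vertices`` gives the surface indices it covers
    return full, vertices, part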
class ParcelCifti(Cifti):
"""
Represents sparse data defined at specific parcels
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(self.parcel_axis, cifti2_axes.ParcelsAxis):
raise ValueError(f"ParcelCifti expects a ParcelsAxis as last axes object, not {type(self.parcel_axis)}")
@property
def extension(self, ):
if self.arr.ndim == 1:
return parcel_extensions[cifti2_axes.ScalarAxis]
return parcel_extensions[type(self.axes[-2])]
@property
def parcel_axis(self, ) -> cifti2_axes.ParcelsAxis:
return self.axes[-1]
def to_image(self, fill=0):
"""
Get the volumetric data as an :class:`image.Image`
"""
data = np.full(self.parcel_axis.volume_shape + self.arr.shape[:-1], fill, dtype=self.arr.dtype)
written = np.zeros(self.parcel_axis.volume_shape, dtype='bool')
for idx, write_to in enumerate(self.parcel_axis.voxels):
if written[tuple(write_to.T)].any():
raise ValueError("Duplicate voxels in different parcels")
data[tuple(write_to.T)] = self.arr[np.newaxis, ..., idx]
written[tuple(write_to.T)] = True
if not written.any():
raise ValueError("Parcellation does not contain any volumetric data")
return image.Image(data, xform=self.parcel_axis.affine)
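# Illustrative usage of ``ParcelCifti.to_image`` (comment-only sketch;
# ``pscalar`` is a hypothetical ParcelCifti instance):
#
#     vol = pscalar.to_image(fill=0)   # every voxel of a parcel receives that parcel's value
#
# Overlapping parcels raise a ValueError, as does a parcellation that defines
# no volumetric data at all.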
def surface(self, anatomy, fill=np.nan, partial=False):
"""
Gets a specific surface
:param anatomy: BrainStructure or string like 'CortexLeft' or 'CortexRight'
:param fill: which value to fill the array with if not all vertices are defined
:param partial: only return the part of the surface defined in the greyordinate file (ignores `fill` if set)
:return:
- if not partial: (..., n_vertices) array
- if partial: tuple with (N, ) int array with indices on the surface included in (..., N) array
"""
if isinstance(anatomy, str):
anatomy = BrainStructure.from_string(anatomy, issurface=True)
if anatomy.cifti not in self.parcel_axis.nvertices:
raise ValueError(f"No surface data for {anatomy.cifti} found")
arr = np.full(self.arr.shape[:-1] + (self.parcel_axis.nvertices[anatomy.cifti],), fill,
dtype=self.arr.dtype)
written = np.zeros(self.parcel_axis.nvertices[anatomy.cifti], dtype='bool')
for idx, vertices in enumerate(self.parcel_axis.vertices):
if anatomy.cifti not in vertices:
continue
write_to = vertices[anatomy.cifti]
if written[write_to].any():
raise ValueError("Duplicate vertices in different parcels")
arr[..., write_to] = self.arr[..., idx, np.newaxis]
"""Test ``sfini.execution._execution``."""
from sfini.execution import _execution as tscr
import pytest
from unittest import mock
import sfini
import datetime
import json
from sfini.execution import history
@pytest.fixture
def session():
"""AWS session mock."""
return mock.MagicMock(autospec=sfini.AWSSession)
class TestExecution:
"""Test ``sfini.execution._execution.Execution``."""
@pytest.fixture
def eg_input(self):
"""Example execution input."""
return {"a": 42, "b": "bla", "c": {"foo": [1, 2], "bar": None}}
@pytest.fixture
def execution(self, session, eg_input):
"""An example Execution instance."""
return tscr.Execution(
"spam",
"bla-sm:arn",
eg_input,
arn="spam:arn",
session=session)
def test_init(self, execution, session, eg_input):
"""Execution initialisation."""
assert execution.name == "spam"
assert execution.state_machine_arn == "bla-sm:arn"
assert execution.execution_input == eg_input
assert execution.session is session
class TestStr:
"""Execution stringification."""
def test_no_status(self, execution):
"""Execution status is unknown."""
res = str(execution)
assert "spam" in res
def test_with_status(self, execution):
"""Execution status is known."""
execution._status = "SUCCEEDED"
res = str(execution)
assert "spam" in res
assert "SUCCEEDED" in res
class TestRepr:
"""Execution string representation."""
def test_with_arn_container_input(self, execution, session):
"""ARN provided and execution input is a container."""
execution.execution_input = {"a": 42, "b": "bla", "c": [1, 2] * 20}
exp_pref = "Execution("
exp_pos = "'spam', 'bla-sm:arn', len(execution_input)=3"
exp_kw_a = ", arn='spam:arn', session=%r)" % session
exp_kw_b = ", session=%r, arn='spam:arn')" % session
exp_a = exp_pref + exp_pos + exp_kw_a
exp_b = exp_pref + exp_pos + exp_kw_b
res = repr(execution)
assert res in (exp_a, exp_b)
def test_no_arn_container_input(self, execution, session):
"""ARN provided and execution input is a container."""
execution.execution_input = {"a": 42, "b": "bla", "c": [1, 2] * 20}
execution.arn = None
exp_pref = "Execution("
exp_pos = "'spam', 'bla-sm:arn', len(execution_input)=3"
exp_kw = ", session=%r)" % session
exp = exp_pref + exp_pos + exp_kw
res = repr(execution)
assert res == exp
def test_with_arn_scalar_input(self, execution, session):
"""ARN provided and execution input is a scalar."""
execution.execution_input = 42
exp_pref = "Execution("
exp_pos = "'spam', 'bla-sm:arn'"
exp_kw_1 = "execution_input=42"
exp_kw_2 = "arn='spam:arn'"
exp_kw_3 = "session=%r" % session
exp_kws = [
", " + exp_kw_1 + ", " + exp_kw_2 + ", " + exp_kw_3 + ")",
", " + exp_kw_1 + ", " + exp_kw_3 + ", " + exp_kw_2 + ")",
", " + exp_kw_2 + ", " + exp_kw_1 + ", " + exp_kw_3 + ")",
", " + exp_kw_2 + ", " + exp_kw_3 + ", " + exp_kw_1 + ")",
", " + exp_kw_3 + ", " + exp_kw_1 + ", " + exp_kw_2 + ")",
", " + exp_kw_3 + ", " + exp_kw_2 + ", " + exp_kw_1 + ")"]
exps = [exp_pref + exp_pos + exp_kw for exp_kw in exp_kws]
res = repr(execution)
assert res in exps
def test_from_arn(self, session):
"""Construction of Execution by querying AWS."""
# Setup environment
now = datetime.datetime.now()
input_ = {"a": 42, "b": "bla", "c": {"foo": [1, 2], "bar": None}}
output = {"foo": [1, 2], "bar": None}
resp = {
"executionArn": "spam:arn",
"stateMachineArn": "bla-sm:arn",
"name": "spam",
"status": "SUCCEEDED",
"startDate": now - datetime.timedelta(hours=1),
"stopDate": now - datetime.timedelta(minutes=50),
"input": json.dumps(input_),
"output": json.dumps(output)}
session.sfn.describe_execution.return_value = resp
# Build input
arn = "spam:arn"
# Run function
res = tscr.Execution.from_arn(arn, session=session)
# Check result
assert isinstance(res, tscr.Execution)
assert res.name == "spam"
assert res.state_machine_arn == "bla-sm:arn"
assert res.execution_input == input_
assert res.arn == "spam:arn"
assert res.session is session
assert res._status == "SUCCEEDED"
assert res._start_date == now - datetime.timedelta(hours=1)
assert res._stop_date == now - datetime.timedelta(minutes=50)
assert res._output == {"foo": [1, 2], "bar": None}
session.sfn.describe_execution.assert_called_once_with(
executionArn="spam:arn")
def test_from_list_item(self, session):
"""Construction of Execution after querying AWS."""
now = datetime.datetime.now()
item = {
"executionArn": "spam:arn",
"stateMachineArn": "bla-sm:arn",
"name": "spam",
"status": "SUCCEEDED",
"startDate": now - datetime.timedelta(hours=1),
"stopDate": now - datetime.timedelta(minutes=50)}
# Run function
res = tscr.Execution.from_list_item(item, session=session)
# Check result
assert isinstance(res, tscr.Execution)
assert res.name == "spam"
assert res.state_machine_arn == "bla-sm:arn"
assert res.execution_input is res._not_provided
assert res.arn == "spam:arn"
assert res.session is session
assert res._status == "SUCCEEDED"
assert res._start_date == now - datetime.timedelta(hours=1)
assert res._stop_date == now - datetime.timedelta(minutes=50)
class TestStatus:
"""Execution status provided by AWS."""
@pytest.mark.parametrize("status", [None, "RUNNING"])
def test_unknown(self, execution, status):
"""Execution status is not currently known."""
def _update():
execution._status = "TIMED_OUT"
execution._update = mock.Mock(side_effect=_update)
execution._status = status
res = execution.status
assert res == "TIMED_OUT"
execution._update.assert_called_once_with()
@pytest.mark.parametrize(
"status",
["SUCCEEDED", "FAILED", "ABORTED", "TIMED_OUT"])
def test_known(self, execution, status):
"""Execution status is known."""
execution._update = mock.Mock()
execution._status = status
res = execution.status
assert res == status
execution._update.assert_not_called()
class TestStartTime:
"""Execution start-time provided by AWS."""
def test_unknown(self, execution):
"""Execution start-time is not already known."""
def _update():
execution._start_date = now - datetime.timedelta(minutes=10)
now = datetime.datetime.now()
execution._update = mock.Mock(side_effect=_update)
execution._start_date = None
res = execution.start_date
assert res == now - datetime.timedelta(minutes=10)
execution._update.assert_called_once_with()
def test_known(self, execution):
"""Execution start-time is known."""
now = datetime.datetime.now()
execution._update = mock.Mock()
execution._start_date = now - datetime.timedelta(minutes=10)
res = execution.start_date
assert res == now - datetime.timedelta(minutes=10)
execution._update.assert_not_called()
class TestStopTime:
"""Execution stop-time provided by AWS."""
def test_unknown(self, execution):
"""Execution stop-time is not already known."""
def _update():
execution._stop_date = now - datetime.timedelta(minutes=10)
now = datetime.datetime.now()
execution._update = mock.Mock(side_effect=_update)
execution._raise_unfinished = mock.Mock()
execution._stop_date = None
res = execution.stop_date
assert res == now - datetime.timedelta(minutes=10)
execution._update.assert_called_once_with()
execution._raise_unfinished.assert_called_once_with()
def test_known(self, execution):
"""Execution stop-time is known."""
now = datetime.datetime.now()
execution._update = mock.Mock()
execution._raise_unfinished = mock.Mock()
execution._stop_date = now - datetime.timedelta(minutes=10)
res = execution.stop_date
assert res == now - datetime.timedelta(minutes=10)
execution._update.assert_not_called()
execution._raise_unfinished.assert_not_called()
class TestOutput:
"""Execution output provided by AWS."""
def test_unknown(self, execution):
"""Execution output is not already known."""
def _update():
execution._output = {"foo": [1, 2], "bar": None}
execution._update = mock.Mock(side_effect=_update)
execution._raise_unfinished = mock.Mock()
execution._raise_on_failure = mock.Mock()
execution._output = tscr._default
res = execution.output
assert res == {"foo": [1, 2], "bar": None}
execution._update.assert_called_once_with()
execution._raise_unfinished.assert_called_once_with()
execution._raise_on_failure.assert_called_once_with()
def test_known(self, execution):
"""Execution output is known."""
execution._update = mock.Mock()
execution._raise_unfinished = mock.Mock()
execution._raise_on_failure = mock.Mock()
execution._output = {"foo": [1, 2], "bar": None}
res = execution.output
assert res == {"foo": [1, 2], "bar": None}
execution._update.assert_not_called()
execution._raise_unfinished.assert_not_called()
execution._raise_on_failure.assert_not_called()
class TestUpdate:
"""Execution details updating by querying AWS."""
@pytest.mark.parametrize(
("status", "input_"),
[
(None, tscr._default),
("RUNNING", tscr._default),
(None, {"a": 42, "c": {"foo": [1, 2], "bar": None}}),
("SUCCEEDED", tscr._default)])
def test_query(self, execution, session, status, input_):
"""A query of AWS is performed."""
# Setup environment
now = datetime.datetime.now()
rinput_ = {"a": 42, "c": {"foo": [1, 2], "bar": None}}
output = {"foo": [1, 2], "bar": None}
resp = {
"executionArn": "spam:arn",
"stateMachineArn": "bla-sm:arn",
"name": "spam",
"status": "SUCCEEDED",
"startDate": now - datetime.timedelta(hours=1),
"stopDate": now - datetime.timedelta(minutes=50),
"input": json.dumps(rinput_),
"output": json.dumps(output)}
session.sfn.describe_execution.return_value = resp
execution._raise_no_arn = mock.Mock()
execution._status = status
execution.execution_input = input_
# Run function
execution._update()
# Check result
assert execution._status == "SUCCEEDED"
assert execution._start_date == now - datetime.timedelta(hours=1)
assert execution._stop_date == now - datetime.timedelta(minutes=50)
assert execution._output == {"foo": [1, 2], "bar": None}
session.sfn.describe_execution.assert_called_once_with(
executionArn="spam:arn")
execution._raise_no_arn.assert_called_once_with()
def test_finished(self, execution, session):
"""No query of AWS is performed."""
execution._raise_no_arn = mock.Mock()
execution._status = "SUCCEEDED"
execution._update()
session.sfn.describe_execution.assert_not_called()
execution._raise_no_arn.assert_not_called()
class TestRaiseOnFailure:
"""Raising on execution failure."""
@pytest.mark.parametrize("status", ["FAILED", "ABORTED", "TIMED_OUT"])
def test_failure(self, execution, status):
"""Execution has failed."""
execution._status = status
with pytest.raises(RuntimeError) as e:
execution._raise_on_failure()
assert "spam" in str(e.value)
assert status in str(e.value)
@pytest.mark.parametrize("status", ["RUNNING", "SUCCEEDED"])
def test_not_failure(self, execution, status):
"""Execution has not failed."""
execution._status = status
execution._raise_on_failure()
class TestRaiseUnfinished:
"""Raising when execution is unfinished."""
def test_unfinished(self, execution):
"""Execution hasn't finished."""
execution._status = "RUNNING"
with pytest.raises(RuntimeError) as e:
execution._raise_unfinished()
assert "spam" in str(e.value)
assert "finish" in str(e.value)
@pytest.mark.parametrize(
"status",
["FAILED", "ABORTED", "TIMED_OUT", "SUCCEEDED"])
def test_finished(self, execution, status):
"""Execution has finished."""
execution._status = status
execution._raise_unfinished()
class TestRaiseNoArn:
"""Raising when no ARN is provided to execution."""
def test_no_arn(self, execution):
"""Execution has no associated ARN."""
execution.arn = None
with pytest.raises(RuntimeError) as e:
execution._raise_no_arn()
assert "ARN" in str(e.value)
assert "spam" in str(e.value)
def test_finished(self, execution):
"""Execution has finished."""
execution._raise_no_arn()
def test_start(self, execution, session, eg_input):
"""Execution starting."""
# Setup environment
now = datetime.datetime.now()
resp = {"executionArn": "spam:arn", "startDate": now}
session.sfn.start_execution.return_value = resp
execution.arn = None
# Run function
execution.start()
# Check result
assert execution.arn == "spam:arn"
assert execution._start_date == now
assert execution._status == "RUNNING"
session.sfn.start_execution.assert_called_once_with(
stateMachineArn="bla-sm:arn",
name="spam",
input=mock.ANY)
res_se_call = session.sfn.start_execution.call_args_list[0]
res_input_str = res_se_call[1]["input"]
assert json.loads(res_input_str) == eg_input
def test_start_default_input(self, execution, session):
"""Execution starting."""
# Setup environment
now = datetime.datetime.now()
resp = {"executionArn": "spam:arn", "startDate": now}
session.sfn.start_execution.return_value = resp
execution.arn = None
execution.execution_input = tscr._default
# Run function
execution.start()
# Check result
m.x1236 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1237 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1238 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1239 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1240 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1241 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1242 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1243 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1244 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1245 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1246 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1247 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1248 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1249 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1250 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1251 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1252 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1253 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1254 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1255 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1256 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1257 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1258 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1259 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1260 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1261 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1262 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1263 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1264 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1265 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1266 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1267 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1268 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1269 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1270 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1271 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1272 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1273 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1274 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1275 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1276 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1277 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1278 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1279 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1280 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1281 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1282 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1283 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1284 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1285 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1286 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1287 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1288 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1289 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1290 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1291 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1292 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1293 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1294 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1295 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1296 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1297 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1298 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1299 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1300 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1301 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1302 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1303 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1304 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1305 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1306 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1307 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1308 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1309 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1310 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1311 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1312 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1313 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1314 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1315 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1316 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1317 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1318 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1319 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1320 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1321 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1322 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1323 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1324 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1325 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1326 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1327 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1328 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1329 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1330 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1331 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1332 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1333 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1334 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1335 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1336 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1337 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1338 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1339 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1340 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1341 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1342 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1343 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1344 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1345 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1346 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1347 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1348 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1349 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1350 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1351 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1352 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1353 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1354 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1355 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1356 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1357 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1358 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1359 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1360 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1361 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1362 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1363 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1364 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1365 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1366 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1367 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1368 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1369 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1370 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1371 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1372 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1373 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1374 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1375 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1376 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1377 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1378 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1379 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1380 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1381 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1382 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1383 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1384 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1385 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1386 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1387 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1388 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1389 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1390 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1391 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1392 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1393 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1394 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1395 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1396 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1397 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1398 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1399 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1400 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1401 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1402 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1403 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1404 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1405 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1406 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1407 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1408 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1409 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1410 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1411 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1412 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1413 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1414 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1415 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1416 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1417 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1418 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1419 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1420 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1421 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1422 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1423 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1424 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1425 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1426 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1427 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1428 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1429 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1430 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1431 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1432 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1433 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1434 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1435 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1436 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1437 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1438 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1439 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1440 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1441 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1442 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1443 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1444 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1445 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1446 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1447 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1448 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1449 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1450 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1451 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1452 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1453 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1454 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1455 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1456 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1457 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1458 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1459 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1460 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1461 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1462 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1463 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1464 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1465 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1466 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1467 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1468 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1469 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1470 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1471 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1472 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1473 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1474 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1475 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1476 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1477 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1478 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1479 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1480 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1481 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1482 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1483 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1484 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1485 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1486 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1487 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1488 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1489 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1490 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1491 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1492 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1493 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1494 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1495 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1496 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1497 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1498 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1499 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1500 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1501 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1502 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1503 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1504 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1505 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1506 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1507 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1508 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1509 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1510 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1511 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1512 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1513 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1514 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1515 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1516 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1517 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1518 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1519 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1520 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1521 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1522 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1523 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1524 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1525 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1526 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1527 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1528 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1529 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1530 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1531 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1532 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1533 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1534 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1535 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1536 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1537 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1538 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1539 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1540 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1541 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1542 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1543 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1544 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1545 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1546 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1547 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1548 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1549 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1550 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1551 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1552 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1553 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1554 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1555 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1556 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1557 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1558 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1559 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1560 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1561 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1562 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1563 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1564 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1565 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1566 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1567 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1568 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1569 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1570 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1571 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1572 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1573 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1574 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1575 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1576 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1577 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1578 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1579 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1580 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1581 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1582 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1583 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1584 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1585 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1586 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1587 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1588 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1589 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1590 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1591 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1592 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1593 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1594 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1595 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1596 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1597 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1598 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1599 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1600 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1601 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1602 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1603 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1604 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1605 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1606 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1607 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1608 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1609 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1610 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1611 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1612 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1613 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1614 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1615 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1616 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1617 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1618 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1619 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1620 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1621 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1622 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1623 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1624 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1625 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1626 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1627 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1628 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1629 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1630 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1631 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1632 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1633 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1634 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1635 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1636 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1637 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1638 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1639 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1640 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1641 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1642 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1643 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1644 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1645 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1646 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1647 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1648 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1649 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1650 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1651 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1652 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1653 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1654 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1655 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1656 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1657 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1658 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1659 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1660 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1661 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1662 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1663 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1664 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1665 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1666 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1667 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1668 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1669 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1670 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1671 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1672 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1673 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1674 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1675 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1676 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1677 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1678 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1679 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1680 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1681 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1682 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1683 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1684 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1685 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1686 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1687 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1688 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1689 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1690 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1691 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1692 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1693 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1694 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1695 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1696 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1697 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1698 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1699 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1700 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1701 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1702 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1703 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1704 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1705 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1706 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1707 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1708 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1709 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1710 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1711 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1712 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1713 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1714 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1715 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1716 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1717 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1718 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1719 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1720 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1721 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1722 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1723 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1724 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1725 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1726 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1727 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1728 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1729 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1730 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1731 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1732 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1733 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1734 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1735 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1736 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1737 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1738 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1739 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1740 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1741 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1742 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1743 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1744 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1745 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1746 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1747 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1748 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1749 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1750 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1751 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1752 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1753 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1754 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1755 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1756 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1757 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1758 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1759 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1760 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1761 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1762 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1763 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1764 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1765 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1766 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1767 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1768 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1769 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1770 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1771 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1772 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1773 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1774 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1775 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1776 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1777 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1778 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1779 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1780 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1781 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1782 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1783 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1784 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1785 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1786 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1787 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1788 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1789 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1790 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1791 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1792 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1793 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1794 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1795 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1796 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1797 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1798 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1799 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1800 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1801 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1802 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1803 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1804 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1805 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1806 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1807 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1808 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1809 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1810 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1811 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1812 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1813 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1814 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1815 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1816 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1817 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1818 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1819 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1820 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1821 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1822 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1823 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1824 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1825 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1826 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1827 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1828 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1829 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1830 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1831 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1832 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1833 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1834 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1835 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1836 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1837 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1838 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1839 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1840 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1841 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1842 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1843 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1844 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1845 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1846 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1847 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1848 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1849 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1850 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1851 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1852 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1853 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1854 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1855 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1856 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1857 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1858 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1859 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1860 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1861 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1862 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1863 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1864 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1865 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1866 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1867 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1868 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1869 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1870 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1871 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1872 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1873 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1874 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1875 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1876 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
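# Note: the x1583-x1876 declarations above are machine-generated, one scalar Var per line,
# all sharing the same domain, bounds and initial value (1/30). A hypothetical, more compact
# equivalent would declare a single indexed Var; shown only as an illustration, since the
# rest of the generated model refers to the scalar names m.xNNNN directly:
#   m.x = Var(range(1583, 1877), within=Reals, bounds=(0, None), initialize=0.0333333333333333)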
None,
**kwargs
):
"""
:keyword virtual_path: Virtual path.
:paramtype virtual_path: str
:keyword physical_path: Physical path.
:paramtype physical_path: str
:keyword preload_enabled: :code:`<code>true</code>` if preloading is enabled; otherwise,
:code:`<code>false</code>`.
:paramtype preload_enabled: bool
:keyword virtual_directories: Virtual directories for virtual application.
:paramtype virtual_directories: list[~azure.mgmt.web.v2016_09_01.models.VirtualDirectory]
"""
super(VirtualApplication, self).__init__(**kwargs)
self.virtual_path = virtual_path
self.physical_path = physical_path
self.preload_enabled = preload_enabled
self.virtual_directories = virtual_directories
class VirtualDirectory(msrest.serialization.Model):
"""Directory for virtual application.
:ivar virtual_path: Path to virtual application.
:vartype virtual_path: str
:ivar physical_path: Physical path.
:vartype physical_path: str
"""
_attribute_map = {
'virtual_path': {'key': 'virtualPath', 'type': 'str'},
'physical_path': {'key': 'physicalPath', 'type': 'str'},
}
def __init__(
self,
*,
virtual_path: Optional[str] = None,
physical_path: Optional[str] = None,
**kwargs
):
"""
:keyword virtual_path: Path to virtual application.
:paramtype virtual_path: str
:keyword physical_path: Physical path.
:paramtype physical_path: str
"""
super(VirtualDirectory, self).__init__(**kwargs)
self.virtual_path = virtual_path
self.physical_path = physical_path
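# Illustrative sketch only (not part of the generated SDK models): composing the two models
# above, a VirtualApplication holding one VirtualDirectory. All path values are hypothetical.
def _example_virtual_application():
    static_dir = VirtualDirectory(virtual_path="/static",
                                  physical_path="site\\wwwroot\\static")
    return VirtualApplication(virtual_path="/",
                              physical_path="site\\wwwroot",
                              preload_enabled=True,
                              virtual_directories=[static_dir])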
class VirtualIPMapping(msrest.serialization.Model):
"""Virtual IP mapping.
:ivar virtual_ip: Virtual IP address.
:vartype virtual_ip: str
:ivar internal_http_port: Internal HTTP port.
:vartype internal_http_port: int
:ivar internal_https_port: Internal HTTPS port.
:vartype internal_https_port: int
:ivar in_use: Is virtual IP mapping in use.
:vartype in_use: bool
"""
_attribute_map = {
'virtual_ip': {'key': 'virtualIP', 'type': 'str'},
'internal_http_port': {'key': 'internalHttpPort', 'type': 'int'},
'internal_https_port': {'key': 'internalHttpsPort', 'type': 'int'},
'in_use': {'key': 'inUse', 'type': 'bool'},
}
def __init__(
self,
*,
virtual_ip: Optional[str] = None,
internal_http_port: Optional[int] = None,
internal_https_port: Optional[int] = None,
in_use: Optional[bool] = None,
**kwargs
):
"""
:keyword virtual_ip: Virtual IP address.
:paramtype virtual_ip: str
:keyword internal_http_port: Internal HTTP port.
:paramtype internal_http_port: int
:keyword internal_https_port: Internal HTTPS port.
:paramtype internal_https_port: int
:keyword in_use: Is virtual IP mapping in use.
:paramtype in_use: bool
"""
super(VirtualIPMapping, self).__init__(**kwargs)
self.virtual_ip = virtual_ip
self.internal_http_port = internal_http_port
self.internal_https_port = internal_https_port
self.in_use = in_use
class VirtualNetworkProfile(msrest.serialization.Model):
"""Specification for using a Virtual Network.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource id of the Virtual Network.
:vartype id: str
:ivar name: Name of the Virtual Network (read-only).
:vartype name: str
:ivar type: Resource type of the Virtual Network (read-only).
:vartype type: str
:ivar subnet: Subnet within the Virtual Network.
:vartype subnet: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
subnet: Optional[str] = None,
**kwargs
):
"""
:keyword id: Resource id of the Virtual Network.
:paramtype id: str
:keyword subnet: Subnet within the Virtual Network.
:paramtype subnet: str
"""
super(VirtualNetworkProfile, self).__init__(**kwargs)
self.id = id
self.name = None
self.type = None
self.subnet = subnet
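# Illustrative sketch only: 'name' and 'type' are marked readonly in _validation above, so
# they are populated by the service and are not constructor arguments; only 'id' and
# 'subnet' can be set by the caller. The resource id below is a hypothetical placeholder.
def _example_virtual_network_profile():
    return VirtualNetworkProfile(
        id="/subscriptions/<subscription-id>/resourceGroups/<group>/providers/"
           "Microsoft.Network/virtualNetworks/<vnet-name>",
        subnet="default")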
class VnetGateway(ProxyOnlyResource):
"""The Virtual Network gateway contract. This is used to give the Virtual Network gateway access to the VPN package.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar kind: Kind of resource.
:vartype kind: str
:ivar type: Resource type.
:vartype type: str
:ivar vnet_name: The Virtual Network name.
:vartype vnet_name: str
:ivar vpn_package_uri: The URI where the VPN package can be downloaded.
:vartype vpn_package_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vpn_package_uri': {'key': 'properties.vpnPackageUri', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
vnet_name: Optional[str] = None,
vpn_package_uri: Optional[str] = None,
**kwargs
):
"""
:keyword kind: Kind of resource.
:paramtype kind: str
:keyword vnet_name: The Virtual Network name.
:paramtype vnet_name: str
:keyword vpn_package_uri: The URI where the VPN package can be downloaded.
:paramtype vpn_package_uri: str
"""
super(VnetGateway, self).__init__(kind=kind, **kwargs)
self.vnet_name = vnet_name
self.vpn_package_uri = vpn_package_uri
class VnetInfo(ProxyOnlyResource):
"""Virtual Network information contract.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar kind: Kind of resource.
:vartype kind: str
:ivar type: Resource type.
:vartype type: str
:ivar vnet_resource_id: The Virtual Network's resource ID.
:vartype vnet_resource_id: str
:ivar cert_thumbprint: The client certificate thumbprint.
:vartype cert_thumbprint: str
:ivar cert_blob: A certificate file (.cer) blob containing the public key of the private key
used to authenticate a
Point-To-Site VPN connection.
:vartype cert_blob: bytearray
:ivar routes: The routes that this Virtual Network connection uses.
:vartype routes: list[~azure.mgmt.web.v2016_09_01.models.VnetRoute]
:ivar resync_required: :code:`<code>true</code>` if a resync is required; otherwise,
:code:`<code>false</code>`.
:vartype resync_required: bool
:ivar dns_servers: DNS servers to be used by this Virtual Network. This should be a
comma-separated list of IP addresses.
:vartype dns_servers: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'cert_thumbprint': {'readonly': True},
'routes': {'readonly': True},
'resync_required': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_resource_id': {'key': 'properties.vnetResourceId', 'type': 'str'},
'cert_thumbprint': {'key': 'properties.certThumbprint', 'type': 'str'},
'cert_blob': {'key': 'properties.certBlob', 'type': 'bytearray'},
'routes': {'key': 'properties.routes', 'type': '[VnetRoute]'},
'resync_required': {'key': 'properties.resyncRequired', 'type': 'bool'},
'dns_servers': {'key': 'properties.dnsServers', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
vnet_resource_id: Optional[str] = None,
cert_blob: Optional[bytearray] = None,
dns_servers: Optional[str] = None,
**kwargs
):
"""
:keyword kind: Kind of resource.
:paramtype kind: str
:keyword vnet_resource_id: The Virtual Network's resource ID.
:paramtype vnet_resource_id: str
:keyword cert_blob: A certificate file (.cer) blob containing the public key of the private key
used to authenticate a
Point-To-Site VPN connection.
:paramtype cert_blob: bytearray
:keyword dns_servers: DNS servers to be used by this Virtual Network. This should be a
comma-separated list of IP addresses.
:paramtype dns_servers: str
"""
super(VnetInfo, self).__init__(kind=kind, **kwargs)
self.vnet_resource_id = vnet_resource_id
self.cert_thumbprint = None
self.cert_blob = cert_blob
self.routes = None
self.resync_required = None
self.dns_servers = dns_servers
class VnetRoute(ProxyOnlyResource):
"""Virtual Network route contract used to pass routing information for a Virtual Network.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar kind: Kind of resource.
:vartype kind: str
:ivar type: Resource type.
:vartype type: str
:ivar vnet_route_name: The name of this route. This is only returned by the server and does not
need to be set by the client.
:vartype vnet_route_name: str
:ivar start_address: The starting address for this route. This may also include a CIDR
notation, in which case the end address must not be specified.
:vartype start_address: str
:ivar end_address: The ending address for this route. If the start address is specified in CIDR
notation, this must be omitted.
:vartype end_address: str
:ivar route_type: The type of route this is:
DEFAULT - By default, every app has routes to the local address ranges specified by RFC1918
INHERITED - Routes inherited from the real Virtual Network routes
STATIC - Static route set on the app only
These values will be used for syncing an app's routes with those from a Virtual Network.
Possible values include: "DEFAULT", "INHERITED", "STATIC".
:vartype route_type: str or ~azure.mgmt.web.v2016_09_01.models.RouteType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_route_name': {'key': 'properties.name', 'type': 'str'},
'start_address': {'key': 'properties.startAddress', 'type': 'str'},
'end_address': {'key': 'properties.endAddress', 'type': 'str'},
'route_type': {'key': 'properties.routeType', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
vnet_route_name: Optional[str] = None,
start_address: Optional[str] = None,
end_address: Optional[str] = None,
route_type: Optional[Union[str, "RouteType"]] = None,
**kwargs
):
"""
:keyword kind: Kind of resource.
:paramtype kind: str
:keyword vnet_route_name: The name of this route. This is only returned by the server and does
not need to be set by the client.
:paramtype vnet_route_name: str
        :keyword start_address: The starting address for this route. This may also include a CIDR
         notation, in which case the end address must not be specified.
        :paramtype start_address: str
        :keyword end_address: The ending address for this route. If the start address is specified
         in CIDR notation, this must be omitted.
        :paramtype end_address: str
        :keyword route_type: The type of route this is:
         DEFAULT - By default, every app has routes to the local address ranges specified by RFC1918
         INHERITED - Routes inherited from the real Virtual Network routes
         STATIC - Static route set on the app only
         These values will be used for syncing an app's routes with those from a Virtual Network.
         Possible values include: "DEFAULT", "INHERITED", "STATIC".
        :paramtype route_type: str or ~azure.mgmt.web.v2016_09_01.models.RouteType
        """
        super(VnetRoute, self).__init__(kind=kind, **kwargs)
        self.vnet_route_name = vnet_route_name
        self.start_address = start_address
        self.end_address = end_address
        self.route_type = route_type
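# Illustrative sketch only (not part of the generated SDK models): per the docstring above,
# a route can carry either a CIDR start_address with the end_address omitted, or an explicit
# start/end address pair. The addresses and names below are hypothetical.
def _example_vnet_routes():
    cidr_route = VnetRoute(vnet_route_name="static-1",
                           start_address="10.0.0.0/24",
                           route_type="STATIC")
    range_route = VnetRoute(vnet_route_name="static-2",
                            start_address="10.0.1.0",
                            end_address="10.0.1.255",
                            route_type="STATIC")
    return [cidr_route, range_route]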
# def select(population):
# for i, potential_parent in enumerate(population):
# y = simulate_ode(potential_parent, TIME_STEPS, BATCH_SIZE, NODES)
# # Multiple outputs
# cost_storage[i] = bandpass_cost_calculator(y[:, -1], BATCH_SIZE) * 1.5
# cost_storage[i] += switch_cost_calculator(y[:, -2], BATCH_SIZE) * 1.25
# # cost_storage[i] = adaptation_cost_calculator(y[:, -1], BATCH_SIZE)
# cost_storage[i] += linear_cost_calculator(y[:, -3], BATCH_SIZE)
# cost_storage[i] /= 3
# # cost_storage[i] += REG_RATE * sum(sum(abs(potential_parent))) # regularization
# double_mergesort(cost_storage, population)
# y = simulate_ode(population[0], TIME_STEPS, BATCH_SIZE, NODES)
# print("Bandpass Cost:", bandpass_cost_calculator(y[:, -1], BATCH_SIZE))
# print("Switch Cost:", switch_cost_calculator(y[:, -2], BATCH_SIZE))
# print("Linear Cost:", linear_cost_calculator(y[:, -3], BATCH_SIZE))
# # print(cost_storage[0])
# survivors = population[:SURVIVABLE_PARENTS]
# survivors = np.append(survivors, survivors, axis=0)
# # repopulated_parents = np.append(repopulated_parents, survivors, axis=0)
# # random_children = np.random.uniform(-BOUND, BOUND, (SURVIVABLE_PARENTS, NODES, NODES))
# # survivors = np.append(repopulated_parents, random_children, axis=0)
# # print(repopulated_parents)
# return survivors, population[0], cost_storage[0]
# def select(population):
# # Harmonic Version - Mitigate Impact of Outliers
# for i, potential_parent in enumerate(population):
# y = simulate_ode(potential_parent, TIME_STEPS, BATCH_SIZE, NODES)
# # Multiple outputs
# f_bandpass = BATCH_SIZE - bandpass_cost_calculator(y[:, -1], BATCH_SIZE)
# f_switch = BATCH_SIZE - switch_cost_calculator(y[:, -2], BATCH_SIZE)
# f_linear = BATCH_SIZE - linear_cost_calculator(y[:, -3], BATCH_SIZE)
# cost_storage[i] = BATCH_SIZE - 3 / (((1/f_bandpass) + (1/f_switch) + (1/f_linear)))
# # cost_storage[i] += REG_RATE * sum(sum(abs(potential_parent))) # regularization
# # cost_storage[i] = f_bandpass + f_switch + f_linear
# double_mergesort(cost_storage, population)
# y = simulate_ode(population[0], TIME_STEPS, BATCH_SIZE, NODES)
# print("Bandpass Cost:", bandpass_cost_calculator(y[:, -1], BATCH_SIZE))
# print("Switch Cost:", switch_cost_calculator(y[:, -2], BATCH_SIZE))
# print("Linear Cost:", linear_cost_calculator(y[:, -3], BATCH_SIZE))
# # print(cost_storage[0])
# survivors = population[:SURVIVABLE_PARENTS]
# survivors = np.append(survivors, survivors, axis=0)
# # repopulated_parents = np.append(repopulated_parents, survivors, axis=0)
# # random_children = np.random.uniform(-BOUND, BOUND, (SURVIVABLE_PARENTS, NODES, NODES))
# # survivors = np.append(repopulated_parents, random_children, axis=0)
# # print(repopulated_parents)
# return survivors, population[0], cost_storage[0]
# def select(population):
# # Square Version - Aggravate Impact of Outliers
# for i, potential_parent in enumerate(population):
# y = simulate_ode(potential_parent, TIME_STEPS, BATCH_SIZE, NODES)
# # Multiple outputs
# f_bandpass = bandpass_cost_calculator(y[:, -1], BATCH_SIZE)
# f_bandpass_reversed = bandpass_reversed_cost_calculator(y[:, -2], BATCH_SIZE)
# f_switch = switch_cost_calculator(y[:, -3], BATCH_SIZE)
# # f_valley = valley_cost_calculator(y[:, -3], BATCH_SIZE)
# # f_linear = linear_cost_calculator(y[:, -3], BATCH_SIZE)
# # cost_storage[i] = valley_cost_calculator(y[:, -1], BATCH_SIZE)
# # cost_storage[i] = peak_cost_calculator(y[:, -1], BATCH_SIZE)
# # cost_storage[i] = bandpass_cost_calculator(y[:, -1], BATCH_SIZE)
# cost_storage[i] = f_bandpass**2 + f_switch**2 + f_bandpass_reversed**2
# # cost_storage[i] += REG_RATE * sum(sum(abs(potential_parent))) # regularization
# # cost_storage[i] = f_bandpass + f_switch + f_linear
# double_mergesort(cost_storage, population)
# y = simulate_ode(population[0], TIME_STEPS, BATCH_SIZE, NODES)
# print("Bandpass Cost:", bandpass_cost_calculator(y[:, -1], BATCH_SIZE))
# print("Valley Cost:", bandpass_reversed_cost_calculator(y[:, -2], BATCH_SIZE))
# print("Switch Cost:", switch_cost_calculator(y[:, -3], BATCH_SIZE))
# # print("Valley Cost:", valley_cost_calculator(y[:, -3], BATCH_SIZE))
# # print("Linear Cost:", linear_cost_calculator(y[:, -3], BATCH_SIZE))
# # print(cost_storage[0])
# survivors = population[:SURVIVABLE_PARENTS]
# survivors = np.append(survivors, survivors, axis=0)
# # repopulated_parents = np.append(repopulated_parents, survivors, axis=0)
# # random_children = np.random.uniform(-BOUND, BOUND, (SURVIVABLE_PARENTS, NODES, NODES))
# # survivors = np.append(repopulated_parents, random_children, axis=0)
# # print(repopulated_parents)
# return survivors, population[0], cost_storage[0]
def select(population):
for i, potential_parent in enumerate(population):
f_bandpass = simulate_and_cost_bandpass(potential_parent)
f_bandpass_reversed = simulate_and_cost_bandpass_reversed(potential_parent)
f_switch = simulate_and_cost_switch(potential_parent)
cost_storage[i] = f_bandpass ** 2 + f_bandpass_reversed ** 2 + f_switch ** 2
double_mergesort(cost_storage, population)
survivors = population[:SURVIVABLE_PARENTS]
survivors = np.append(survivors, survivors, axis=0)
return survivors, population[0], cost_storage[0]
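# double_mergesort() is defined elsewhere in this script (not shown in this fragment); from
# its use above it is assumed to sort cost_storage in ascending order and to reorder
# population in lockstep. A minimal numpy-based equivalent is sketched below (an assumption,
# not the original implementation; it assumes both arguments are numpy arrays).
def _double_mergesort_sketch(costs, population):
    order = np.argsort(costs)          # indices from lowest (best) cost to highest
    costs[:] = costs[order]            # sort the costs in place
    population[:] = population[order]  # apply the same permutation to the individuals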
def plot(y):
b = np.linspace(1, BATCH_SIZE, BATCH_SIZE)
plt.title(f"{NODES} Nodes")
plt.plot(b, y[:, 0], "black", linewidth=2, label="Input Node #1")
plt.plot(b, y[:, 1], "saddlebrown", linewidth=2, label="Input Node #2")
for i in range(2, y.shape[1] - 1):
# plt.plot(b, y[:, i], 'g-', linewidth=2, label='Support Node')
plt.plot(b, y[:, i], "gray", linewidth=2)
plt.plot(b, y[:, -1], "r", linewidth=2, label="Multifunction Output Node")
plt.xlabel("Input Level")
plt.ylabel("Output Level")
plt.legend()
plt.show()
def simulate_and_cost_bandpass(individual):
# Encode <- 0, 1
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
cost = np.sum(np.abs(output[:, -1] - bandpass_design))
return cost
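# The simulate_and_cost_* functions in this section all share one pattern: an explicit Euler
# integration of dy/dt = sigmoid(y @ W) - y + u over TIME_STEPS steps of size dt, where the
# input u drives column 0, column 1, or both, and the cost is the L1 distance between the
# last node's response and a target curve. A hypothetical shared helper is sketched below
# (an illustration only, not the original code; it assumes sigmoid, linspace_col,
# initial_val, dt, TIME_STEPS, BATCH_SIZE and NODES are defined earlier in the script).
def _simulate_response_sketch(individual, input_columns):
    output = initial_val
    for _ in range(TIME_STEPS):
        input_val = np.zeros((BATCH_SIZE, NODES))
        for col in input_columns:
            # small multiplicative noise on the ramp input, as in the surrounding functions
            input_val[:, col] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
        output = output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
    return output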
def simulate_and_cost_bandpass_reversed(individual):
# Encode <- 1, 0
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
cost = np.sum(np.abs(output[:, -1] - bandpass_reversed_design))
return cost
switch_design = [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
]
switch_design = np.array(switch_design)
def simulate_and_cost_switch(individual):
# Encode <- 1, 1
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
cost = np.sum(np.abs(output[:, -1] - switch_design))
return cost
def simulate_plot_cost_bandpass(individual):
# Encode <- 0, 1
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
plot(output)
def simulate_and_plot_bandpass_reversed(individual):
# Encode <- 1, 0
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
plot(output)
def simulate_and_plot_switch(individual):
# Encode <- 1, 1
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
plot(output)
def distributed_select(population):
pass
# Mutation
def mutate(population):
# doesn't mutate the elite
for p in range(1, len(population)):
for i in range(NODES):
for j in range(NODES):
if np.random.rand() < RANDOM_MUTATION_RATE:
population[p][i][j] = (
BOUND * np.random.rand() * (-1) ** np.random.randint(2)
)
elif np.random.rand() < SIGN_FLIP_MUTATION_RATE:
population[p][i][j] = -1 * population[p][i][j]
else:
population[p][i][j] += (
STEP_SIZE * np.random.rand() * (-1) ** np.random.randint(2)
)
# population[p][i][j] += 100
# print(population)
return population
def original_mutate(population):
for p in range(1, len(population)):
for i in range(NODES):
for j in range(NODES):
population[p][i][j] += (
STEP_SIZE * np.random.rand() * (-1) ** np.random.randint(2)
)
return population
def distributed_mutation(individual):
for i in range(NODES):
for j in range(NODES):
individual[i][j] += (
STEP_SIZE * np.random.rand() * (-1) ** np.random.randint(2)
)
return individual
def distributed_small_mutation(individual):
for i in range(NODES):
for j in range(NODES):
if np.random.rand() < STEP_MUTATION_RATE:
individual[i][j] += (
STEP_SIZE * np.random.rand() * (-1) ** np.random.randint(2)
)
else:
individual[i][j] = (
BOUND * np.random.rand() * (-1) ** np.random.randint(2)
)
# elif np.random.rand() < SIGN_FLIP_MUTATION_RATE:
# individual[i][j] = -1 * individual[i][j]
# population[p][i][j] += 100
return individual
def distributed_big_mutation(individual):
for i in range(NODES):
for j in range(NODES):
if np.random.rand() < BIG_STEP_MUTATION_RATE:
individual[i][j] += (
BIG_STEP_SIZE * np.random.rand() * (-1) ** np.random.randint(2)
)
else:
individual[i][j] = (
BOUND * np.random.rand() * (-1) ** np.random.randint(2)
)
# elif np.random.rand() < SIGN_FLIP_MUTATION_RATE:
# individual[i][j] = -1 * individual[i][j]
# population[p][i][j] += 100
return individual
# =============================================================================
# Random Initialization Phase
population = np.random.uniform(-BOUND, BOUND, (POPULATION, NODES, NODES))
# print(population)
# population = standardize(population)
# print(population)
# multiprocessing pool initializer
pool = multiprocessing.Pool(pool_size)
# best_score = BATCH_SIZE
# best_elite = -1
# Genetic Algorithm Loop
for g in range(GENERATIONS):
# Simulated Annealing
# if g % 10 == 0 and STEP_SIZE > 0.1:
STEP_SIZE -= 0.005
BOUND -= 0.01
# for g in range(1):
# print(population)
print("Generation:", g)
start = time.time()
survivors, elite, elite_score = select(population)
end = time.time()
print("Selection Time:", end - start)
print("Elite Score:", elite_score)
# if g % 10 == 0:
np.save(
f"large_controllability_generation_result_3000/controllability-encoded-2-in-1-out-generation-{g}.npy",
elite,
)
# if elite_score < best_score:
# best_score = elite_score
# best_elite = survivors[0]
# print("Elite:\n", elite)
# print("10th:\n", population[9])
    # break if found the solution
# print(COST_UPPER_BOUND)
# if elite_score < COST_UPPER_BOUND:
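def _plot_saved_elite_sketch(generation):
    # Hypothetical post-run helper (a sketch, not part of the original script): reload the
    # elite saved for a given generation by the np.save() call in the loop above and
    # visualise its three behaviours with the plotting functions defined earlier. Assumes
    # the output directory and the .npy file for that generation exist.
    elite = np.load(
        f"large_controllability_generation_result_3000/"
        f"controllability-encoded-2-in-1-out-generation-{generation}.npy")
    simulate_plot_cost_bandpass(elite)
    simulate_and_plot_bandpass_reversed(elite)
    simulate_and_plot_switch(elite)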
<filename>boto3_type_annotations_with_docs/boto3_type_annotations/autoscaling_plans/paginator.py
from typing import Dict
from typing import List
from botocore.paginate import Paginator
class DescribeScalingPlanResources(Paginator):
def paginate(self, ScalingPlanName: str, ScalingPlanVersion: int, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AutoScalingPlans.Client.describe_scaling_plan_resources`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/DescribeScalingPlanResources>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ScalingPlanName='string',
ScalingPlanVersion=123,
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ScalingPlanResources': [
{
'ScalingPlanName': 'string',
'ScalingPlanVersion': 123,
'ServiceNamespace': 'autoscaling'|'ecs'|'ec2'|'rds'|'dynamodb',
'ResourceId': 'string',
'ScalableDimension': 'autoscaling:autoScalingGroup:DesiredCapacity'|'ecs:service:DesiredCount'|'ec2:spot-fleet-request:TargetCapacity'|'rds:cluster:ReadReplicaCount'|'dynamodb:table:ReadCapacityUnits'|'dynamodb:table:WriteCapacityUnits'|'dynamodb:index:ReadCapacityUnits'|'dynamodb:index:WriteCapacityUnits',
'ScalingPolicies': [
{
'PolicyName': 'string',
'PolicyType': 'TargetTrackingScaling',
'TargetTrackingConfiguration': {
'PredefinedScalingMetricSpecification': {
'PredefinedScalingMetricType': 'ASGAverageCPUUtilization'|'ASGAverageNetworkIn'|'ASGAverageNetworkOut'|'DynamoDBReadCapacityUtilization'|'DynamoDBWriteCapacityUtilization'|'ECSServiceAverageCPUUtilization'|'ECSServiceAverageMemoryUtilization'|'ALBRequestCountPerTarget'|'RDSReaderAverageCPUUtilization'|'RDSReaderAverageDatabaseConnections'|'EC2SpotFleetRequestAverageCPUUtilization'|'EC2SpotFleetRequestAverageNetworkIn'|'EC2SpotFleetRequestAverageNetworkOut',
'ResourceLabel': 'string'
},
'CustomizedScalingMetricSpecification': {
'MetricName': 'string',
'Namespace': 'string',
'Dimensions': [
{
'Name': 'string',
'Value': 'string'
},
],
'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',
'Unit': 'string'
},
'TargetValue': 123.0,
'DisableScaleIn': True|False,
'ScaleOutCooldown': 123,
'ScaleInCooldown': 123,
'EstimatedInstanceWarmup': 123
}
},
],
'ScalingStatusCode': 'Inactive'|'PartiallyActive'|'Active',
'ScalingStatusMessage': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **ScalingPlanResources** *(list) --*
Information about the scalable resources.
- *(dict) --*
Represents a scalable resource.
- **ScalingPlanName** *(string) --*
The name of the scaling plan.
- **ScalingPlanVersion** *(integer) --*
The version number of the scaling plan.
- **ServiceNamespace** *(string) --*
The namespace of the AWS service.
- **ResourceId** *(string) --*
The ID of the resource. This string consists of the resource type and unique identifier.
* Auto Scaling group - The resource type is ``autoScalingGroup`` and the unique identifier is the name of the Auto Scaling group. Example: ``autoScalingGroup/my-asg`` .
* ECS service - The resource type is ``service`` and the unique identifier is the cluster name and service name. Example: ``service/default/sample-webapp`` .
* Spot Fleet request - The resource type is ``spot-fleet-request`` and the unique identifier is the Spot Fleet request ID. Example: ``spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`` .
* DynamoDB table - The resource type is ``table`` and the unique identifier is the resource ID. Example: ``table/my-table`` .
* DynamoDB global secondary index - The resource type is ``index`` and the unique identifier is the resource ID. Example: ``table/my-table/index/my-table-index`` .
* Aurora DB cluster - The resource type is ``cluster`` and the unique identifier is the cluster name. Example: ``cluster:my-db-cluster`` .
- **ScalableDimension** *(string) --*
The scalable dimension for the resource.
* ``autoscaling:autoScalingGroup:DesiredCapacity`` - The desired capacity of an Auto Scaling group.
* ``ecs:service:DesiredCount`` - The desired task count of an ECS service.
* ``ec2:spot-fleet-request:TargetCapacity`` - The target capacity of a Spot Fleet request.
* ``dynamodb:table:ReadCapacityUnits`` - The provisioned read capacity for a DynamoDB table.
* ``dynamodb:table:WriteCapacityUnits`` - The provisioned write capacity for a DynamoDB table.
* ``dynamodb:index:ReadCapacityUnits`` - The provisioned read capacity for a DynamoDB global secondary index.
* ``dynamodb:index:WriteCapacityUnits`` - The provisioned write capacity for a DynamoDB global secondary index.
* ``rds:cluster:ReadReplicaCount`` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
- **ScalingPolicies** *(list) --*
The scaling policies.
- *(dict) --*
Represents a scaling policy.
- **PolicyName** *(string) --*
The name of the scaling policy.
- **PolicyType** *(string) --*
The type of scaling policy.
- **TargetTrackingConfiguration** *(dict) --*
The target tracking scaling policy. Includes support for predefined or customized metrics.
- **PredefinedScalingMetricSpecification** *(dict) --*
A predefined metric. You can specify either a predefined metric or a customized metric.
- **PredefinedScalingMetricType** *(string) --*
The metric type. The ``ALBRequestCountPerTarget`` metric type applies only to Auto Scaling groups, Spot Fleet requests, and ECS services.
- **ResourceLabel** *(string) --*
Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ``ALBRequestCountPerTarget`` and there is a target group for an Application Load Balancer attached to the Auto Scaling group, Spot Fleet request, or ECS service.
The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:
* app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN.
* targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.
- **CustomizedScalingMetricSpecification** *(dict) --*
A customized metric. You can specify either a predefined metric or a customized metric.
- **MetricName** *(string) --*
The name of the metric.
- **Namespace** *(string) --*
The namespace of the metric.
- **Dimensions** *(list) --*
The dimensions of the metric.
Conditional: If you published your metric with dimensions, you must specify the same dimensions in your customized scaling metric specification.
- *(dict) --*
Represents a dimension for a customized metric.
- **Name** *(string) --*
The name of the dimension.
- **Value** *(string) --*
The value of the dimension.
- **Statistic** *(string) --*
The statistic of the metric.
- **Unit** *(string) --*
The unit of the metric.
- **TargetValue** *(float) --*
The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).
- **DisableScaleIn** *(boolean) --*
Indicates whether scale in by the target tracking scaling policy is disabled. If the value is ``true`` , scale in is disabled and the target tracking scaling policy doesn't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking scaling policy can remove capacity from the scalable resource.
The default value is ``false`` .
- **ScaleOutCooldown** *(integer) --*
The amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start. This value is not used if the scalable resource is an Auto Scaling group.
While the cooldown period is in effect, the capacity that has been added by the previous scale-out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.
- **ScaleInCooldown** *(integer) --*
The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. This value is not used if the scalable resource is an Auto Scaling group.
The cooldown period is used to block subsequent scale in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, AWS Auto Scaling scales out your scalable target immediately.
- **EstimatedInstanceWarmup** *(integer) --*
The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. This value is used only if the resource is an Auto Scaling group.
- **ScalingStatusCode** *(string) --*
The scaling status of the resource.
* ``Active`` - The scaling configuration is active.
* ``Inactive`` - The scaling configuration is not active because the scaling plan is being created or the scaling configuration could not be applied. Check the status message for more information.
* ``PartiallyActive`` - The scaling configuration is partially active because the scaling plan is being created or deleted or the scaling configuration could not be fully applied. Check the status message for more information.
- **ScalingStatusMessage** *(string) --*
A simple message about the current scaling status of the resource.
:type ScalingPlanName: string
:param ScalingPlanName: **[REQUIRED]**
The name of the scaling plan.
:type ScalingPlanVersion: integer
:param ScalingPlanVersion: **[REQUIRED]**
The version number of the scaling plan.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
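# Illustrative usage sketch, not part of the annotation stub above. It assumes boto3 is
# installed, AWS credentials are configured, and that a scaling plan named
# 'my-scaling-plan' with version 1 exists (all hypothetical values).
def _example_describe_scaling_plan_resources():
    import boto3
    client = boto3.client('autoscaling-plans')
    paginator = client.get_paginator('describe_scaling_plan_resources')
    pages = paginator.paginate(ScalingPlanName='my-scaling-plan',
                               ScalingPlanVersion=1,
                               PaginationConfig={'PageSize': 50})
    for page in pages:
        for resource in page['ScalingPlanResources']:
            print(resource['ResourceId'], resource['ScalingStatusCode'])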
print '\n debug -- len(results): ', len(results)
print '\n debug -- len(results_plus): ', len(results_plus)
results.update(results_plus)
if rs_array_names:
codes['rs_array_names'] = rs_array_names
if debug:
print '\n debug -- codes: %s' % json.dumps(codes, indent=4, sort_keys=True)
print '\n debug -- final len(results): ', len(results)
keys = results.keys()
keys.sort()
print '\n debug -- final results.keys(): ', json.dumps(keys, indent=4, sort_keys=True)
# Return vocabulary results (list) and codes (dict)
return results, codes
except Exception as err:
message = str(err)
current_app.logger.info(message)
raise
def make_display_name(data):
""" Get instrument display name values from vocab element provided. Return long_name, name and id.
Sample input data item:
{
"@class" : ".VocabRecord",
"refdes" : "CE01ISSM-MFC31-00-CPMENG000",
"vocabId" : 1,
"instrument" : "Buoy Controller Engineering",
"tocL1" : "Endurance",
"tocL2" : "OR Inshore Surface Mooring",
"tocL3" : "Multi-Function Node"
}
"""
long_name = None
name = None
id = 0
try:
rd = data['refdes']
rd_len = len(rd)
# Process standard instruments
if rd_len == 27:
tmp = " ".join([data['tocL1'], data['tocL2']])
long_name = " - ".join([tmp, data['tocL3'], data['instrument']])
name = data['instrument']
id = data['vocabId']
# Irregular reference designators.
elif rd_len > 14 and rd_len < 27:
tmp = " ".join([data['tocL1'], data['tocL2']])
long_name = " - ".join([tmp, data['tocL3'], data['instrument']])
name = data['instrument']
id = data['vocabId']
else:
message = 'Malformed reference designator (%s), cannot process vocabulary.' % rd
current_app.logger.info(message)
return long_name, name, id
except Exception as err:
message = 'Failed to assemble display_name for vocabulary; %s ' % str(err)
current_app.logger.info(message)
return None, None, 0
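# Illustrative sketch: applying make_display_name() to the sample record shown in its
# docstring. With a 27-character reference designator the function takes the first branch,
# so the expected values (derived from that sample, not from a live query) are noted below.
def _example_make_display_name():
    sample = {
        "refdes": "CE01ISSM-MFC31-00-CPMENG000",
        "vocabId": 1,
        "instrument": "Buoy Controller Engineering",
        "tocL1": "Endurance",
        "tocL2": "OR Inshore Surface Mooring",
        "tocL3": "Multi-Function Node"
    }
    long_name, name, id = make_display_name(sample)
    # long_name == 'Endurance OR Inshore Surface Mooring - Multi-Function Node - Buoy Controller Engineering'
    # name == 'Buoy Controller Engineering', id == 1
    return long_name, name, id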
def create_vocabulary_codes(vocabs):
""" Create codes dictionary from vocabulary provided; the codes dictionary stores semantics provided in vocabulary.
"""
debug = False
extra_nodes = {
"AV": "AUV",
"DP": "Wire-Following Profiler",
"GL": "Coastal Glider",
"LJ": "Low-Power JBox",
"LV": "Low-Voltage Node",
"MF": "Multi-Function Node",
"MJ": "Medium-Power JBox",
"PC": "200m Platform",
"PD": "Profiler Docking Station",
"PG": "Global Profiling Glider",
"PN": "Primary Node",
"SC": "Winch Controller",
"SF": "Shallow Profiler",
"XX": "Bench Instrument"
}
extra_arrays = {
'RS': 'Cabled'
}
extra_subsites = {
"ASPI": "ASPI"
}
# The codes dictionary stores semantics provided in vocabulary.
codes = {'arrays': {}, 'subsites': {}, 'nodes': {}, 'classes': {}}
arrays = extra_arrays
subsites = extra_subsites
nodes = extra_nodes
classes = {}
try:
if debug: print '\n debug -- entered create_vocabulary_codes...'
if not vocabs or vocabs is None:
message = 'vocabulary provided is empty or None'
raise Exception(message)
# Process each vocabulary item
for item in vocabs:
rd = item['refdes']
len_rd = len(rd)
vocab = item.copy()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# -- Compile codes for dynamic display name generation
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
subsite = None
node = None
instr = None
if len_rd == 27 or (len_rd > 14 and len_rd < 27):
subsite, node, instr = rd.split('-', 2)
elif len_rd == 14:
subsite, node = rd.split('-')
elif len_rd == 8:
subsite = rd
else:
print '\n debug malformed reference designator: ', rd
continue
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Process reference designator into components:
# array_code, subsite_code, node_code, instr_class, [temp vars: instr, port, instrument]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
array_code = rd[:2]
subsite_code = ''
if subsite is not None:
subsite_code = subsite[4:8]
node_code = ''
if node is not None:
node_code = node[0:2]
instr_class = ''
if instr is not None:
if '-' in instr:
port, instrument = instr.split('-')
if instrument:
if len(instrument) >= 5:
instr_class = instrument[0:5]
else:
instr_class = instrument
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Accumulate the information for codes array
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if array_code not in arrays:
if vocab['tocL1']:
arrays[array_code] = vocab['tocL1']
if subsite_code:
if subsite_code not in subsites:
if vocab['tocL2']:
subsites[subsite_code] = vocab['tocL2']
if node_code:
if node_code not in nodes:
if vocab['tocL3']:
nodes[node_code] = vocab['tocL3']
if instr_class:
if instr_class not in classes:
if vocab['instrument']:
classes[instr_class] = vocab['instrument']
# Compile information into codes dictionary.
codes['arrays'] = arrays
codes['subsites'] = subsites
codes['nodes'] = nodes
codes['classes'] = classes
if debug: print '\n debug -- codes(%d): %s' % (len(codes), json.dumps(codes, indent=4, sort_keys=True) )
return codes
except Exception as err:
message = 'Error processing vocabulary codes. %s' % str(err)
current_app.logger.info(message)
raise
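# Illustrative note: for the same sample reference designator ('CE01ISSM-MFC31-00-CPMENG000'),
# create_vocabulary_codes() files its pieces under these keys (alongside the hard-coded
# extra_* entries):
#   codes['arrays']['CE']     -> 'Endurance'                    (rd[:2])
#   codes['subsites']['ISSM'] -> 'OR Inshore Surface Mooring'   (subsite[4:8])
#   codes['nodes']['MF']      -> 'Multi-Function Node'          (node[0:2])
#   codes['classes']['CPMEN'] -> 'Buoy Controller Engineering'  (instrument[0:5])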
# ========================================================================
# Vocabulary database queries for stream and stream parameters
# ========================================================================
def get_parameter_name_by_parameter(stream_parameter_name):
""" Get parameter name using database.
"""
debug = False
streamParameter = StreamParameter.query.filter_by(stream_parameter_name = stream_parameter_name).first()
if streamParameter is None or streamParameter is []:
if debug: print '[param] ', stream_parameter_name
return None
stream_display_name = streamParameter.standard_name
return stream_display_name
def get_stream_name_by_stream(stream):
""" Get stream name using database.
"""
debug = False
_stream = Stream.query.filter_by(stream=stream).first()
    if _stream is None:
        if debug: print '[stream] ', stream
return None
stream_display_name = _stream.concatenated_name
return stream_display_name
# ========================================================================
# utility functions
# ========================================================================
def get_uframe_vocab_info():
""" Get uframe vocabulary configuration information.
"""
try:
uframe_url = current_app.config['UFRAME_VOCAB_URL']
timeout = current_app.config['UFRAME_TIMEOUT_CONNECT']
timeout_read = current_app.config['UFRAME_TIMEOUT_READ']
return uframe_url, timeout, timeout_read
except:
message = 'Unable to locate UFRAME_VOCAB_URL, UFRAME_TIMEOUT_CONNECT or UFRAME_TIMEOUT_READ in config file.'
current_app.logger.info(message)
raise Exception(message)
def get_vocab_from_uframe():
""" Get vocab items from uframe. Return dict (with reference designator as key), None or exception.
Sample response:
[
{
"@class" : ".VocabRecord",
"refdes" : "CE01ISSM-MFC31-00-CPMENG000",
"vocabId" : 1,
"instrument" : "Buoy Controller Engineering",
"tocL1" : "Endurance",
"tocL2" : "OR Inshore Surface Mooring",
"tocL3" : "Multi-Function Node"
},
...
]
"""
try:
uframe_url, timeout, timeout_read = get_uframe_vocab_info()
url = uframe_url + '/vocab'
response = requests.get(url, timeout=(timeout, timeout_read))
if response.status_code != 200:
message = '(%d) Failed to successfully get vocabulary from uframe.' % response.status_code
raise Exception(message)
if not response or response is None:
message = 'Failed to get uframe vocabulary; returned empty list.'
raise Exception(message)
try:
result = response.json()
except Exception as err:
message = 'Malformed json response from uframe vocabulary. %s' % str(err)
raise Exception(message)
if not result or result is None:
message = 'Empty (or None) result returned for uframe vocabulary.'
raise Exception(message)
return result
except ConnectionError:
message = 'ConnectionError getting uframe vocabulary.'
current_app.logger.info(message)
raise Exception(message)
except Timeout:
message = 'Timeout getting uframe vocabulary.'
current_app.logger.info(message)
raise Exception(message)
except Exception as err:
message = "Error getting uframe vocabulary. %s" % str(err)
current_app.logger.info(message)
raise
def build_long_display_name(rd):
""" Get long display name for reference designator using the codes dictionary.
"""
debug = False
is_rs = False
try:
        # Get 'vocab_codes' from cache; if not cached, build the cache, set it and continue
dict_cached = cache.get('vocab_codes')
if dict_cached:
vocab_codes = dict_cached
else:
vocab_dict, vocab_codes = compile_vocab()
cache.set('vocab_dict', vocab_dict, timeout=CACHE_TIMEOUT)
cache.set('vocab_codes', vocab_codes, timeout=CACHE_TIMEOUT)
# Verify 'vocab_codes' has content, otherwise error
if not vocab_codes:
message = 'Vocabulary processing failed to obtain vocab_codes dictionary, unable to process.'
current_app.logger.info(message)
return None
# Process reference designator using 'vocab_dict' and 'vocab_codes'
len_rd = len(rd)
if len_rd < 8:
return None
# Build display name for instrument
rs_code = None
array_code = rd[:2]
if array_code == 'RS':
is_rs = True
rs_code = rd[:8]
if len_rd == 27:
if debug: print '\n (build long display name) reference designator \'%s\'.' % rd
subsite, node, instr = rd.split('-', 2)
            subsite_code = subsite[4:8]
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import torch
from . import enums
_end_ipu_block = torch.ops.poptorch.end_ipu_block
def ipu_print_tensor(tensor, title=""):
return torch.ops.poptorch.ipu_print_tensor(tensor, title)
def nop(tensor):
""" A no-operation: it is functionally the same as an identity but is never
    eliminated by PopART patterns or inlining, so it is useful for debugging.
:param torch.Tensor tensor: the tensor to simply return by the no-op.
:returns: The same tensor which was input.
:rtype: torch.Tensor
"""
return torch.ops.popart.nop(tensor)
def recomputationCheckpoint(*tensors):
"""Operation for checkpointing values in a computational pipeline stage.
When recomputation is enabled, these values will not be recomputed and they
will be stored in memory between forward and backwards passes instead.
:param tensors: one or more tensors which should be checkpointed.
:return: Tensors (same number and shape as the input tensors).
"""
out = torch.ops.poptorch.recomputation_checkpoint(tensors)
if len(tensors) == 1:
return out[0]
return out
def serializedMatMul(lhs, rhs, mode, factor=0, keep_precision=False):
""" Calculates a matrix product using a serialized matrix multiplication.
The matrix multiplication, lhs*rhs, is split into separate smaller
multiplications, calculated one after the other, to reduce the memory
requirements of the multiplication and its gradient calculation.
    :param torch.Tensor lhs: Left-hand side input matrix.
:param torch.Tensor rhs: Right-hand side input matrix.
:param poptorch.MatMulSerializationMode mode: Which dimension of the matmul
to serialize on: for matrix A (m by n) multiplied by matrix B (n by p).
* InputChannels: Split across the input channels (dimension m).
        * ReducingDim: Split across the reducing dimension (n).
        * OutputChannels: Split across the output channels (dimension p).
* Disabled: Same as an ordinary matrix multiplication.
:param int factor: Number of serialized multiplications. Must be a factor of
the dimension to serialize on.
:param bool keep_precision: (Half/float16 inputs only) The forward op when
serializing over ReducingDim and the backwards ops when serializing over
InputChannels involve an addition step. If ``keep_precision`` is True,
these additions will occur using float32 rather than half precision
partials, matching those used for the individual matrix multiplications.
"""
assert isinstance(keep_precision, bool)
assert isinstance(factor, int)
assert isinstance(mode, enums.MatMulSerializationMode)
out = torch.matmul(lhs, rhs)
return torch.ops.poptorch.set_matmul_serialization(out, mode.value, factor,
keep_precision)
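# Hedged usage sketch (illustrative addition, not part of the original module):
# serialize a matmul over its output channels in 4 slices. The shapes and the
# factor are assumptions for the example; the call only takes effect when the
# model is traced by PopTorch.
def _example_serialized_matmul():
    lhs = torch.randn(8, 64)     # m x n
    rhs = torch.randn(64, 256)   # n x p; p must be divisible by the factor
    return serializedMatMul(
        lhs, rhs, enums.MatMulSerializationMode.OutputChannels, factor=4)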
def set_available_memory(tensor, available_memory_proportion):
""" Sets the available memory for a convolution or matrix multiplication.
    When called on the output of a convolution or a matrix
    multiplication, it sets the proportion of tile memory (between 0 and 1) to
    be made available as temporary memory for the convolution/matrix
    multiplication. Less temporary memory will reduce the time performance but
    may use less memory overall. Lower memory proportions result in the use of
    more live (not temporary) memory, and so the overall memory may increase
    for too low values, possibly resulting in out of memory errors.
    In the event that the value is too low, the planner will replan for the
    smallest memory usage possible.
>>> class BasicNetwork(nn.Module):
... def __init__(self):
... super().__init__()
... self.conv = nn.Conv2d(4, 4, 3, stride=2)
...
... def forward(self, x):
... out = self.conv(x)
... out = poptorch.set_available_memory(out, 0.2)
... return out
:param torch.Tensor tensor: output tensor of a convolution or matrix
multiplication (otherwise the statement will be an identity).
:param float available_memory_proportion: proportion between 0.0 and 1.0
of tile memory to be made available for temporary memory (default 0.6).
:returns: input tensor, as if calling an identity function.
:rtype: torch.Tensor
"""
return torch.ops.poptorch.set_available_memory(
tensor, available_memory_proportion)
def _assertIdIsValid(name, value, expected_type):
assert isinstance(value, expected_type) or \
(isinstance(value, int) and value >= 0), (
f"{name} must be either a positive integer or a "
f"{expected_type.__name__}")
class Block(torch.nn.Module):
"""Runs all layers called inside this scope on a specified IPU.
>>> with poptorch.Block("IPU0"):
... self.layer = MyLayer(x)
"""
# Will be set by the ExecutionStrategy before the graph is traced.
# If it's None then it means it's a CPU execution of the graph so
# turn the whole class into a no-op.
_stages_manager = None
@staticmethod
def useAutoId():
"""Call this method at the beginning of your ``forward()`` method to
enable automatic block id generation.
Blocks with a None ``user_id`` will be assigned an automatic id
which will be the index of this block in the list of id-less Blocks.
>>> poptorch.Block.useAutoId()
>>> with poptorch.Block(): # user_id = "0"
... layer()
>>> with poptorch.Block("special_block"): # user_id = "special_block"
... layer()
>>> with poptorch.Block(): # user_id = "1"
... layer()
"""
if Block._stages_manager is not None:
Block._stages_manager.resetAutoId()
def __init__(self, user_id=None, ipu_id=None):
"""
:param user_id: A user defined identifier for the block.
Blocks with the same id are considered as being a single block.
Block identifiers are also used to manually specify pipelines or
phases.
:type user_id: str, optional
:param int, optional ipu_id: The id of the IPU to run on.
Note that the ``ipu_id`` is an index
in a multi-IPU device within PopTorch, and is
separate and distinct from the device ids used by
``gc-info``.
"""
super().__init__()
self._user_id = user_id
self._ipu_id = ipu_id
def __enter__(self):
if Block._stages_manager is not None:
Block._stages_manager.beginStage(self._user_id, self._ipu_id)
def __exit__(self, type, value, traceback):
_end_ipu_block()
class BeginBlock(torch.nn.Module):
"""Runs all layers from the given layer until the beginning of the next
block on a specified IPU.
All layers after this layer will also run on
the same IPU until another ``BeginBlock`` is encountered.
By default :py:class:`PipelinedExecution` will be used, however this
can be overridden in the `poptorch.Options`.
.. seealso:: :py:meth:`poptorch.Options.setExecutionStrategy`
>>> self.layer = poptorch.BeginBlock(MyLayer(x))
"""
def __init__(self, layer_to_call, user_id=None, ipu_id=None):
"""
All subsequent layers of the network will be part of this block until
another layer is wrapped.
:param torch.nn.Module layer_to_call: The layer to run on the
specified IPU.
:param user_id: A user defined identifier for the block.
Blocks with the same id are considered as being a single block.
Block identifiers are also used to manually create
:py:class:`Stages<poptorch.Stage>` and
:py:class:`Phases<poptorch.Phase>`.
:type user_id: str, optional
:param int, optional ipu_id: The id of the IPU to run on.
Note that the ``ipu_id`` is an index
in a multi-IPU device within PopTorch, and is
separate and distinct from the device ids used by
``gc-info``.
"""
super().__init__()
self._user_id = user_id
self._layer_to_call = layer_to_call
self._ipu_id = ipu_id
def __call__(self, *input, **kwargs):
if Block._stages_manager is not None:
if self._user_id is None:
self._user_id = Block._stages_manager.nextAutoId()
Block._stages_manager.beginStage(self._user_id, self._ipu_id)
out = self._layer_to_call(*input, **kwargs)
return out
def custom_op(inputs, name, domain, domain_version, example_outputs):
"""Applies a custom operation, implemented within PopART, to the inputs.
:param tuple inputs: A tuple of input tensors, for example, (x, y).
    :param str name: unique name of the PopART custom op
:param str domain: domain for the op
:param int domain_version: version of the domain to use
    :param iterable example_outputs: a tuple of tensors with the same type
        and shape as the outputs; the values do not matter as they will all
        be set to zero for tracing purposes.
:returns: The outputs of the forward op of the custom op.
"""
transformed_outputs = []
for output in example_outputs:
# Dead code which will get eliminated but will safely allow the same
# input to be provided to example_output (since it is only supposed
# to be a template). Otherwise the compiler may recognise the alias.
transformed_outputs.append(torch.zeros_like(output))
return torch.ops.poptorch.custom_operation(inputs, name, domain,
domain_version,
len(transformed_outputs),
transformed_outputs)
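# Hedged usage sketch (illustrative addition): calling a hypothetical PopART
# custom op. The name and domain below are placeholders; a matching op must be
# registered with PopART for this to run.
def _example_custom_op(x, y):
    return custom_op(
        (x, y),
        name="ExampleOp",        # hypothetical op name
        domain="com.example",    # hypothetical domain
        domain_version=1,
        example_outputs=(x, y))  # outputs assumed to mirror the inputs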
def identity_loss(x, reduction):
"""Marks this operation as being part of the loss calculation and, as such,
will back-propagate through it in the PopTorch autograd. This enables
multiple losses and custom losses.
:param torch.Tensor loss: The calculated loss.
:param str reduction: Reduce the loss output as per PyTorch loss
semantics. Supported values are:
* ``"sum"``: Sum the losses.
* ``"mean"``: Take the mean of the losses.
* ``"none"``: Don't reduce the losses.
:returns: An identity loss custom op.
"""
if reduction == "sum":
return torch.ops.poptorch.identity_loss(x, 0)
if reduction == "mean":
return torch.ops.poptorch.identity_loss(x, 1)
assert reduction == "none", "Unsupported reduction type!"
return torch.ops.poptorch.identity_loss(x, 2)
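# Hedged usage sketch (illustrative addition): combining two standard losses
# into a single value that PopTorch will back-propagate through.
def _example_combined_loss(prediction, target):
    l1 = torch.nn.functional.l1_loss(prediction, target)
    mse = torch.nn.functional.mse_loss(prediction, target)
    return identity_loss(l1 + mse, reduction="sum")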
class MultiConv():
"""
Combines all convolution layers evaluated inside this scope into a single
multi-convolution.
Multi-convolutions allow for a set of data-independent convolutions to be
    executed in parallel. Executing convolutions in parallel can lead to
"""Database helper functions for single database connectivity
Manage saved settings for Db2 connections
db_connect() - Connect using loaded/prompted credentials
db_connect_prompt() - Prompts for connection settings
db_connected() - Return connection status
db_connection() - Return handle for current connection
db_disconnect() - Disconnect
db_error() - Handle Db2 Errors
db_keys_get() - Load secret key
db_keys_lock() - Lock secret key using password
db_keys_set() - Set/save secret key
db_keys_unlock() - Unlock secret key using password
db_load_settings() - Load saved settings
db_save_settings() - Save current settings
db_show_settings() - Display loaded settings
password_to_key() - Convert text pass-phrase to usable key for lock/unlock
table_list() - Get list of Db2 tables
The secret key is generated and stored in a file in the
user's home directory. The secret key itself is not encrypted
until the db_keys_lock() function is called. Once encrypted,
the secret key can be decrypted temporarily by supplying the password
either via the --password option (where applicable) or when
prompted. The secret key can be unlocked (and saved in that state)
using the db_keys_unlock() function.
"""
import base64
import collections
import os
import pickle
import stat
from pathlib import Path
from getpass import getpass
from hashlib import blake2b
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.fernet import Fernet
import ibm_db
_hdbc = None
_sqlerror = None
_sqlcode = None
_sqlstate = None
_default_environment = "dev"
_default_settings_location = Path("") # Location for dev_host_db.pickle files
_default_secret_key_location = Path.home() # Location of secret key file (user's home directory)
_secretkeyfile = _default_secret_key_location / ".db2_helpers.secret.key"
_default_secretkey = collections.OrderedDict([
("secret", None),
("locked", False),
("hash", ""),
("secrethash", "")
])
_default_settings = collections.OrderedDict([
("database", "sample"),
("hostname", "localhost"),
("protocol", "tcpip"),
("port", "50000"),
("security", "nossl"),
("servercert", "db2inst1.arm"),
("uid", "db2inst1"),
("pwd", "password"),
("environment", _default_environment),
("secrethash", "") # Hash of secret key used to encrypt password
])
_settings = _default_settings.copy()
_prompt_label = collections.OrderedDict([
("database", "database name"),
("hostname", "host name for database"),
("protocol", "protocol for database"),
("port", "port for tcpip connection"),
("servercert", "certificate file for database"),
("uid", "userid for database connection"),
("pwd", "password for database connection"),
])
def db_connect(settings: collections.OrderedDict = None) -> ibm_db.IBM_DBConnection or None:
"""Connect to Db2"""
global _hdbc
if _hdbc and db_connected():
return _hdbc
if not settings:
print("Settings not loaded")
_hdbc = None
return _hdbc
try:
if not settings["database"]:
print("Settings are incorrect")
_hdbc = None
return _hdbc
except KeyError:
print("Settings content is corrupted")
_hdbc = None
return _hdbc
if "security" in settings and settings["security"].upper() == "SSL":
dsn = (
"DRIVER={{IBM DB2 ODBC DRIVER}};"
"DATABASE={0};"
"HOSTNAME={1};"
"PORT={2};"
"PROTOCOL=TCPIP;"
"UID={3};"
"PWD={4};"
"SECURITY=SSL;SSLServerCertificate={5}").format(settings["database"],
settings["hostname"],
settings["port"],
settings["uid"],
settings["pwd"],
settings["servercert"])
else:
dsn = (
"DRIVER={{IBM DB2 ODBC DRIVER}};"
"DATABASE={0};"
"HOSTNAME={1};"
"PORT={2};"
"PROTOCOL=TCPIP;"
"UID={3};"
"PWD={4};").format(settings["database"],
settings["hostname"],
settings["port"],
settings["uid"],
settings["pwd"])
# Get a database handle (hdbc) for subsequent access to DB2
try:
_hdbc = ibm_db.connect(dsn, "", "")
except Exception as err:
print(str(err))
_hdbc = None
return _hdbc
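# Hedged usage sketch (illustrative addition, not part of the original module):
# prompt for settings, connect, verify the connection and disconnect. The
# database and host names are example values only.
def _example_connect():
    settings = db_connect_prompt(database="sample", hostname="localhost")
    if settings is None:
        return None
    hdbc = db_connect(settings)
    if hdbc and db_connected(hdbc):
        db_disconnect(hdbc)
    return hdbc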
def db_connect_prompt(database=None, hostname=None) -> collections.OrderedDict or None:
"""Prompt for connection settings, do not actually connect"""
global _default_settings, _prompt_label
settings = _default_settings.copy()
if database:
settings["database"] = database
if hostname:
settings["hostname"] = hostname
print("Enter the database connection details (Enter a period '.' to cancel input")
for k in settings.keys():
if k in ["servercert", "hash", "secrethash", "environment"]:
pass
elif k == "pwd":
x = getpass("Enter password: ")
if x == ".":
return None
if x:
settings[k] = x
elif k == "security":
prompt_string = "Enter 'SSL' to use an encrypted connection[" + settings[k] + "]: "
x = input(prompt_string).lower() or settings[k]
if x == ".":
return None
m = "servercert"
settings[k] = x
if x == "ssl":
y = input("Enter the name of the .ARM file containing the server certificate["
+ settings[m] + "]: ") or settings[m]
if y == ".":
return None
z = Path(y)
if z.is_file() and os.access(y, os.R_OK):
settings[m] = y
else:
print("Unable to access file", z)
return None
else:
settings[m] = ""
else:
prompt_string = "Enter the " + _prompt_label[k] + "[" + settings[k] + "]: "
x = input(prompt_string)
if x == ".":
return None
if x:
settings[k] = x.lower()
return settings
def db_connected(hdbc=None) -> bool:
""" Return state of Db2 connection"""
global _hdbc
if hdbc:
return ibm_db.active(hdbc)
if _hdbc:
return ibm_db.active(_hdbc)
return False
def db_connection() -> ibm_db.IBM_DBConnection or None:
""" Return Db2 connection handle"""
global _hdbc
return _hdbc
# noinspection PyBroadException
def db_disconnect(hdbc=None):
"""Disconnect from the database"""
if hdbc:
use_hdbc = hdbc
else:
use_hdbc = _hdbc
try:
ibm_db.close(use_hdbc)
except Exception:
db_error(False)
# noinspection PyBroadException
def db_error(quiet):
"""Handle Db2 Errors"""
global _sqlerror, _sqlcode, _sqlstate
errmsg = ibm_db.stmt_errormsg().replace("\r", " ")
errmsg = errmsg[errmsg.rfind("]") + 1:].strip()
_sqlerror = errmsg
msg_start = errmsg.find("SQLSTATE=")
if msg_start != -1:
msg_end = errmsg.find(" ", msg_start)
if msg_end == -1:
msg_end = len(errmsg)
_sqlstate = errmsg[msg_start + 9:msg_end]
else:
_sqlstate = "0"
msg_start = errmsg.find("SQLCODE=")
if msg_start != -1:
msg_end = errmsg.find(" ", msg_start)
if msg_end == -1:
msg_end = len(errmsg)
_sqlcode = errmsg[msg_start + 8:msg_end]
try:
_sqlcode = int(_sqlcode)
except Exception:
pass
else:
_sqlcode = 0
if quiet:
return
print(errmsg)
def db_keys_get(password=None, prompt=True) -> collections.OrderedDict:
"""Load saved secret key"""
global _secretkeyfile, _default_secretkey
passphrase = ""
try:
with open(_secretkeyfile, "rb") as f:
secretkey = pickle.load(f)
if secretkey["locked"]:
getit = True
if password:
passphrase = password
if secretkey["hash"] == blake2b(str.encode(passphrase)).hexdigest():
print("Secret key file is locked.")
print("Using supplied password for temporary unlock.")
else:
print("Secret key file is locked.")
print("Supplied unlock password does not match secret")
elif prompt:
print("Secret key file is locked.")
print("No secret password supplied")
attempts = 0
while getit:
attempts += 1
if attempts > 9:
getit = False
passphrase = getpass("Enter password: ")
if secretkey["hash"] == blake2b(str.encode(passphrase)).hexdigest():
getit = False
k = Fernet(password_to_key(passphrase))
secretkey["secret"] = k.decrypt(str.encode(secretkey["secret"])).decode()
secretkey["locked"] = False
except FileNotFoundError:
print("Secret key file does not exist, creating new one")
secretkey = _default_secretkey.copy()
secretkey = db_keys_set(secretkey, True)
return secretkey
# noinspection PyBroadException
def db_keys_lock(passphrase) -> bool:
"""Lock secret key with a pass phrase"""
global _secretkeyfile
try:
with open(_secretkeyfile, "rb") as f:
secretkey = pickle.load(f)
if secretkey["locked"]:
print("Secret key file is already locked")
return True
if passphrase:
usepass = passphrase
else:
usepass = getpass("Enter pass phrase: ")
usepass2 = getpass("Enter pass phrase again: ")
print("")
if usepass != usepass2:
print("Pass phrase mismatch, secret key still unlocked")
return False
if usepass:
k = Fernet(password_to_key(usepass))
secretkey["secret"] = k.encrypt(str.encode(secretkey["secret"])).decode()
secretkey["locked"] = True
secretkey["hash"] = blake2b(str.encode(usepass)).hexdigest()
db_keys_set(secretkey, False)
except Exception:
print("Error locking secret key content")
return False
print("Secret key successfully locked")
return True
# noinspection PyBroadException
def db_keys_set(secretkey: collections.OrderedDict, newkey=False) -> collections.OrderedDict:
"""Save secret key with option to generate a new one"""
global _secretkeyfile
global _default_secretkey
if newkey:
secret = Fernet.generate_key() # Create new secret
secrethash = blake2b(secret).hexdigest()
secretkey = _default_secretkey
secretkey["secret"] = secret.decode()
secretkey["locked"] = False
secretkey["hash"] = None
secretkey["secrethash"] = secrethash
try:
with open(_secretkeyfile, "wb") as f:
pickle.dump(secretkey, f)
except PermissionError:
print("Failed trying to write secret key file (permissions).")
return collections.OrderedDict()
except FileNotFoundError:
print("Failed trying to write secret key file (not found).")
return collections.OrderedDict()
try:
os.chmod(_secretkeyfile, stat.S_IRUSR | stat.S_IWUSR)
except PermissionError:
print("Failed setting permissions on secret key file.")
return collections.OrderedDict()
return secretkey
# noinspection PyBroadException
def db_keys_unlock(passphrase) -> bool:
"""Unlock secret key with pass phrase"""
global _secretkeyfile
try:
with open(_secretkeyfile, "rb") as f:
secretkey = pickle.load(f)
if not secretkey["locked"]:
print("Secret key file is already unlocked")
return True
if passphrase:
usepass = passphrase
else:
usepass = getpass("Enter pass phrase: ")
print("")
if usepass:
if secretkey["hash"] == blake2b(str.encode(usepass)).hexdigest():
k = Fernet(password_to_key(usepass))
secretkey["secret"] = k.decrypt(str.encode(secretkey["secret"])).decode()
secretkey["locked"] = False
db_keys_set(secretkey, False)
else:
print("Pass phrase did not match, secret key remains locked")
return False
except Exception:
print("Error locking secret key content")
return False
print("Secret key successfully unlocked")
return True
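# Hedged usage sketch (illustrative addition): the lock/unlock lifecycle for the
# secret key file described in the module docstring. The pass phrase is a
# placeholder for this example.
def _example_key_lifecycle():
    keys = db_keys_get()                  # creates the key file on first use
    if keys and not keys.get("locked"):
        db_keys_lock("example-pass-phrase")
        db_keys_unlock("example-pass-phrase")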
# noinspection PyBroadException
def db_load_settings(database, hostname, environment=_default_environment,
password=None) -> collections.OrderedDict or None:
"""Load saved settings"""
global _default_settings_location
keys = db_keys_get(password)
fname = _default_settings_location / str(
environment.lower() + "_" + hostname.lower() + "_" + database.lower() + ".pickle")
try:
with open(fname, "rb") as f:
settings = pickle.load(f)
if keys:
if settings["secrethash"] == keys["secrethash"]:
k = Fernet(str.encode(keys["secret"]))
settings["pwd"] = k.decrypt(str.encode(settings["pwd"])).decode()
else:
print("Saved settings are incorrect, wrong secret key")
return None
except Exception:
return None
return settings
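# Hedged usage sketch (illustrative addition): load previously saved settings
# and fall back to prompting (and saving) when none exist. Database and host
# names are example values only.
def _example_load_or_prompt():
    settings = db_load_settings("sample", "localhost", environment="dev")
    if settings is None:
        settings = db_connect_prompt(database="sample", hostname="localhost")
        if settings:
            db_save_settings(settings)
    return settings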
def db_save_settings(settings: collections.OrderedDict, password=None) -> bool:
"""Save settings"""
global _default_secretkey
use_settings = settings.copy()
keys = db_keys_get(password)
if not keys or "secret" not in keys or not keys["secret"]:
print("Setting up new secret key | |
# pulumi/pulumi-vault: sdk/python/pulumi_vault/pkisecret/secret_backend_role.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SecretBackendRoleArgs', 'SecretBackendRole']
@pulumi.input_type
class SecretBackendRoleArgs:
def __init__(__self__, *,
backend: pulumi.Input[str],
allow_any_name: Optional[pulumi.Input[bool]] = None,
allow_bare_domains: Optional[pulumi.Input[bool]] = None,
allow_glob_domains: Optional[pulumi.Input[bool]] = None,
allow_ip_sans: Optional[pulumi.Input[bool]] = None,
allow_localhost: Optional[pulumi.Input[bool]] = None,
allow_subdomains: Optional[pulumi.Input[bool]] = None,
allowed_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_domains_template: Optional[pulumi.Input[bool]] = None,
allowed_other_sans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_uri_sans: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
basic_constraints_valid_for_non_ca: Optional[pulumi.Input[bool]] = None,
client_flag: Optional[pulumi.Input[bool]] = None,
code_signing_flag: Optional[pulumi.Input[bool]] = None,
countries: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
email_protection_flag: Optional[pulumi.Input[bool]] = None,
enforce_hostnames: Optional[pulumi.Input[bool]] = None,
ext_key_usages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
generate_lease: Optional[pulumi.Input[bool]] = None,
key_bits: Optional[pulumi.Input[int]] = None,
key_type: Optional[pulumi.Input[str]] = None,
key_usages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
localities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_ttl: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
no_store: Optional[pulumi.Input[bool]] = None,
not_before_duration: Optional[pulumi.Input[str]] = None,
organization_unit: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
organizations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
policy_identifiers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
postal_codes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
provinces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
require_cn: Optional[pulumi.Input[bool]] = None,
server_flag: Optional[pulumi.Input[bool]] = None,
street_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[str]] = None,
use_csr_common_name: Optional[pulumi.Input[bool]] = None,
use_csr_sans: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a SecretBackendRole resource.
:param pulumi.Input[str] backend: The path the PKI secret backend is mounted at, with no leading or trailing `/`s.
:param pulumi.Input[bool] allow_any_name: Flag to allow any name
:param pulumi.Input[bool] allow_bare_domains: Flag to allow certificates matching the actual domain
:param pulumi.Input[bool] allow_glob_domains: Flag to allow names containing glob patterns.
:param pulumi.Input[bool] allow_ip_sans: Flag to allow IP SANs
:param pulumi.Input[bool] allow_localhost: Flag to allow certificates for localhost
:param pulumi.Input[bool] allow_subdomains: Flag to allow certificates matching subdomains
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_domains: List of allowed domains for certificates
:param pulumi.Input[bool] allowed_domains_template: Flag, if set, `allowed_domains` can be specified using identity template expressions such as `{{identity.entity.aliases.<mount accessor>.name}}`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_other_sans: Defines allowed custom SANs
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_uri_sans: Defines allowed URI SANs
:param pulumi.Input[bool] basic_constraints_valid_for_non_ca: Flag to mark basic constraints valid when issuing non-CA certificates
:param pulumi.Input[bool] client_flag: Flag to specify certificates for client use
:param pulumi.Input[bool] code_signing_flag: Flag to specify certificates for code signing use
:param pulumi.Input[Sequence[pulumi.Input[str]]] countries: The country of generated certificates
:param pulumi.Input[bool] email_protection_flag: Flag to specify certificates for email protection use
:param pulumi.Input[bool] enforce_hostnames: Flag to allow only valid host names
:param pulumi.Input[Sequence[pulumi.Input[str]]] ext_key_usages: Specify the allowed extended key usage constraint on issued certificates
:param pulumi.Input[bool] generate_lease: Flag to generate leases with certificates
:param pulumi.Input[int] key_bits: The number of bits of generated keys
:param pulumi.Input[str] key_type: The type of generated keys
:param pulumi.Input[Sequence[pulumi.Input[str]]] key_usages: Specify the allowed key usage constraint on issued certificates
:param pulumi.Input[Sequence[pulumi.Input[str]]] localities: The locality of generated certificates
:param pulumi.Input[str] max_ttl: The maximum TTL
:param pulumi.Input[str] name: The name to identify this role within the backend. Must be unique within the backend.
:param pulumi.Input[bool] no_store: Flag to not store certificates in the storage backend
:param pulumi.Input[str] not_before_duration: Specifies the duration by which to backdate the NotBefore property.
:param pulumi.Input[Sequence[pulumi.Input[str]]] organization_unit: The organization unit of generated certificates
:param pulumi.Input[Sequence[pulumi.Input[str]]] organizations: The organization of generated certificates
        :param pulumi.Input[Sequence[pulumi.Input[str]]] policy_identifiers: Specify the list of allowed policy OIDs
:param pulumi.Input[Sequence[pulumi.Input[str]]] postal_codes: The postal code of generated certificates
:param pulumi.Input[Sequence[pulumi.Input[str]]] provinces: The province of generated certificates
:param pulumi.Input[bool] require_cn: Flag to force CN usage
:param pulumi.Input[bool] server_flag: Flag to specify certificates for server use
:param pulumi.Input[Sequence[pulumi.Input[str]]] street_addresses: The street address of generated certificates
:param pulumi.Input[str] ttl: The TTL
:param pulumi.Input[bool] use_csr_common_name: Flag to use the CN in the CSR
:param pulumi.Input[bool] use_csr_sans: Flag to use the SANs in the CSR
"""
pulumi.set(__self__, "backend", backend)
if allow_any_name is not None:
pulumi.set(__self__, "allow_any_name", allow_any_name)
if allow_bare_domains is not None:
pulumi.set(__self__, "allow_bare_domains", allow_bare_domains)
if allow_glob_domains is not None:
pulumi.set(__self__, "allow_glob_domains", allow_glob_domains)
if allow_ip_sans is not None:
pulumi.set(__self__, "allow_ip_sans", allow_ip_sans)
if allow_localhost is not None:
pulumi.set(__self__, "allow_localhost", allow_localhost)
if allow_subdomains is not None:
pulumi.set(__self__, "allow_subdomains", allow_subdomains)
if allowed_domains is not None:
pulumi.set(__self__, "allowed_domains", allowed_domains)
if allowed_domains_template is not None:
pulumi.set(__self__, "allowed_domains_template", allowed_domains_template)
if allowed_other_sans is not None:
pulumi.set(__self__, "allowed_other_sans", allowed_other_sans)
if allowed_uri_sans is not None:
pulumi.set(__self__, "allowed_uri_sans", allowed_uri_sans)
if basic_constraints_valid_for_non_ca is not None:
pulumi.set(__self__, "basic_constraints_valid_for_non_ca", basic_constraints_valid_for_non_ca)
if client_flag is not None:
pulumi.set(__self__, "client_flag", client_flag)
if code_signing_flag is not None:
pulumi.set(__self__, "code_signing_flag", code_signing_flag)
if countries is not None:
pulumi.set(__self__, "countries", countries)
if email_protection_flag is not None:
pulumi.set(__self__, "email_protection_flag", email_protection_flag)
if enforce_hostnames is not None:
pulumi.set(__self__, "enforce_hostnames", enforce_hostnames)
if ext_key_usages is not None:
pulumi.set(__self__, "ext_key_usages", ext_key_usages)
if generate_lease is not None:
pulumi.set(__self__, "generate_lease", generate_lease)
if key_bits is not None:
pulumi.set(__self__, "key_bits", key_bits)
if key_type is not None:
pulumi.set(__self__, "key_type", key_type)
if key_usages is not None:
pulumi.set(__self__, "key_usages", key_usages)
if localities is not None:
pulumi.set(__self__, "localities", localities)
if max_ttl is not None:
pulumi.set(__self__, "max_ttl", max_ttl)
if name is not None:
pulumi.set(__self__, "name", name)
if no_store is not None:
pulumi.set(__self__, "no_store", no_store)
if not_before_duration is not None:
pulumi.set(__self__, "not_before_duration", not_before_duration)
if organization_unit is not None:
pulumi.set(__self__, "organization_unit", organization_unit)
if organizations is not None:
pulumi.set(__self__, "organizations", organizations)
if policy_identifiers is not None:
pulumi.set(__self__, "policy_identifiers", policy_identifiers)
if postal_codes is not None:
pulumi.set(__self__, "postal_codes", postal_codes)
if provinces is not None:
pulumi.set(__self__, "provinces", provinces)
if require_cn is not None:
pulumi.set(__self__, "require_cn", require_cn)
if server_flag is not None:
pulumi.set(__self__, "server_flag", server_flag)
if street_addresses is not None:
pulumi.set(__self__, "street_addresses", street_addresses)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
if use_csr_common_name is not None:
pulumi.set(__self__, "use_csr_common_name", use_csr_common_name)
if use_csr_sans is not None:
pulumi.set(__self__, "use_csr_sans", use_csr_sans)
@property
@pulumi.getter
def backend(self) -> pulumi.Input[str]:
"""
The path the PKI secret backend is mounted at, with no leading or trailing `/`s.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: pulumi.Input[str]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter(name="allowAnyName")
def allow_any_name(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to allow any name
"""
return pulumi.get(self, "allow_any_name")
@allow_any_name.setter
def allow_any_name(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_any_name", value)
@property
@pulumi.getter(name="allowBareDomains")
def allow_bare_domains(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to allow certificates matching the actual domain
"""
return pulumi.get(self, "allow_bare_domains")
@allow_bare_domains.setter
def allow_bare_domains(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_bare_domains", value)
@property
@pulumi.getter(name="allowGlobDomains")
def allow_glob_domains(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to allow names containing glob patterns.
"""
return pulumi.get(self, "allow_glob_domains")
@allow_glob_domains.setter
def allow_glob_domains(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_glob_domains", value)
@property
@pulumi.getter(name="allowIpSans")
def allow_ip_sans(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to allow IP SANs
"""
return pulumi.get(self, "allow_ip_sans")
@allow_ip_sans.setter
def allow_ip_sans(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_ip_sans", value)
@property
@pulumi.getter(name="allowLocalhost")
def allow_localhost(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to allow certificates for localhost
"""
return pulumi.get(self, "allow_localhost")
@allow_localhost.setter
def allow_localhost(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_localhost", value)
@property
@pulumi.getter(name="allowSubdomains")
def allow_subdomains(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to allow certificates matching subdomains
"""
return pulumi.get(self, "allow_subdomains")
@allow_subdomains.setter
def allow_subdomains(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_subdomains", value)
@property
@pulumi.getter(name="allowedDomains")
def allowed_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of allowed domains for certificates
"""
return pulumi.get(self, "allowed_domains")
@allowed_domains.setter
def allowed_domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_domains", value)
@property
@pulumi.getter(name="allowedDomainsTemplate")
def allowed_domains_template(self) -> Optional[pulumi.Input[bool]]:
"""
Flag, if set, `allowed_domains` can be specified using identity template expressions such as `{{identity.entity.aliases.<mount accessor>.name}}`.
"""
return pulumi.get(self, "allowed_domains_template")
@allowed_domains_template.setter
def allowed_domains_template(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allowed_domains_template", value)
@property
@pulumi.getter(name="allowedOtherSans")
def allowed_other_sans(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Defines allowed custom SANs
"""
return pulumi.get(self, "allowed_other_sans")
@allowed_other_sans.setter
def allowed_other_sans(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_other_sans", value)
@property
@pulumi.getter(name="allowedUriSans")
def allowed_uri_sans(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Defines allowed URI SANs
"""
return pulumi.get(self, "allowed_uri_sans")
@allowed_uri_sans.setter
def allowed_uri_sans(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_uri_sans", value)
@property
@pulumi.getter(name="basicConstraintsValidForNonCa")
def basic_constraints_valid_for_non_ca(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to mark basic constraints valid when issuing non-CA certificates
"""
return pulumi.get(self, "basic_constraints_valid_for_non_ca")
@basic_constraints_valid_for_non_ca.setter
def basic_constraints_valid_for_non_ca(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "basic_constraints_valid_for_non_ca", value)
@property
@pulumi.getter(name="clientFlag")
def client_flag(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to specify certificates for client use
"""
return pulumi.get(self, "client_flag")
@client_flag.setter
def client_flag(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "client_flag", value)
@property
@pulumi.getter(name="codeSigningFlag")
def code_signing_flag(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to specify certificates for code signing use
"""
return pulumi.get(self, "code_signing_flag")
@code_signing_flag.setter
def code_signing_flag(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "code_signing_flag", value)
@property
@pulumi.getter
def countries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
        The country of generated certificates
# sunshine_conversations_client/model/web_update_all_of.py
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class WebUpdateAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'brand_color': 'str',
'fixed_intro_pane': 'bool',
'conversation_color': 'str',
'action_color': 'str',
'display_style': 'str',
'button_icon_url': 'str',
'button_width': 'str',
'button_height': 'str',
'integration_order': 'list[str]',
'business_name': 'str',
'business_icon_url': 'str',
'background_image_url': 'str',
'origin_whitelist': 'list[str]',
'prechat_capture': 'PrechatCapture',
'can_user_create_more_conversations': 'bool'
}
attribute_map = {
'brand_color': 'brandColor',
'fixed_intro_pane': 'fixedIntroPane',
'conversation_color': 'conversationColor',
'action_color': 'actionColor',
'display_style': 'displayStyle',
'button_icon_url': 'buttonIconUrl',
'button_width': 'buttonWidth',
'button_height': 'buttonHeight',
'integration_order': 'integrationOrder',
'business_name': 'businessName',
'business_icon_url': 'businessIconUrl',
'background_image_url': 'backgroundImageUrl',
'origin_whitelist': 'originWhitelist',
'prechat_capture': 'prechatCapture',
'can_user_create_more_conversations': 'canUserCreateMoreConversations'
}
nulls = set()
def __init__(self, brand_color='65758e', fixed_intro_pane=False, conversation_color='0099ff', action_color='0099ff', display_style='button', button_icon_url=Undefined(), button_width='58', button_height='58', integration_order=Undefined(), business_name=None, business_icon_url=None, background_image_url=None, origin_whitelist=Undefined(), prechat_capture=None, can_user_create_more_conversations=None, local_vars_configuration=None): # noqa: E501
"""WebUpdateAllOf - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._brand_color = None
self._fixed_intro_pane = None
self._conversation_color = None
self._action_color = None
self._display_style = None
self._button_icon_url = None
self._button_width = None
self._button_height = None
self._integration_order = None
self._business_name = None
self._business_icon_url = None
self._background_image_url = None
self._origin_whitelist = None
self._prechat_capture = None
self._can_user_create_more_conversations = None
self.discriminator = None
if brand_color is not None:
self.brand_color = brand_color
if fixed_intro_pane is not None:
self.fixed_intro_pane = fixed_intro_pane
if conversation_color is not None:
self.conversation_color = conversation_color
if action_color is not None:
self.action_color = action_color
if display_style is not None:
self.display_style = display_style
self.button_icon_url = button_icon_url
if button_width is not None:
self.button_width = button_width
if button_height is not None:
self.button_height = button_height
self.integration_order = integration_order
if business_name is not None:
self.business_name = business_name
if business_icon_url is not None:
self.business_icon_url = business_icon_url
if background_image_url is not None:
self.background_image_url = background_image_url
self.origin_whitelist = origin_whitelist
if prechat_capture is not None:
self.prechat_capture = prechat_capture
if can_user_create_more_conversations is not None:
self.can_user_create_more_conversations = can_user_create_more_conversations
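    # Hedged note (added commentary; not part of the generated file): nullable
    # fields such as button_icon_url appear to distinguish three states in the
    # setters below: Undefined() means "leave unchanged", None means "explicitly
    # clear the value" (the field name is recorded in self.nulls for null
    # serialization), and any other value simply replaces the field. For example:
    #
    #   WebUpdateAllOf(button_icon_url=None)   # request the icon be cleared
    #   WebUpdateAllOf()                       # icon left untouched (Undefined)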
@property
def brand_color(self):
"""Gets the brand_color of this WebUpdateAllOf. # noqa: E501
This color will be used in the messenger header and the button or tab in idle state. Must be a 3 or 6-character hexadecimal color. # noqa: E501
:return: The brand_color of this WebUpdateAllOf. # noqa: E501
:rtype: str
"""
return self._brand_color
@brand_color.setter
def brand_color(self, brand_color):
"""Sets the brand_color of this WebUpdateAllOf.
This color will be used in the messenger header and the button or tab in idle state. Must be a 3 or 6-character hexadecimal color. # noqa: E501
:param brand_color: The brand_color of this WebUpdateAllOf. # noqa: E501
:type: str
"""
self._brand_color = brand_color
@property
def fixed_intro_pane(self):
"""Gets the fixed_intro_pane of this WebUpdateAllOf. # noqa: E501
When true, the introduction pane will be pinned at the top of the conversation instead of scrolling with it. # noqa: E501
:return: The fixed_intro_pane of this WebUpdateAllOf. # noqa: E501
:rtype: bool
"""
return self._fixed_intro_pane
@fixed_intro_pane.setter
def fixed_intro_pane(self, fixed_intro_pane):
"""Sets the fixed_intro_pane of this WebUpdateAllOf.
When true, the introduction pane will be pinned at the top of the conversation instead of scrolling with it. # noqa: E501
:param fixed_intro_pane: The fixed_intro_pane of this WebUpdateAllOf. # noqa: E501
:type: bool
"""
self._fixed_intro_pane = fixed_intro_pane
@property
def conversation_color(self):
"""Gets the conversation_color of this WebUpdateAllOf. # noqa: E501
This color will be used for customer messages, quick replies and actions in the footer. Must be a 3 or 6-character hexadecimal color. # noqa: E501
:return: The conversation_color of this WebUpdateAllOf. # noqa: E501
:rtype: str
"""
return self._conversation_color
@conversation_color.setter
def conversation_color(self, conversation_color):
"""Sets the conversation_color of this WebUpdateAllOf.
This color will be used for customer messages, quick replies and actions in the footer. Must be a 3 or 6-character hexadecimal color. # noqa: E501
:param conversation_color: The conversation_color of this WebUpdateAllOf. # noqa: E501
:type: str
"""
self._conversation_color = conversation_color
@property
def action_color(self):
"""Gets the action_color of this WebUpdateAllOf. # noqa: E501
This color will be used for call-to-actions inside your messages. Must be a 3 or 6-character hexadecimal color. # noqa: E501
:return: The action_color of this WebUpdateAllOf. # noqa: E501
:rtype: str
"""
return self._action_color
@action_color.setter
def action_color(self, action_color):
"""Sets the action_color of this WebUpdateAllOf.
This color will be used for call-to-actions inside your messages. Must be a 3 or 6-character hexadecimal color. # noqa: E501
:param action_color: The action_color of this WebUpdateAllOf. # noqa: E501
:type: str
"""
self._action_color = action_color
@property
def display_style(self):
"""Gets the display_style of this WebUpdateAllOf. # noqa: E501
Choose how the messenger will appear on your website. Must be either button or tab. # noqa: E501
:return: The display_style of this WebUpdateAllOf. # noqa: E501
:rtype: str
"""
return self._display_style
@display_style.setter
def display_style(self, display_style):
"""Sets the display_style of this WebUpdateAllOf.
Choose how the messenger will appear on your website. Must be either button or tab. # noqa: E501
:param display_style: The display_style of this WebUpdateAllOf. # noqa: E501
:type: str
"""
self._display_style = display_style
@property
def button_icon_url(self):
"""Gets the button_icon_url of this WebUpdateAllOf. # noqa: E501
With the button style Web Messenger, you have the option of selecting your own button icon. The image must be at least 200 x 200 pixels and must be in either JPG, PNG, or GIF format. # noqa: E501
:return: The button_icon_url of this WebUpdateAllOf. # noqa: E501
:rtype: str
"""
return self._button_icon_url
@button_icon_url.setter
def button_icon_url(self, button_icon_url):
"""Sets the button_icon_url of this WebUpdateAllOf.
With the button style Web Messenger, you have the option of selecting your own button icon. The image must be at least 200 x 200 pixels and must be in either JPG, PNG, or GIF format. # noqa: E501
:param button_icon_url: The button_icon_url of this WebUpdateAllOf. # noqa: E501
:type: str
"""
if type(button_icon_url) is Undefined:
button_icon_url = None
self.nulls.discard("button_icon_url")
elif button_icon_url is None:
self.nulls.add("button_icon_url")
else:
self.nulls.discard("button_icon_url")
self._button_icon_url = button_icon_url
@property
def button_width(self):
"""Gets the button_width of this WebUpdateAllOf. # noqa: E501
With the button style Web Messenger, you have the option of specifying the button width. # noqa: E501
:return: The button_width of this WebUpdateAllOf. # noqa: E501
:rtype: str
"""
return self._button_width
@button_width.setter
def button_width(self, button_width):
"""Sets the button_width of this WebUpdateAllOf.
With the button style Web Messenger, you have the option of specifying the button width. # noqa: E501
:param button_width: The button_width of this WebUpdateAllOf. # noqa: E501
:type: str
"""
self._button_width = button_width
@property
def button_height(self):
"""Gets the button_height of this WebUpdateAllOf. # noqa: E501
With the button style Web Messenger, you have the option of specifying the button height. # noqa: E501
:return: The button_height of this WebUpdateAllOf. # noqa: E501
:rtype: str
"""
return self._button_height
@button_height.setter
def button_height(self, button_height):
"""Sets the button_height of this WebUpdateAllOf.
With the button style Web Messenger, you have the option of specifying the button height. # noqa: E501
:param button_height: The button_height of this WebUpdateAllOf. # noqa: E501
:type: str
"""
self._button_height = button_height
@property
def integration_order(self):
"""Gets the integration_order of this WebUpdateAllOf. # noqa: E501
Array of integration IDs, order will be reflected in the Web Messenger. When set, only integrations from this list will be displayed in the Web Messenger. If unset, all integrations will be displayed. # noqa: E501
:return: The integration_order of this WebUpdateAllOf. # noqa: E501
:rtype: list[str]
"""
return self._integration_order
@integration_order.setter
def integration_order(self, integration_order):
"""Sets the integration_order of this WebUpdateAllOf.
Array of integration IDs, order will be reflected in the Web Messenger. When set, only integrations from this list will be displayed in the Web Messenger. If unset, all integrations will be displayed. # noqa: E501
:param integration_order: The integration_order of this WebUpdateAllOf. # noqa: E501
:type: list[str]
"""
if type(integration_order) is Undefined:
            integration_order = None
            self.nulls.discard("integration_order")
("xRealloc", c_void_p), # Resize an allocation
("xSize", c_void_p), # Return the size of an allocation
("xRoundup", c_void_p), # Round up request size to allocation size
("xInit", c_void_p), # Initialize the memory allocator
("xShutdown", c_void_p), # Deinitialize the memory allocator
("pAppData", c_void_p), # Argument to xInit() and xShutdown()
]
# Configuration Options
SQLITE_CONFIG_SINGLETHREAD = 1 # nil
SQLITE_CONFIG_MULTITHREAD = 2 # nil
SQLITE_CONFIG_SERIALIZED = 3 # nil
SQLITE_CONFIG_MALLOC = 4 # sqlite3_mem_methods*
SQLITE_CONFIG_GETMALLOC = 5 # sqlite3_mem_methods*
SQLITE_CONFIG_SCRATCH = 6 # No longer used
SQLITE_CONFIG_PAGECACHE = 7 # void*, int sz, int N
SQLITE_CONFIG_HEAP = 8 # void*, int nByte, int min
SQLITE_CONFIG_MEMSTATUS = 9 # boolean
SQLITE_CONFIG_MUTEX = 10 # sqlite3_mutex_methods*
SQLITE_CONFIG_GETMUTEX = 11 # sqlite3_mutex_methods*
# previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused.
SQLITE_CONFIG_LOOKASIDE = 13 # int int
SQLITE_CONFIG_PCACHE = 14 # no-op
SQLITE_CONFIG_GETPCACHE = 15 # no-op
SQLITE_CONFIG_LOG = 16 # xFunc, void*
SQLITE_CONFIG_URI = 17 # int
SQLITE_CONFIG_PCACHE2 = 18 # sqlite3_pcache_methods2*
SQLITE_CONFIG_GETPCACHE2 = 19 # sqlite3_pcache_methods2*
SQLITE_CONFIG_COVERING_INDEX_SCAN = 20 # int
SQLITE_CONFIG_SQLLOG = 21 # xSqllog, void*
SQLITE_CONFIG_MMAP_SIZE = 22 # sqlite3_int64, sqlite3_int64
SQLITE_CONFIG_WIN32_HEAPSIZE = 23 # int nByte
SQLITE_CONFIG_PCACHE_HDRSZ = 24 # int *psz
SQLITE_CONFIG_PMASZ = 25 # unsigned int szPma
SQLITE_CONFIG_STMTJRNL_SPILL = 26 # int nByte
SQLITE_CONFIG_SMALL_MALLOC = 27 # boolean
SQLITE_CONFIG_SORTERREF_SIZE = 28 # int nByte
SQLITE_CONFIG_MEMDB_MAXSIZE = 29 # sqlite3_int64
# Database Connection Configuration Options
SQLITE_DBCONFIG_MAINDBNAME = 1000 # const char*
SQLITE_DBCONFIG_LOOKASIDE = 1001 # void* int int
SQLITE_DBCONFIG_ENABLE_FKEY = 1002 # int int*
SQLITE_DBCONFIG_ENABLE_TRIGGER = 1003 # int int*
SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER = 1004 # int int*
SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION = 1005 # int int*
SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE = 1006 # int int*
SQLITE_DBCONFIG_ENABLE_QPSG = 1007 # int int*
SQLITE_DBCONFIG_TRIGGER_EQP = 1008 # int int*
SQLITE_DBCONFIG_RESET_DATABASE = 1009 # int int*
SQLITE_DBCONFIG_DEFENSIVE = 1010 # int int*
SQLITE_DBCONFIG_WRITABLE_SCHEMA = 1011 # int int*
SQLITE_DBCONFIG_LEGACY_ALTER_TABLE = 1012 # int int*
SQLITE_DBCONFIG_DQS_DML = 1013 # int int*
SQLITE_DBCONFIG_DQS_DDL = 1014 # int int*
SQLITE_DBCONFIG_ENABLE_VIEW = 1015 # int int*
SQLITE_DBCONFIG_LEGACY_FILE_FORMAT = 1016 # int int*
SQLITE_DBCONFIG_MAX = 1016 # Largest DBCONFIG
# Enable Or Disable Extended Result Codes
@annotate
def sqlite3_extended_result_codes(db: sqlite3_p, onoff: c_int) -> c_int:
pass
# Last Insert Rowid
@annotate
def sqlite3_last_insert_rowid(db: sqlite3_p) -> sqlite3_int64:
pass
# Set the Last Insert Rowid value
# @annotate
# def sqlite3_set_last_insert_rowid(db: sqlite3_p, rowid: sqlite3_int64):
# pass
# Count The Number Of Rows Modified
@annotate
def sqlite3_changes(db: sqlite3_p) -> c_int:
pass
# Total Number Of Rows Modified
@annotate
def sqlite3_total_changes(db: sqlite3_p) -> c_int:
pass
# Interrupt A Long-Running Query
@annotate
def sqlite3_interrupt(db: sqlite3_p):
pass
# Determine If An SQL Statement Is Complete
@annotate
def sqlite3_complete(sql: c_char_p) -> c_int:
pass
# Register A Callback To Handle SQLITE_BUSY Errors
@annotate
def sqlite3_busy_handler(db: sqlite3_p, callback: c_void_p, pArg: c_void_p) -> c_int:
pass
# Set A Busy Timeout
@annotate
def sqlite3_busy_timeout(db: sqlite3_p, ms: c_int) -> c_int:
pass
# Formatted String Printing Functions
sqlite3_mprintf = libsqlite.sqlite3_mprintf
sqlite3_snprintf = libsqlite.sqlite3_snprintf
# Memory Allocation Subsystem
@annotate
def sqlite3_malloc(size: c_int) -> c_void_p:
pass
@annotate
def sqlite3_malloc64(size: sqlite3_uint64) -> c_void_p:
pass
@annotate
def sqlite3_realloc(ptr: c_void_p, size: c_int) -> c_void_p:
pass
@annotate
def sqlite3_realloc64(ptr: c_void_p, size: sqlite3_uint64) -> c_void_p:
pass
@annotate
def sqlite3_free(ptr: c_void_p):
pass
@annotate
def sqlite3_msize(ptr: c_void_p) -> sqlite3_uint64:
pass
# Memory Allocator Statistics
@annotate
def sqlite3_memory_used() -> sqlite3_int64:
pass
@annotate
def sqlite3_memory_highwater(resetFlag: c_int) -> sqlite3_int64:
pass
# Pseudo-Random Number Generator
@annotate
def sqlite3_randomness(N: c_int, P: c_void_p):
pass
# Compile-Time Authorization Callbacks
@annotate
def sqlite3_set_authorizer(db: sqlite3_p, xAuth: c_void_p, pUserData: c_void_p) -> c_int:
pass
# Authorizer Return Codes
SQLITE_DENY = 1 # Abort the SQL statement with an error
SQLITE_IGNORE = 2 # Don't allow access, but don't generate an error
# Authorizer Action Codes
SQLITE_CREATE_INDEX = 1 # Index Name Table Name
SQLITE_CREATE_TABLE = 2 # Table Name NULL
SQLITE_CREATE_TEMP_INDEX = 3 # Index Name Table Name
SQLITE_CREATE_TEMP_TABLE = 4 # Table Name NULL
SQLITE_CREATE_TEMP_TRIGGER = 5 # Trigger Name Table Name
SQLITE_CREATE_TEMP_VIEW = 6 # View Name NULL
SQLITE_CREATE_TRIGGER = 7 # Trigger Name Table Name
SQLITE_CREATE_VIEW = 8 # View Name NULL
SQLITE_DELETE = 9 # Table Name NULL
SQLITE_DROP_INDEX = 10 # Index Name Table Name
SQLITE_DROP_TABLE = 11 # Table Name NULL
SQLITE_DROP_TEMP_INDEX = 12 # Index Name Table Name
SQLITE_DROP_TEMP_TABLE = 13 # Table Name NULL
SQLITE_DROP_TEMP_TRIGGER = 14 # Trigger Name Table Name
SQLITE_DROP_TEMP_VIEW = 15 # View Name NULL
SQLITE_DROP_TRIGGER = 16 # Trigger Name Table Name
SQLITE_DROP_VIEW = 17 # View Name NULL
SQLITE_INSERT = 18 # Table Name NULL
SQLITE_PRAGMA = 19 # Pragma Name 1st arg or NULL
SQLITE_READ = 20 # Table Name Column Name
SQLITE_SELECT = 21 # NULL NULL
SQLITE_TRANSACTION = 22 # Operation NULL
SQLITE_UPDATE = 23 # Table Name Column Name
SQLITE_ATTACH = 24 # Filename NULL
SQLITE_DETACH = 25 # Database Name NULL
SQLITE_ALTER_TABLE = 26 # Database Name Table Name
SQLITE_REINDEX = 27 # Index Name NULL
SQLITE_ANALYZE = 28 # Table Name NULL
SQLITE_CREATE_VTABLE = 29 # Table Name Module Name
SQLITE_DROP_VTABLE = 30 # Table Name Module Name
SQLITE_FUNCTION = 31 # NULL Function Name
SQLITE_SAVEPOINT = 32 # Operation Savepoint Name
SQLITE_COPY = 0 # No longer used
SQLITE_RECURSIVE = 33 # NULL NULL
# SQL Trace Event Codes
SQLITE_TRACE_STMT = 0x01
SQLITE_TRACE_PROFILE = 0x02
SQLITE_TRACE_ROW = 0x04
SQLITE_TRACE_CLOSE = 0x08
# SQL Trace Hook
@annotate
def sqlite3_trace(db: sqlite3_p, xTrace: c_void_p, pCtx: c_void_p) -> c_void_p:
pass
# @annotate
# def sqlite3_trace_v2(db: sqlite3_p, uMask: c_uint, xCallback: c_void_p, pCtx: c_void_p) -> c_int:
# pass
# Query Progress Callbacks
@annotate
def sqlite3_progress_handler(db: sqlite3_p, n: c_int, callback: c_void_p, param: c_void_p):
pass
# Opening A New Database Connection
@annotate
def sqlite3_open_v2(
filename: c_char_p, # Database filename (UTF-8)
ppDb: POINTER(sqlite3_p), # OUT: SQLite db handle
flags: c_int, # Flags
zVfs: c_char_p # Name of VFS module to use
) -> c_int:
pass
# Obtain Values For URI Parameters
@annotate
def sqlite3_uri_parameter(zFilename: c_char_p, zParam: c_char_p) -> c_char_p:
pass
@annotate
def sqlite3_uri_boolean(zFile: c_char_p, zParam: c_char_p, bDefault: c_int) -> c_int:
pass
@annotate
def sqlite3_uri_int64(zFile: c_char_p, zParam: c_char_p, default: sqlite3_int64) -> sqlite3_int64:
pass
# Error Codes And Messages
@annotate
def sqlite3_errcode(db: sqlite3_p) -> c_int:
pass
@annotate
def sqlite3_extended_errcode(db: sqlite3_p) -> c_int:
pass
@annotate
def sqlite3_errmsg(db: sqlite3_p) -> c_char_p:
pass
@annotate
def sqlite3_errstr(n: c_int) -> c_char_p:
pass
# Prepared Statement Object
sqlite3_stmt_p = c_void_p
# Run-time Limits
@annotate
def sqlite3_limit(db: sqlite3_p, id: c_int, newVal: c_int) -> c_int:
pass
# Run-Time Limit Categories
SQLITE_LIMIT_LENGTH = 0
SQLITE_LIMIT_SQL_LENGTH = 1
SQLITE_LIMIT_COLUMN = 2
SQLITE_LIMIT_EXPR_DEPTH = 3
SQLITE_LIMIT_COMPOUND_SELECT = 4
SQLITE_LIMIT_VDBE_OP = 5
SQLITE_LIMIT_FUNCTION_ARG = 6
SQLITE_LIMIT_ATTACHED = 7
SQLITE_LIMIT_LIKE_PATTERN_LENGTH = 8
SQLITE_LIMIT_VARIABLE_NUMBER = 9
SQLITE_LIMIT_TRIGGER_DEPTH = 10
SQLITE_LIMIT_WORKER_THREADS = 11
# Prepare Flags
SQLITE_PREPARE_PERSISTENT = 0x01
SQLITE_PREPARE_NORMALIZE = 0x02
SQLITE_PREPARE_NO_VTAB = 0x04
# Compiling An SQL Statement
@annotate
def sqlite3_prepare_v2(
db: sqlite3_p, # Database handle
zSql: c_void_p, # SQL statement, UTF-8 encoded
nByte: c_int, # Maximum length of zSql in bytes.
ppStmt: POINTER(sqlite3_stmt_p), # OUT: Statement handle
pzTail: POINTER(c_void_p) # OUT: Pointer to unused portion of zSql
) -> c_int:
pass
# @annotate
# def sqlite3_prepare_v3(
# db: sqlite3_p, # Database handle
# zSql: c_void_p, # SQL statement, UTF-8 encoded
# nByte: c_int, # Maximum length of zSql in bytes.
# prepFlags: c_uint, # Zero or more SQLITE_PREPARE_ flags
# ppStmt: POINTER(sqlite3_stmt_p), # OUT: Statement handle
# pzTail: POINTER(c_void_p) # OUT: Pointer to unused portion of zSql
# ) -> c_int:
# pass
# Retrieving Statement SQL
@annotate
def sqlite3_sql(pStmt: sqlite3_stmt_p) -> c_char_p:
pass
# @annotate
# def sqlite3_expanded_sql(pStmt: sqlite3_stmt_p) -> c_char_p:
# pass
# @annotate
# def sqlite3_normalized_sql(pStmt: sqlite3_stmt_p) -> c_char_p:
# pass
# Determine If An SQL Statement Writes The Database
@annotate
def sqlite3_stmt_readonly(pStmt: sqlite3_stmt_p) -> c_int:
pass
# Query The EXPLAIN Setting For A Prepared Statement
# @annotate
# def sqlite3_stmt_isexplain(pStmt: sqlite3_stmt_p) -> c_int:
# pass
# Determine If A Prepared Statement Has Been Reset
@annotate
def sqlite3_stmt_busy(pStmt: sqlite3_stmt_p) -> c_int:
pass
# Dynamically Typed Value Object
sqlite3_value_p = c_void_p
# SQL Function Context Object
sqlite3_context_p = c_void_p
# Constants Defining Special Destructor Behavior
sqlite3_destructor_type = CFUNCTYPE(None, c_void_p)
SQLITE_STATIC = sqlite3_destructor_type(0)  # content pointer is constant and outlives the statement
SQLITE_TRANSIENT = sqlite3_destructor_type(-1)  # SQLite should make its own private copy of the content
# Binding Values To Prepared Statements
@annotate
def sqlite3_bind_blob64(
pStmt: sqlite3_stmt_p,
index: c_int,
value: c_void_p,
nByte: sqlite3_uint64,
destructor: sqlite3_destructor_type
) -> c_int:
pass
@annotate
def sqlite3_bind_double(pStmt: sqlite3_stmt_p, index: c_int, value: c_double) -> c_int:
pass
@annotate
def sqlite3_bind_int64(pStmt: sqlite3_stmt_p, index: c_int, value: sqlite3_int64) -> c_int:
pass
@annotate
def sqlite3_bind_null(pStmt: sqlite3_stmt_p, index: c_int) -> c_int:
pass
@annotate
def sqlite3_bind_text64(
pStmt: sqlite3_stmt_p,
index: c_int,
value: c_char_p,
nByte: sqlite3_uint64,
destructor: sqlite3_destructor_type,
encoding: c_ubyte) -> c_int:
pass
@annotate
def sqlite3_bind_value(pStmt: sqlite3_stmt_p, index: c_int, value: sqlite3_value_p) -> c_int:
pass
# @annotate
# def sqlite3_bind_pointer(pStmt: sqlite3_stmt_p, index: c_int, value: c_void_p, type: c_char_p, destructor: sqlite3_destructor_type) -> c_int:
# pass
@annotate
def sqlite3_bind_zeroblob64(pStmt: sqlite3_stmt_p, index: c_int, n: sqlite3_uint64) -> c_int:
pass
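# Illustrative sketch (assumption): preparing a statement and binding a text
# parameter with the wrappers above. nByte = -1 tells SQLite to read the SQL up
# to the NUL terminator; the trailing 1 is SQLITE_UTF8, inlined here because the
# text-encoding constants are not visible in this excerpt.
def _example_prepare_and_bind(db, sql=b"SELECT ?1"):
    from ctypes import byref
    stmt = sqlite3_stmt_p()
    rc = sqlite3_prepare_v2(db, sql, -1, byref(stmt), None)
    if rc != 0:
        raise RuntimeError(sqlite3_errmsg(db))
    value = b"hello"
    sqlite3_bind_text64(stmt, 1, value, len(value), SQLITE_TRANSIENT, 1)
    return stmt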
# Number Of SQL Parameters
@annotate
def sqlite3_bind_parameter_count(pStmt: sqlite3_stmt_p) -> c_int:
pass
# Name Of A Host Parameter
@annotate
def sqlite3_bind_parameter_name(pStmt: sqlite3_stmt_p, index: c_int) -> c_char_p:
pass
# Index Of A Parameter With A Given Name
@annotate
def sqlite3_bind_parameter_index(pStmt: sqlite3_stmt_p, zName: c_char_p) -> c_int:
pass
# Reset All Bindings On A Prepared Statement
@annotate
def sqlite3_clear_bindings(pStmt: sqlite3_stmt_p) -> c_int:
pass
# Number Of Columns In A Result Set
@annotate
def sqlite3_column_count(pStmt: sqlite3_stmt_p) -> c_int:
pass
# Column Names In A Result Set
@annotate
def sqlite3_column_name(pStmt: sqlite3_stmt_p, n: c_int) -> c_char_p:
pass
# Source Of Data In A Query Result
# @annotate
# def sqlite3_column_database_name(pStmt: sqlite3_stmt_p, n: c_int) -> c_char_p:
# pass
# @annotate
# def sqlite3_column_table_name(pStmt: sqlite3_stmt_p, n: c_int) -> c_char_p:
# pass
# @annotate
# def sqlite3_column_origin_name(pStmt: sqlite3_stmt_p, n: c_int) -> c_char_p:
# pass
# Declared Datatype Of A Query Result
self.model._reset_iter_counts()
self.final_setup()
self._run_counter += 1
record_model_options(self, self._run_counter)
self.model._clear_iprint()
return self.driver.run()
def compute_jacvec_product(self, of, wrt, mode, seed):
"""
Given a seed and 'of' and 'wrt' variables, compute the total jacobian vector product.
Parameters
----------
of : list of str
Variables whose derivatives will be computed.
wrt : list of str
Derivatives will be computed with respect to these variables.
mode : str
Derivative direction ('fwd' or 'rev').
seed : dict or list
Either a dict keyed by 'wrt' varnames (fwd) or 'of' varnames (rev), containing
dresidual (fwd) or doutput (rev) values, OR a list of dresidual or doutput
values that matches the corresponding 'wrt' (fwd) or 'of' (rev) varname list.
Returns
-------
dict
The total jacobian vector product, keyed by variable name.
"""
if mode == 'fwd':
if len(wrt) != len(seed):
raise RuntimeError(self.msginfo +
": seed and 'wrt' list must be the same length in fwd mode.")
lnames, rnames = of, wrt
lkind, rkind = 'output', 'residual'
else: # rev
if len(of) != len(seed):
raise RuntimeError(self.msginfo +
": seed and 'of' list must be the same length in rev mode.")
lnames, rnames = wrt, of
lkind, rkind = 'residual', 'output'
rvec = self.model._vectors[rkind]['linear']
lvec = self.model._vectors[lkind]['linear']
rvec.set_val(0.)
conns = self.model._conn_global_abs_in2out
# set seed values into dresids (fwd) or doutputs (rev)
# seed may have keys that are inputs and must be converted into auto_ivcs
try:
seed[rnames[0]]
except (IndexError, TypeError):
for i, name in enumerate(rnames):
if name in conns:
rvec[conns[name]] = seed[i]
else:
rvec[name] = seed[i]
else:
for name in rnames:
if name in conns:
rvec[conns[name]] = seed[name]
else:
rvec[name] = seed[name]
# We apply a -1 here because the derivative of the output is minus the derivative of
# the residual in openmdao.
data = rvec.asarray()
data *= -1.
self.model.run_solve_linear(mode)
if mode == 'fwd':
return {n: lvec[n].copy() for n in lnames}
else:
# may need to convert some lnames to auto_ivc names
return {n: lvec[conns[n] if n in conns else n].copy() for n in lnames}
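# Usage sketch (illustrative, not part of the original source; 'x' and 'y' are
# placeholder variable names and numpy is assumed to be available as np):
#
#     seed = {'x': np.ones(3)}   # d(residual) seeds keyed by 'wrt' names in fwd mode
#     dof = prob.compute_jacvec_product(of=['y'], wrt=['x'], mode='fwd', seed=seed)
#     # dof['y'] holds the total-derivative product d(y)/d(x) @ seed['x']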
def _setup_recording(self):
"""
Set up case recording.
"""
self._filtered_vars_to_record = self.driver._get_vars_to_record(self.recording_options)
self._rec_mgr.startup(self)
def add_recorder(self, recorder):
"""
Add a recorder to the problem.
Parameters
----------
recorder : CaseRecorder
A recorder instance.
"""
self._rec_mgr.append(recorder)
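# Usage sketch (illustrative; assumes `openmdao.api` imported as `om`):
#
#     prob.add_recorder(om.SqliteRecorder('cases.sql'))
#     prob.setup()
#     prob.run_driver()
#     prob.record('final_state')
#     prob.cleanup()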
def cleanup(self):
"""
Clean up resources prior to exit.
"""
# shut down all recorders
self._rec_mgr.shutdown()
# clean up driver and model resources
self.driver.cleanup()
for system in self.model.system_iter(include_self=True, recurse=True):
system.cleanup()
def record(self, case_name):
"""
Record the variables at the Problem level.
Must be called after `final_setup` has been called. This can either
happen automatically through `run_driver` or `run_model`, or it can be
called manually.
Parameters
----------
case_name : str
Name used to identify this Problem case.
"""
if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:
raise RuntimeError(f"{self.msginfo}: Problem.record() cannot be called before "
"`Problem.run_model()`, `Problem.run_driver()`, or "
"`Problem.final_setup()`.")
else:
record_iteration(self, self, case_name)
def record_iteration(self, case_name):
"""
Record the variables at the Problem level.
Parameters
----------
case_name : str
Name used to identify this Problem case.
"""
warn_deprecation("'Problem.record_iteration' has been deprecated. "
"Use 'Problem.record' instead.")
record_iteration(self, self, case_name)
def _get_recorder_metadata(self, case_name):
"""
Return metadata from the latest iteration for use in the recorder.
Parameters
----------
case_name : str
Name of current case.
Returns
-------
dict
Metadata dictionary for the recorder.
"""
return create_local_meta(case_name)
def setup(self, check=False, logger=None, mode='auto', force_alloc_complex=False,
distributed_vector_class=PETScVector, local_vector_class=DefaultVector,
derivatives=True):
"""
Set up the model hierarchy.
When `setup` is called, the model hierarchy is assembled, the processors are allocated
(for MPI), and variables and connections are all assigned. This method traverses down
the model hierarchy to call `setup` on each subsystem, and then traverses up the model
hierarchy to call `configure` on each subsystem.
Parameters
----------
check : None, bool, list of str, or the str 'all'
Determines what config checks, if any, are run after setup is complete.
If None or False, no checks are run
If True, the default checks ('out_of_order', 'system', 'solvers', 'dup_inputs',
'missing_recorders', 'unserializable_options', 'comp_has_no_outputs',
'auto_ivc_warnings') are run
If list of str, run those config checks
If 'all', all the checks ('auto_ivc_warnings', 'comp_has_no_outputs', 'cycles',
'dup_inputs', 'missing_recorders', 'all_unserializable_options', 'out_of_order',
'promotions', 'solvers', 'system', 'unconnected_inputs') are run.
logger : object
Object for logging config checks if check is True.
mode : str
Derivatives calculation mode, 'fwd' for forward, and 'rev' for
reverse (adjoint). Default is 'auto', which will pick 'fwd' or 'rev' based on
the direction resulting in the smallest number of linear solves required to
compute derivatives.
force_alloc_complex : bool
Force allocation of imaginary part in nonlinear vectors. OpenMDAO can generally
detect when you need to do this, but in some cases (e.g., complex step is used
after a reconfiguration) you may need to set this to True.
distributed_vector_class : type
Reference to the <Vector> class or factory function used to instantiate vectors
and associated transfers involved in interprocess communication.
local_vector_class : type
Reference to the <Vector> class or factory function used to instantiate vectors
and associated transfers involved in intraprocess communication.
derivatives : bool
If True, perform any memory allocations necessary for derivative computation.
Returns
-------
<Problem>
This enables the user to instantiate and setup in one line.
"""
model = self.model
comm = self.comm
# A distributed vector type is required for MPI
if comm.size > 1:
if distributed_vector_class is PETScVector and PETScVector is None:
raise ValueError(self.msginfo +
": Attempting to run in parallel under MPI but PETScVector "
"could not be imported.")
elif not distributed_vector_class.distributed:
raise ValueError("%s: The `distributed_vector_class` argument must be a "
"distributed vector class like `PETScVector` when running in "
"parallel under MPI but '%s' was specified which is not "
"distributed." % (self.msginfo, distributed_vector_class.__name__))
if mode not in ['fwd', 'rev', 'auto']:
msg = "%s: Unsupported mode: '%s'. Use either 'fwd' or 'rev'." % (self.msginfo, mode)
raise ValueError(msg)
self._mode = self._orig_mode = mode
model_comm = self.driver._setup_comm(comm)
# this metadata will be shared by all Systems/Solvers in the system tree
self._metadata = {
'coloring_dir': self.options['coloring_dir'], # directory for coloring files
'recording_iter': _RecIteration(), # manager of recorder iterations
'local_vector_class': local_vector_class,
'distributed_vector_class': distributed_vector_class,
'solver_info': SolverInfo(),
'use_derivatives': derivatives,
'force_alloc_complex': force_alloc_complex, # forces allocation of complex vectors
'vars_to_gather': {}, # vars that are remote somewhere. does not include distrib vars
'prom2abs': {'input': {}, 'output': {}}, # includes ALL promotes including buried ones
'static_mode': False, # used to determine where various 'static'
# and 'dynamic' data structures are stored.
# Dynamic ones are added during System
# setup/configure. They are wiped out and re-created during
# each Problem setup. Static ones are added outside of
# Problem setup and they are never wiped out or re-created.
'config_info': None, # used during config to determine if additional updates required
'parallel_groups': [], # list of pathnames of parallel groups in this model (all procs)
'setup_status': _SetupStatus.PRE_SETUP,
'model_ref': weakref.ref(model), # ref to the model (needed to get out-of-scope
# src data for inputs)
'using_par_deriv_color': False, # True if parallel derivative coloring is being used
}
model._setup(model_comm, mode, self._metadata)
# set static mode back to True in all systems in this Problem
self._metadata['static_mode'] = True
# Cache all args for final setup.
self._check = check
self._logger = logger
self._metadata['setup_status'] = _SetupStatus.POST_SETUP
return self
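# Typical call pattern (illustrative; 'MyGroup' is a placeholder model class):
#
#     prob = Problem(model=MyGroup())
#     prob.setup(check=True, mode='auto', force_alloc_complex=True)
#     prob.run_model()
#
# `setup` returns the Problem itself, so the call can also be chained:
# `prob = Problem(model=MyGroup()).setup()`.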
def final_setup(self):
"""
Perform final setup phase on problem in preparation for run.
This is the second phase of setup, and is done automatically at the start of `run_driver`
and `run_model`. At the beginning of final_setup, we have a model hierarchy with defined
variables, solvers, case_recorders, and derivative settings. During this phase, the vectors
are created and populated, the drivers and solvers are initialized, the recorders are
started, and the rest of the framework is prepared for execution.
"""
driver = self.driver
response_size, desvar_size = driver._update_voi_meta(self.model)
# update mode if it's been set to 'auto'
if self._orig_mode == 'auto':
mode = 'rev' if response_size < desvar_size else 'fwd'
self._mode = mode
else:
mode = self._orig_mode
if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:
self.model._final_setup(self.comm)
driver._setup_driver(self)
info = driver._coloring_info
coloring
0 slopes. False otherwise.
"""
pass
def removeKey(self, keys):
"""self.removeKey(keys) -> None.
Remove some keys from the curve.
@param keys: The sequence of keys to be removed.
@return: None.
"""
pass
def size(self):
"""self.size() -> Number of keys.
@return: Number of keys.
"""
pass
def knobIndex(self):
"""self.knobIndex() -> Int.
Return the knob index this animation belongs to.
@return: Int.
"""
pass
def inverse(self, y):
"""self.inverse(y) -> Float.
The inverse function at value y. This is the value of x such that evaluate(x)
returns y.
This is designed to invert color lookup tables. It only works if the
derivative is zero or positive everywhere.
@param y: The value of the function to get the inverse for.
@return: Float.
"""
pass
def __new__(self, S):
"""T.__new__(S, ...) -> a new object with type S, a subtype of T"""
pass
def selected(self):
"""self.selected() -> bool
@return: True if selected, False otherwise.
"""
pass
def setKey(self):
"""self.setKey(t, y) -> Key.
Set a key at time t and value y. If there is no key
there one is created. If there is a key there it is moved
vertically to be at y. If a new key is inserted the
interpolation and extrapolation are copied from a neighboring key, if
there were no keys then it is set to nuke.SMOOTH interpolation and
nuke.CONSTANT extrapolation.
@param t: The time to set the key at.
@param y: The value for the key.
@return: The new key.
"""
pass
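# Usage sketch (illustrative; assumes a node named 'Blur1' whose 'size' knob is animated):
#
#     curve = nuke.toNode('Blur1')['size'].animation(0)   # first AnimationCurve on the knob
#     curve.setKey(10, 2.5)                                # key frame 10 at value 2.5
#     curve.evaluate(10)                                   # -> 2.5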
def addKey(self, keys):
"""self.addKey(keys) -> None.
Insert a sequence of keys.
@param keys: Sequence of AnimationKey.
@return: None.
"""
pass
def changeInterpolation(self):
"""self.changeInterpolation(keys, type) -> None.
Change interpolation (and extrapolation) type for the keys.
@param keys: Sequence of keys.
@param type: Interpolation type. One of:
nuke.HORIZONTAL
nuke.BREAK
nuke.BEFORE_CONST
nuke.BEFORE_LINEAR
nuke.AFTER_CONST
nuke.AFTER_LINEAR.
@return: None.
"""
pass
def toScript(self, selected):
"""self.toScript(selected) -> str
@param selected: Optional parameter. If this is given and is True, then only
process the selected curves; otherwise convert all.
@return: A string containing the curves.
"""
pass
def knob(self):
"""self.knob() -> Knob.
Return the knob this animation belongs to.
@return: Knob.
"""
pass
def keys(self):
"""self.keys() -> List of keys.
@return: List of keys.
"""
pass
def evaluate(self, t):
"""self.evaluate(t) -> float
Value at time 't'.
@param t: Time.
@return: The value of the animation at time 't'.
"""
pass
def integrate(self):
"""self.integrate(t1, t2) -> Float.
Calculate the area underneath the curve from t1 to t2.
@param t1: The start of the integration range.
@param t2: The end of the integration range.
@return: The result of the integration.
"""
pass
def derivative(self):
"""self.derivative(t, n) -> Float.
The n'th derivative at time 't'. If n is less than 1 it returns evaluate(t).
@param t: Time.
@param n: Optional. Default is 1.
@return: The value of the derivative.
"""
pass
def setExpression(self, s):
"""self.setExpression(s) -> None.
Set expression.
@param s: A string containing the expression.
@return: None.
"""
pass
def identity(self):
"""self.identity() -> bool
@return: True if the animation appears to be such that y == x everywhere. This
is True only for an expression of 'x' or the default expression and all points
having y == x and slope == 1. Extrapolation is ignored.
"""
pass
def clear(self):
"""self.clear() -> None.
Delete all keys.
@return: None.
"""
pass
def fromScript(self, s):
"""self.fromScript(s) -> None.
@param s: String.
@return: None.
"""
pass
def knobAndFieldName(self):
"""self.knobAndFieldName() -> string.
Knob and field name combined (e.g. 'translate.x').
@return: string.
"""
pass
def noExpression(self):
"""self.noExpression() -> bool
@return: True if the expression is the default expression (i.e. the keys
control the curve), False otherwise.
"""
pass
def expression(self):
"""self.expression() -> String.
Get the expression.
@return: String.
"""
pass
def view(self):
"""self.view() -> String.
The view this AnimationCurve object is associated with.
@return: String.
"""
pass
class AnimationKey(object):
def __init__(self):
"""x.__init__(...) initializes x; see help(type(x)) for signature"""
pass
def __new__(self, S):
"""T.__new__(S, ...) -> a new object with type S, a subtype of T"""
pass
class Array_Knob(Knob):
def clearAnimated(self):
"""self.clearAnimated(index, view) -> True if succeeded, False otherwise.
Delete animation.
@param index: Optional index.
@param view: Optional view.
@return: True if succeeded, False otherwise.
"""
pass
def removeKey(self):
"""self.removeKey(index, view) -> True if succeeded, False otherwise.
Remove key.
@param index: Optional index.
@param view: Optional view.
@return: True if succeeded, False otherwise.
"""
pass
def setValueAt(self):
"""self.setValueAt(value, time, index, view) -> bool.
Set value of element 'index' at time for view. If the knob is animated, it will set a new keyframe or change an existing one. Index and view are optional. Return True if successful.
@param value: Floating point value.
@param time: Time.
@param index: Optional index.
@param view: Optional view.
@return: True if value changed, False otherwise. Safe to ignore.
"""
pass
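# Usage sketch (illustrative; 'Transform1' is a placeholder node, and setAnimated()
# is an Array_Knob method defined outside this excerpt):
#
#     knob = nuke.toNode('Transform1')['translate']
#     knob.setAnimated()
#     knob.setValueAt(120.0, 10, 0)   # x channel, frame 10
#     knob.setValueAt(45.0, 10, 1)    # y channel, frame 10
#     knob.valueAt(10)                # -> [120.0, 45.0]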
def frame(self):
"""self.frame() -> Frame number.
@return: Frame number.
"""
pass
def removeKeyAt(self):
"""self.removeKeyAt(time, index, view) -> True if succeeded, False otherwise.
Remove keyframe at specified time, optional index and view. Return True if successful.
@param time: Time.
@param index: Optional index.
@param view: Optional view.
@return: True if succeeded, False otherwise.
"""
pass
def height(self):
"""self.height() -> Height of array of values.
@return: Height of array of values.
"""
pass
def minimum(self):
"""self.min() -> Minimum value.
@return: Minimum value.
"""
pass
def unsplitView(self, view):
"""self.unsplitView(view) -> None.
Unsplit the view so that it shares a value with other views.
@param view: Optional view. Default is current view.
@return: None.
"""
pass
def array(self):
"""self.array() -> List of knob values.
@return: List of knob values.
"""
pass
def getIntegral(self):
"""Return integral at time interval [t1, t2] and index 'i'."""
pass
def singleValue(self, view):
"""self.singleValue(view) -> True if holds a single value.
@param view: Optional view. Default is current view.
@return: True if holds a single value.
"""
pass
def isKeyAt(self):
"""self.isKeyAt(time, index, view) -> True if succeeded, False otherwise.
Returns True if there is a keyframe at specified time, optional index and view, otherwise returns False.
@param time: Time.
@param index: Optional index.
@param view: Optional view.
@return: True if succeeded, False otherwise.
"""
pass
def hasExpression(self, index):
"""self.hasExpression(index) -> True if has expression, False otherwise.
@param index: Optional index.
@return: True if has expression, False otherwise.
"""
pass
def __new__(self, S):
"""T.__new__(S, ...) -> a new object with type S, a subtype of T"""
pass
def setKeyAt(self):
"""self.setKeyAt(time, index, view) -> None.
Set a key on element 'index', at time and view.
@param time: Time.
@param index: Optional index.
@param view: Optional view.
@return: None.
"""
pass
def min(self):
"""self.min() -> Minimum value.
@return: Minimum value.
"""
pass
def defaultValue(self):
"""self.defaultValue() -> Default value.
@return: Default value.
"""
pass
def getKeyTime(self):
"""Return time of the keyframe at time 't' and channel 'c'."""
pass
def deleteAnimation(self, curve):
"""self.deleteAnimation(curve) -> None. Raises ValueError if not found.
Deletes the AnimationCurve.
@param curve: An AnimationCurve instance which belongs to this Knob.
@return: None. Raises ValueError if not found.
"""
pass
def width(self):
"""self.width() -> Width of array of values.
@return: Width of array of values.
"""
pass
def getNumKeys(self):
"""Return number of keys at channel 'c'."""
pass
def valueAt(self):
"""self.valueAt(time, index, view) -> Floating point or List of floating point values (in case some are different).
Return value for this knob at specified time, optional index and view.
@param time: Time.
@param index: Optional index. Default is 0.
@param view: Optional view.
@return: Floating point or List of floating point values (in case some are different).
"""
pass
def arraySize(self):
"""self.arraySize() -> Number of elements in array.
@return: Number of elements in array.
"""
pass
def max(self):
"""self.max() -> Maximum value.
@return: Maximum value.
"""
pass
def setSingleValue(self):
"""self.setSingleValue(b, view) -> None.
Set to just hold a single value or not.
@param b: Boolean object.
@param view: Optional view. Default is current view.
@return: None.
"""
pass
def toScript(self):
"""self.toScript(quote, context) | |
# addons/odoo/fields.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" High-level objects for fields. """
from collections import defaultdict
from datetime import date, datetime, time
from operator import attrgetter
import itertools
import logging
import base64
import binascii
import pytz
try:
from xmlrpc.client import MAXINT
except ImportError:
#pylint: disable=bad-python3-import
from xmlrpclib import MAXINT
import psycopg2
from .tools import float_repr, float_round, frozendict, html_sanitize, human_size, pg_varchar, \
ustr, OrderedSet, pycompat, sql, date_utils, unique, IterableGenerator, image_process, merge_sequences
from .tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from .tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
from .tools.translate import html_translate, _
from .tools.mimetypes import guess_mimetype
from odoo.exceptions import CacheMiss
DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
EMPTY_DICT = frozendict()
RENAMED_ATTRS = [('select', 'index'), ('digits_compute', 'digits')]
DEPRECATED_ATTRS = [("oldname", "use an upgrade script instead.")]
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__[:-7] + '.schema')
Default = object() # default value for __init__() methods
def first(records):
""" Return the first record in ``records``, with the same prefetching. """
return next(iter(records)) if len(records) > 1 else records
def resolve_mro(model, name, predicate):
""" Return the list of successively overridden values of attribute ``name``
in mro order on ``model`` that satisfy ``predicate``. Model classes
(the ones that appear in the registry) are ignored.
"""
result = []
for cls in type(model).__mro__:
if not getattr(cls, 'pool', None) and name in cls.__dict__:
value = cls.__dict__[name]
if not predicate(value):
break
result.append(value)
return result
class MetaField(type):
""" Metaclass for field classes. """
by_type = {}
def __new__(meta, name, bases, attrs):
""" Combine the ``_slots`` dict from parent classes, and determine
``__slots__`` for them on the new class.
"""
base_slots = {}
for base in reversed(bases):
base_slots.update(getattr(base, '_slots', ()))
slots = dict(base_slots)
slots.update(attrs.get('_slots', ()))
attrs['__slots__'] = set(slots) - set(base_slots)
attrs['_slots'] = slots
return type.__new__(meta, name, bases, attrs)
def __init__(cls, name, bases, attrs):
super(MetaField, cls).__init__(name, bases, attrs)
if not hasattr(cls, 'type'):
return
if cls.type and cls.type not in MetaField.by_type:
MetaField.by_type[cls.type] = cls
# compute class attributes to avoid calling dir() on fields
cls.related_attrs = []
cls.description_attrs = []
for attr in dir(cls):
if attr.startswith('_related_'):
cls.related_attrs.append((attr[9:], attr))
elif attr.startswith('_description_'):
cls.description_attrs.append((attr[13:], attr))
_global_seq = iter(itertools.count())
class Field(MetaField('DummyField', (object,), {})):
"""The field descriptor contains the field definition, and manages accesses
and assignments of the corresponding field on records. The following
attributes may be provided when instantiating a field:
:param str string: the label of the field seen by users; if not
set, the ORM takes the field name in the class (capitalized).
:param str help: the tooltip of the field seen by users
:param bool readonly: whether the field is readonly (default: ``False``)
This only has an impact on the UI. Any field assignment in code will work
(if the field is a stored field or an inversible one).
:param bool required: whether the value of the field is required (default: ``False``)
:param bool index: whether the field is indexed in database. Note: no effect
on non-stored and virtual fields. (default: ``False``)
:param default: the default value for the field; this is either a static
value, or a function taking a recordset and returning a value; use
``default=None`` to discard default values for the field
:type default: value or callable
:param dict states: a dictionary mapping state values to lists of UI attribute-value
pairs; possible attributes are: ``readonly``, ``required``, ``invisible``.
.. warning:: Any state-based condition requires the ``state`` field value to be
available on the client-side UI. This is typically done by including it in
the relevant views, possibly made invisible if not relevant for the
end-user.
:param str groups: comma-separated list of group xml ids (string); this
restricts the field access to the users of the given groups only
:param bool company_dependent: whether the field value is dependent of the current company;
The value isn't stored on the model table. It is registered as `ir.property`.
When the value of the company_dependent field is needed, an `ir.property`
is searched, linked to the current company (and current record if one property
exists).
If the value is changed on the record, it either modifies the existing property
for the current record (if one exists), or creates a new one for the current company
and res_id.
If the value is changed on the company side, it will impact all records on which
the value hasn't been changed.
:param bool copy: whether the field value should be copied when the record
is duplicated (default: ``True`` for normal fields, ``False`` for
``one2many`` and computed fields, including property fields and
related fields)
:param bool store: whether the field is stored in database
(default:``True``, ``False`` for computed fields)
:param str group_operator: aggregate function used by :meth:`~odoo.models.Model.read_group`
when grouping on this field.
Supported aggregate functions are:
* ``array_agg`` : values, including nulls, concatenated into an array
* ``count`` : number of rows
* ``count_distinct`` : number of distinct rows
* ``bool_and`` : true if all values are true, otherwise false
* ``bool_or`` : true if at least one value is true, otherwise false
* ``max`` : maximum value of all values
* ``min`` : minimum value of all values
* ``avg`` : the average (arithmetic mean) of all values
* ``sum`` : sum of all values
:param str group_expand: function used to expand read_group results when grouping on
the current field.
.. code-block:: python
@api.model
def _read_group_selection_field(self, values, domain, order):
return ['choice1', 'choice2', ...] # available selection choices.
@api.model
def _read_group_many2one_field(self, records, domain, order):
return records + self.search([custom_domain])
.. rubric:: Computed Fields
:param str compute: name of a method that computes the field
.. seealso:: :ref:`Advanced Fields/Compute fields <reference/fields/compute>`
:param bool compute_sudo: whether the field should be recomputed as superuser
to bypass access rights (by default ``True`` for stored fields, ``False``
for non stored fields)
:param str inverse: name of a method that inverses the field (optional)
:param str search: name of a method that implement search on the field (optional)
:param str related: sequence of field names
.. seealso:: :ref:`Advanced fields/Related fields <reference/fields/related>`
"""
type = None # type of the field (string)
relational = False # whether the field is a relational one
translate = False # whether the field is translated
column_type = None # database column type (ident, spec)
column_format = '%s' # placeholder for value in queries
column_cast_from = () # column types that may be cast to this
_slots = {
'args': EMPTY_DICT, # the parameters given to __init__()
'_attrs': EMPTY_DICT, # the field's non-slot attributes
'_module': None, # the field's module name
'_modules': None, # modules that define this field
'_setup_done': None, # the field's setup state: None, 'base' or 'full'
'_sequence': None, # absolute ordering of the field
'automatic': False, # whether the field is automatically created ("magic" field)
'inherited': False, # whether the field is inherited (_inherits)
'inherited_field': None, # the corresponding inherited field
'name': None, # name of the field
'model_name': None, # name of the model of this field
'comodel_name': None, # name of the model of values (if relational)
'store': True, # whether the field is stored in database
'index': False, # whether the field is indexed in database
'manual': False, # whether the field is a custom field
'copy': True, # whether the field is copied over by BaseModel.copy()
'depends': None, # collection of field dependencies
'depends_context': None, # collection of context key dependencies
'recursive': False, # whether self depends on itself
'compute': None, # compute(recs) computes field on recs
'compute_sudo': False, # whether field should be recomputed as superuser
'inverse': None, # inverse(recs) inverses field on recs
'search': None, # search(recs, operator, value) searches on self
'related': None, # sequence of field names, for related fields
'company_dependent': False, # whether ``self`` is company-dependent (property field)
'default': None, # default(recs) returns the default value
'string': None, # field label
'help': None, # field tooltip
'readonly': False, # whether the field is readonly
'required': False, # whether the field is required
'states': None, # set readonly and required depending on state
'groups': None, # csv list of group xml ids
mode == 'remove':
if len(t_seq + ext_seq) <= maxlen:
return [t_seq + ext_seq], [[0, len(t_seq + ext_seq)]]
else:
return [], [[0, 0]]
if mode == 'slide':
return nlp2.sliding_windows(t_seq, maxlen - len(ext_seq), append_seq=ext_seq)
if mode == 'start_slice':
slices = t_seq[:maxlen - len(ext_seq)]
slices.extend(ext_seq)
return [slices], [[0, maxlen - len(ext_seq)]]
if mode == 'end_slice':
start_pos = len(t_seq) + len(ext_seq) - maxlen
slices = t_seq[start_pos:]
slices.extend(ext_seq)
return [slices], [[max(0, start_pos), len(t_seq)]]
def get_topP_unk_token(tokenizer, file_paths: list, topP: float):
unk_count_dict = OrderedDict()
for path in file_paths:
for input_sent in tqdm(nlp2.read_files_yield_lines(path)):
for tok in nlp2.split_sentence_to_array(input_sent):
if tokenizer._unk_token in tokenizer.tokenize(tok):
unk_count_dict[tok] = unk_count_dict.get(tok, 0) + 1
top_range = int((len(unk_count_dict) + 1) * topP * 100)
return list(unk_count_dict.keys())[:top_range]
def get_freqK_unk_token(tokenizer, file_paths: list, freqK: int):
unk_count_dict = OrderedDict()
for path in file_paths:
for input_sent in tqdm(nlp2.read_files_yield_lines(path)):
for tok in nlp2.split_sentence_to_array(input_sent):
if tokenizer._unk_token in tokenizer.tokenize(tok):
unk_count_dict[tok] = unk_count_dict.get(tok, 0) + 1
return [key for key, value in unk_count_dict.items() if value >= freqK]
"""### Eval_metric"""
from collections import defaultdict
import string
import re
from collections import Counter
def _normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
if len(text) > 1:
return re.sub(r'\b(a|an|the)\b', ' ', text)
else:
return text
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def _f1_score(prediction, ground_truth):
prediction_tokens = _normalize_answer(prediction).split()
ground_truth_tokens = _normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
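# Worked example: prediction "the cat sat" vs. target "a cat sat down" normalise
# to "cat sat" and "cat sat down"; the two shared tokens give precision 2/2 and
# recall 2/3, so _f1_score returns 2 * (1 * 2/3) / (1 + 2/3) = 0.8.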
class EvalMetric:
def __init__(self, tokenizer, max_candidate=6):
self.tasks = defaultdict(lambda: defaultdict(list))
self.max_candidate = max_candidate
self.tokenizer = tokenizer
self.target_list = defaultdict(lambda: defaultdict(int))
def tokenize_text(self, text):
return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text))
def add_record(self, input, predicted, target, task='default'):
if isinstance(input, str):
input = self.tokenize_text(input.strip())
if isinstance(input, list):
for i, t in enumerate(input):
input[i] = self.tokenize_text(t.strip())
if isinstance(predicted, str):
predicted = self.tokenize_text(predicted)
if isinstance(predicted, list):
for i, t in enumerate(predicted):
predicted[i] = self.tokenize_text(t.strip())
if isinstance(target, str):
targets = []
if "[SEP]" in target:
targets.extend([self.tokenize_text(st.strip()) for st in target.split("[SEP]")])
else:
targets.append(self.tokenize_text(target.strip()))
if isinstance(target, list):
for i, t in enumerate(target):
target[i] = self.tokenize_text(t.strip())
targets = target
if self.max_candidate - len(targets) > 0 and "nlg" in task:
targets.extend([""] * (self.max_candidate - len(targets)))
for t in targets:
self.target_list[task][t] += 1
self.tasks[task]['input'].append(input)
self.tasks[task]['predicted'].append(predicted)
self.tasks[task]['predicteds'].append([predicted])
self.tasks[task]['target'].append(target)
self.tasks[task]['targets'].append(targets)
def get_record(self, task='default'):
return self.tasks[task]
def cal_score(self, metric):
data_score = []
for task_name, task in self.tasks.items():
print("Task : " + task_name + " report ")
if "emf1" in metric:
em = 0
total = 0
f1 = 0
for pos, predict in enumerate(task['predicted']):
em_list = []
f1_list = []
for target in task['targets'][pos]:
if _normalize_answer(str(predict)) == _normalize_answer(str(target)) and len(
_normalize_answer(str(predict))) > 0 or len(str(predict)) == len(str(target)) == 0:
em_score = 1
f1_score = 1
else:
em_score = 0
f1_score = _f1_score(str(predict), str(target))
em_list.append(em_score)
f1_list.append(f1_score)
em += max(em_list)
f1 += max(f1_list)
data_score.append([predict, task['targets'][pos][em_list.index(max(em_list))],
{'em': max(em_list), 'f1': max(f1_list)}])
total += 1
result = {"EM": em / (total or not total), "F1": f1 / (total or not total)}
data_score = sorted(data_score, key=lambda i: i[2]['em'], reverse=True)
if "nlg" in metric:
try:
from nlgeval import NLGEval
except ImportError:
print(
"nlg-eval package not install, plz install it: pip install git+https://github.com/voidful/nlg-eval.git ; nlg-eval --setup ./nlg-eval-data/")
raise
nlgeval = NLGEval(no_skipthoughts=True, no_glove=True, metrics_to_omit=["METEOR"])
targets = task['targets']
predicted = task['predicted']
for t, p in zip(targets, predicted):
data_score.append([p, t, nlgeval.compute_metrics(ref_list=list(map(list, zip(t))), hyp_list=[p])])
result = nlgeval.compute_metrics(ref_list=list(map(list, zip(*task['targets']))), # transpose
hyp_list=predicted)
data_score = sorted(data_score, key=lambda i: i[2]['ROUGE_L'])
if "clas" in metric:
from sklearn.metrics import classification_report
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import precision_recall_fscore_support
target_key = [t for t in self.target_list[task_name].keys() if len(t) > 0]
mlb = MultiLabelBinarizer().fit([target_key])
# remove all blank target
task['targets'] = [[j for j in sub if len(j) > 0] for sub in task['targets']]
# modify for tagging result
if isinstance(task['predicteds'][0][0], list):
task['targets'] = sum([[[j] for j in sub] for sub in task['targets']], [])
task['predicteds'] = sum([[[j] for j in sub] for sub in task['predicted']], [])
if len(task['targets']) != len(task['predicteds']):
diff = len(task['targets']) - len(task['predicteds'])
task['predicteds'].extend([['']] * diff)
targets = task['targets']
predicted = task['predicteds']
for p, t in zip(predicted, targets):
score = dict(zip(["precision", "recall", "fbeta_score", "support"],
precision_recall_fscore_support(mlb.transform([t]), mlb.transform([p]),
average='weighted')))
data_score.append([p, t, score])
print(mlb.classes_)
result = classification_report(
mlb.transform(targets),
mlb.transform(predicted),
target_names=list(mlb.classes_))
data_score = sorted(data_score, key=lambda i: i[2]['fbeta_score'])
yield (task_name, result, data_score)
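# Usage sketch (illustrative; the strings are placeholders):
#
#     metric = EvalMetric(tokenizer)
#     metric.add_record(input="question text", predicted="answer a",
#                       target="answer a [SEP] answer b", task="qa")
#     for task_name, result, per_example in metric.cal_score('emf1'):
#         print(task_name, result)   # e.g. {'EM': 1.0, 'F1': 1.0}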
"""## Model"""
def once_get_feature_from_data(tokenizer, maxlen, input, target=None, ntarget=None, reserved_len=0,
handle_exceed_='start_slice', add_end_tok=True, **kwargs):
feature_dict_list = []
tokenized_target = tokenizer.tokenize(target) if target is not None else []
print()
t_input_list, _ = handle_exceed(tokenizer, input, maxlen - 3 - len(tokenized_target), handle_exceed_)
for t_input in t_input_list: # -3 leaves room for [CLS], [SEP] and the prediction-ending [SEP]
row_dict = dict()
tokenized_input = [tok_begin(tokenizer)] + t_input[:maxlen - reserved_len - 3] + [tok_sep(tokenizer)]
mask_id = [1] * len(tokenized_input)
type_id = [0] * len(tokenized_input)
row_dict['target'] = [-1] * maxlen
row_dict['ntarget'] = [-1] * maxlen
tokenized_input_id = tokenizer.convert_tokens_to_ids(tokenized_input)
target_start = len(tokenized_input_id)
target_end = maxlen
if target is not None:
if add_end_tok:
tokenized_target += [tok_sep(tokenizer)]
tokenized_target_id = [-1] * len(tokenized_input)
tokenized_target_id.extend(tokenizer.convert_tokens_to_ids(tokenized_target))
target_end = len(tokenized_target_id) - 1
tokenized_target_id.extend([-1] * (maxlen - len(tokenized_target_id)))
row_dict['target'] = tokenized_target_id
if ntarget is not None:
tokenized_ntarget = tokenizer.tokenize(ntarget)
tokenized_ntarget_id = [-1] * target_start
tokenized_ntarget_id.extend(tokenizer.convert_tokens_to_ids(tokenized_ntarget))
tokenized_ntarget_id.extend([-1] * (maxlen - len(tokenized_ntarget_id)))
if len(tokenized_ntarget_id) <= maxlen:
row_dict['ntarget'] = tokenized_ntarget_id
tokenized_input_id.extend([tokenizer.mask_token_id] * (maxlen - len(tokenized_input_id)))
mask_id.extend([0] * (maxlen - len(mask_id)))
type_id.extend([1] * (maxlen - len(type_id)))
row_dict['input'] = tokenized_input_id
row_dict['type'] = type_id
row_dict['mask'] = mask_id
row_dict['start'] = target_start
row_dict['end'] = target_end
feature_dict_list.append(row_dict)
return feature_dict_list
import csv
from collections import defaultdict
from tqdm import tqdm
# import tfkit.utility.tok as tok
# import tfkit.model.once as once
def get_data_from_row(i):
# print(i)
tasks = defaultdict(list)
task = 'default'
tasks[task] = []
source_text = i[0]
target_text = i[1].strip().split(" ")
negative_text = i[2].strip() if len(i) > 2 else None
input = source_text
target = target_text
return tasks, task, input, [target, negative_text]
# tasks == {'default': []})
# task == 'default'
# input - CAQ
# target - D (distractor)
# negative_text - A (answer)
def get_data_from_file(fpath):
with open(fpath, encoding='utf') as csvfile:
for i in tqdm(list(csv.reader(csvfile))):
yield get_data_from_row(i)
def get_full_file(fpath):
tasks = defaultdict(list)
task = 'default'
tasks[task] = []
with open(fpath, encoding='utf') as csvfile:
return list(csv.reader(csvfile))
def preprocessing_data(item, tokenizer, maxlen=512, handle_exceed='start_slice',
likelihood=['none', 'pos', 'neg', 'both'], reserved_len=0, **kwargs):
"""
item: tuple returned by get_data_from_file()
tokenizer: tokenizer object
maxlen:
handle_exceed:
likelihood: model specification
reserved_len:
kwargs: cli args
"""
likelihood = likelihood[0] if isinstance(likelihood, list) else likelihood
tasks, task, input, targets = item
p_target, n_target = targets
input = input.strip()
tokenized_target = tokenizer.tokenize(" ".join(p_target))
param_dict = {'tokenizer': tokenizer, 'maxlen': maxlen,
'handle_exceed': handle_exceed, 'reserved_len': reserved_len}
# print(kwargs)
if kwargs.get('setting') == 'QA_pretrained_triplet':
param_dict.update({
'CQA': 'input'
})
# each word in sentence
for j in range(1, len(tokenized_target) + 1):
if "neg" in likelihood or 'both' in likelihood:
# formatting neg data in csv
if n_target is None:
ntext_arr = [tokenizer.convert_tokens_to_string(tokenized_target[:j - 1])]
elif "[SEP]" in n_target:
ntext_arr = [ntext.strip() for ntext in n_target.split("[SEP]")]
else:
ntext_arr = [n_target.strip()]
# adding neg data
for neg_text in ntext_arr:
yield get_feature_from_data, {
**{'input': input, 'previous': tokenized_target[:j - 1],
'target': tokenized_target[:j], 'ntarget': neg_text, "add_end_tok": False},
**param_dict}
else:
yield get_feature_from_data, {**{'input': input, 'previous': tokenized_target[:j - 1],
'target': tokenized_target[:j], 'ntarget': None}, **param_dict}
# end of the last word
if "neg" in likelihood or 'both' in likelihood:
# formatting neg data in csv
if n_target is None:
ntext_arr = [tokenizer.convert_tokens_to_string(tokenized_target[:j])]
elif "[SEP]" in n_target:
ntext_arr = [ntext.strip() for ntext in n_target.split("[SEP]")]
else:
ntext_arr = [n_target.strip()]
# adding neg data
for neg_text in ntext_arr:
yield get_feature_from_data, {**{'input': input, 'previous': tokenized_target,
'target': [tok_sep(tokenizer)], 'ntarget': neg_text}, **param_dict}
else:
yield get_feature_from_data, {**{'input': input, 'previous': tokenized_target,
'target': [tok_sep(tokenizer)], 'ntarget': None}, **param_dict}
# whole sentence masking
if 'pos' in likelihood:
yield once_get_feature_from_data, {**{'input': input, 'target': " ".join(p_target)}, **param_dict}
elif 'both' in likelihood:
# formatting neg data in csv
if n_target is None:
ntext_arr = []
elif "[SEP]" in n_target:
ntext_arr = [ntext.strip() for ntext in n_target.split("[SEP]")]
else:
ntext_arr = [n_target.strip()]
for neg_text in ntext_arr:
yield once_get_feature_from_data, {**{'input': input, 'target': " ".join(p_target), 'ntarget': neg_text}, **param_dict}
= spark_version
self.default_spark_log_folder = default_spark_log_folder
self.node_size = node_size
self.node_size_family = node_size_family
class CheckNameAvailabilityRequest(Model):
"""Check name availability request.
A request about whether a workspace name is available.
:param name: Workspace name
:type name: str
:param type: Type: workspace
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None:
super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
self.name = name
self.type = type
class CheckNameAvailabilityResponse(Model):
"""Check name availability response.
A response saying whether the workspace name is available.
:param message: Validation message
:type message: str
:param available: Whether the workspace name is available
:type available: bool
:param reason: Reason the workspace name is or is not available
:type reason: str
:param name: Workspace name
:type name: str
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'available': {'key': 'available', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, *, message: str=None, available: bool=None, reason: str=None, name: str=None, **kwargs) -> None:
super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
self.message = message
self.available = available
self.reason = reason
self.name = name
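# Usage sketch (illustrative; the workspace name is a placeholder, and the resource
# type string / operation name are assumptions that may differ per SDK version):
#
#     request = CheckNameAvailabilityRequest(name='contoso-ws',
#                                            type='Microsoft.Synapse/workspaces')
#     # response = synapse_client.operations.check_name_availability(request)
#     # response.available -> bool; response.reason and response.message explain a rejection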
class CloudError(Model):
"""The object that defines the structure of an Azure Synapse error response.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code.
:type code: str
:param message: Required. Error message.
:type message: str
:param target: Property name/path in request associated with error.
:type target: str
:param details: Array with additional error details.
:type details: list[~azure.mgmt.synapse.models.CloudError]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'error.code', 'type': 'str'},
'message': {'key': 'error.message', 'type': 'str'},
'target': {'key': 'error.target', 'type': 'str'},
'details': {'key': 'error.details', 'type': '[CloudError]'},
}
def __init__(self, *, code: str, message: str, target: str=None, details=None, **kwargs) -> None:
super(CloudError, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
class CloudErrorException(HttpOperationError):
"""Server responsed with exception of type: 'CloudError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args)
class CustomSetupBase(Model):
"""The base definition of the custom setup.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CmdkeySetup, EnvironmentVariableSetup, ComponentSetup
All required parameters must be populated in order to send to Azure.
:param type: Required. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {'CmdkeySetup': 'CmdkeySetup', 'EnvironmentVariableSetup': 'EnvironmentVariableSetup', 'ComponentSetup': 'ComponentSetup'}
}
def __init__(self, **kwargs) -> None:
super(CustomSetupBase, self).__init__(**kwargs)
self.type = None
class CmdkeySetup(CustomSetupBase):
"""The custom setup of running cmdkey commands.
All required parameters must be populated in order to send to Azure.
:param type: Required. Constant filled by server.
:type type: str
:param target_name: Required. The server name of data source access.
:type target_name: object
:param user_name: Required. The user name of data source access.
:type user_name: object
:param password: Required. The password of data source access.
:type password: ~azure.mgmt.synapse.models.SecretBase
"""
_validation = {
'type': {'required': True},
'target_name': {'required': True},
'user_name': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'target_name': {'key': 'typeProperties.targetName', 'type': 'object'},
'user_name': {'key': 'typeProperties.userName', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecretBase'},
}
def __init__(self, *, target_name, user_name, password, **kwargs) -> None:
super(CmdkeySetup, self).__init__(**kwargs)
self.target_name = target_name
self.user_name = user_name
self.password = password
self.type = 'CmdkeySetup'
class ComponentSetup(CustomSetupBase):
"""The custom setup of installing 3rd party components.
All required parameters must be populated in order to send to Azure.
:param type: Required. Constant filled by server.
:type type: str
:param component_name: Required. The name of the 3rd party component.
:type component_name: str
:param license_key: The license key to activate the component.
:type license_key: ~azure.mgmt.synapse.models.SecretBase
"""
_validation = {
'type': {'required': True},
'component_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'component_name': {'key': 'typeProperties.componentName', 'type': 'str'},
'license_key': {'key': 'typeProperties.licenseKey', 'type': 'SecretBase'},
}
def __init__(self, *, component_name: str, license_key=None, **kwargs) -> None:
super(ComponentSetup, self).__init__(**kwargs)
self.component_name = component_name
self.license_key = license_key
self.type = 'ComponentSetup'
class CreateSqlPoolRestorePointDefinition(Model):
"""Contains the information necessary to perform a create Sql pool restore
point operation.
All required parameters must be populated in order to send to Azure.
:param restore_point_label: Required. The restore point label to apply
:type restore_point_label: str
"""
_validation = {
'restore_point_label': {'required': True},
}
_attribute_map = {
'restore_point_label': {'key': 'restorePointLabel', 'type': 'str'},
}
def __init__(self, *, restore_point_label: str, **kwargs) -> None:
super(CreateSqlPoolRestorePointDefinition, self).__init__(**kwargs)
self.restore_point_label = restore_point_label
class DataLakeStorageAccountDetails(Model):
"""Details of the data lake storage account associated with the workspace.
:param account_url: Account URL
:type account_url: str
:param filesystem: Filesystem name
:type filesystem: str
"""
_attribute_map = {
'account_url': {'key': 'accountUrl', 'type': 'str'},
'filesystem': {'key': 'filesystem', 'type': 'str'},
}
def __init__(self, *, account_url: str=None, filesystem: str=None, **kwargs) -> None:
super(DataLakeStorageAccountDetails, self).__init__(**kwargs)
self.account_url = account_url
self.filesystem = filesystem
class ProxyResource(Resource):
"""The resource model definition for a ARM proxy resource. It will have
everything other than required location and tags.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(ProxyResource, self).__init__(**kwargs)
class DataWarehouseUserActivities(ProxyResource):
"""User activities of a data warehouse.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
:ivar active_queries_count: Count of running and suspended queries.
:vartype active_queries_count: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'active_queries_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'active_queries_count': {'key': 'properties.activeQueriesCount', 'type': 'int'},
}
def __init__(self, **kwargs) -> None:
super(DataWarehouseUserActivities, self).__init__(**kwargs)
self.active_queries_count = None
class EntityReference(Model):
"""The entity reference.
:param type: The type of this referenced entity. Possible values include:
'IntegrationRuntimeReference', 'LinkedServiceReference'
:type type: str or
~azure.mgmt.synapse.models.IntegrationRuntimeEntityReferenceType
:param reference_name: The name of this referenced entity.
:type reference_name: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
}
def __init__(self, *, type=None, reference_name: str=None, **kwargs) -> None:
super(EntityReference, self).__init__(**kwargs)
self.type = type
self.reference_name = reference_name
class EnvironmentVariableSetup(CustomSetupBase):
"""The custom setup of setting environment variable.
All required parameters must be populated in order to send to Azure.
:param type: Required. Constant filled by server.
:type type: str
:param variable_name: Required. The name of the environment variable.
:type variable_name: str
:param variable_value: Required. The value of the environment variable.
:type variable_value: str
"""
_validation = {
'type': {'required': True},
'variable_name': {'required': True},
'variable_value': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'variable_name': {'key': 'typeProperties.variableName', 'type': 'str'},
'variable_value': {'key': 'typeProperties.variableValue', 'type': 'str'},
}
def __init__(self, *, variable_name: str, variable_value: str, **kwargs) -> None:
super(EnvironmentVariableSetup, self).__init__(**kwargs)
self.variable_name = variable_name
self.variable_value = variable_value
self.type = 'EnvironmentVariableSetup'
class ErrorAdditionalInfo(Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: object
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(self, **kwargs) -> None:
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorContract(Model):
"""Error details.
Contains details when the response code indicates an error.
# cltl-students/soren-fomsgaard-qanon-style
#%% ANCHOR IMPORTS
#import pickle
#import multiprocessing
#from os import read
## Preprocessing modules
import re
#import nltk
#from nltk.tokenize import RegexpTokenizer, WhitespaceTokenizer, word_tokenize
#from nltk.stem import WordNetLemmatizer
#from nltk.corpus import stopwords
#import string
from string import punctuation
from collections import defaultdict, Counter
from ekphrasis.classes.spellcorrect import SpellCorrector
import emoji
#from gensim.parsing.preprocessing import remove_stopwords
import spacy
from spacy.tokenizer import _get_regex_pattern
# ekphrasis for social tokenization
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts import emoticons
#from spacy_langdetect import LanguageDetector # language detector for parsing telegram data.
from langdetect import detect
from num2words import num2words
# Use modin. Doesn't work under windows with dask engine.
#import modin.pandas as pd
import pandas as pd
import numpy as np
import swifter
## Anonymization
#import uuid
from utils.grasp import URL, deflood
#from anonymizedf.anonymizedf import anonymize
#from faker import Faker
#%% Read the data
def read_data(filepath , strict_lang='en'):
"""Read data in csv format in order to preprocess.
Args:
filepath (str): a filepath to a csv file with twitter data.
strict_lang (str, optional): whether to select only tweets with explicit language metadata. Defaults to 'en'.
Returns:
a pandas DataFrame: the tweets, optionally filtered by language, de-duplicated, and with mentions and URLs anonymized.
"""
data = pd.read_csv(filepath , names=["id",
"user",
"language",
"text",
"date",
"favs"])
# Apply language selection if specified.
if strict_lang != None:
data = data.loc[data['language'] == strict_lang]
# # drop duplicate tweets.
data.drop_duplicates(subset=['text'] , inplace=True)
# # Anonymize mentions in tweets
mention = re.compile("@\w+")
data.text = data.text.str.replace(mention, '@USER')
# # Anonymize urls in tweets.
data.text = data.text.str.replace(URL, 'URL')
return data
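# Commented usage sketch for read_data (illustrative; the path matches the example kept below):
# data = read_data('../../../Data/NonQanon/non-qanon-feb-mar.csv', strict_lang='en')
# data.head()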
#data = pd.read_csv('../../../Data/NonQanon/non-qanon-feb-mar.csv', names=["id",
# "user",
# "language",
# "text",
# "date",
# "favs"])
# %%
# find rows that langdetect doesn't like:
# texl70 = test['text']
# langdet = []
# for i in range(len(test)):
# try:
# lang = detect(texl70[i])
# except:
# lang = 'no'
# print("This row throws error:", texl70[i])
# langdet.append(lang)
# Select just the english subset (for the topic model only!)
#data = data.loc[data['language'] == 'en']
#%%[markdown]
## Check for empty tweets
#%%
#data['text'].isnull().values.any()
#%%
# Drop duplicates
#data.drop_duplicates(inplace=True)
#%%[markdown]
## Anonymize users - avoid this.
#%%
# Numerical method.
#data.assign(user=data.user.factorize()[0] + 1)
#%%
# Name method. Uses English names.
### adapted from https://stackoverflow.com/a/59929112 ###
#faker = Faker()
# Seed random generator
# Faker.seed(1881)
# anon_names = {name: faker.name() for name in data['user'].unique()}
# data['user'] = data['user'].map(anon_names)
### Accessed 18-02-2021 ###
#%%
# Anonymize mentions in tweets
#mention = re.compile("@\w+")
# use lambda, str.replace or something else?
#data.text = data.text.str.replace(mention, '@USER')
# Anonymize urls in tweets.
#data.text = data.text.str.replace(URL, 'URL')
#%%
# Return substitutions for inspection.
#subs = data[data['text'].str.contains('URL')]
#%%[markdown]
## Initialize objects for preprocessing.
#%% # ANCHOR SPACY MODEL(s)
# Load models and stopwords
# Load spacy to experiment with it
# English spacy model
english = spacy.load('en_core_web_sm')
# English stopwords
en_stopwords = spacy.lang.en.stop_words.STOP_WORDS
# Dutch spacy model
dutch = spacy.load('nl_core_news_sm')
# Dutch stopwords
nl_stopwords = spacy.lang.nl.stop_words.STOP_WORDS
#%%
# Modify spacy's default tokenizer to ignore in-word hyphens and hashtags.
models = [english, dutch]
for model in models:
### Adapted from https://stackoverflow.com/a/58053407 ###
# default pattern for tokens that do not get split
re_token_match = _get_regex_pattern(model.Defaults.token_match)
# add your patterns (here: hashtags and in-word hyphens)
re_token_match = f"({re_token_match}|#\w+|\w+-\w+)"
# overwrite token_match function of the tokenizer
model.tokenizer.token_match = re.compile(re_token_match).match
### Accessed 23-02-2021 ###
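# Quick sanity check of the customized tokenizers (commented out; illustrative input only).
# With the token_match override above, hashtags and in-word hyphens are expected to stay
# single tokens:
# doc = english("a #hashtag and a re-tweet")
# print([t.text for t in doc])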
#%%
# translator object for removing punctuation. Equivalent to re.subbing [^\w\s]
#translator = str.maketrans('', '', punctuation)
#stopwords = set(stopwords.words("english")) # NLTK
#stopwords = remove_stopwords(text) # Gensim
#stopwords = nlp.Defaults.stop_words default spacy stopwords
# Add extra stopwords here; "user" is already included in spacy.
stop_list = [en_stopwords , nl_stopwords]
for stops in stop_list:
stops.update(['rt' , 'url'])
# Non-aggressive stopwords
non_agg_stopwords = ['RT', 'URL']
#%%
# Ekphrasis preprocessing pipeline
# text_preprocesor = TextPreProcessor(
# # Less aggressive normalization.
# #omit = ['email', 'percent', 'money', 'phone', 'hashtag']
# omit = ['hashtag', 'user'],
# normalize = ['email'],
# #annotate = { "allcaps", "elongated", "repeated", 'emphasis', 'censored'}
# fix_html = True,
# segmenter = "twitter",
# corrector = "twitter",
# unpack_hashtags = False,
# unpack_contractions = False,
# spell_correct_elong = False,
# tokenizer = SocialTokenizer(lowercase=False).tokenize,
# #dicts = [emoticons]
# )
#%%
# Just the social tokenizer from ekphrasis.
ek_tok_inc = SocialTokenizer(lowercase=False, emails=False).tokenize
ek_tok_ex = SocialTokenizer(lowercase=False, emojis=False, emoticons=False, emails=False, hashtags=False).tokenize # numbers=False
#%% #ANCHOR EXTRACT_EMOTES()
# Vectorizing these substitutions would be much faster.
# function to extract emojis
emoj_pat = re.compile(emoji.get_emoji_regexp())
econ_set = {econ for econ in emoticons.emoticons.keys()}
econ_table = {econ:"" for econ in econ_set}
htag_pat = re.compile(r"#\w+")
rt_pat = re.compile(r"\bRT\b" , re.M)
# Doesn't work
#econ_string = "|".join(econ_set)
#econ_regexp = re.compile(econ_string)
# This function works but is very inefficient...
def mini_clean(instr):
"""Specifically remove emoticons, emojis and hashtags from a string.
Args:
instr (str): a string to have emoticons, emojis and hashtags removed.
Returns:
str: a copy of the string with emoticons, emojis and hashtags removed.
"""
# regex subs.
instr = re.sub(emoj_pat , "", instr) # emojis
instr = re.sub(htag_pat , "" , instr) # hashtags
instr = re.sub(rt_pat , "", instr) # retweets
#input = re.sub(r"@USER[\W\S]?\w?", "", input,
# re.IGNORECASE) # subbed mentions
instr = instr.replace("URL" , "") # subbed urls
for con in econ_set:
if con in instr:
#print("match!")
instr = instr.replace(con , "")
return instr
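# Commented usage sketch for mini_clean (illustrative string only): the retweet marker,
# the already-substituted URL token, the hashtag, the emoji and the emoticon should all
# be stripped out.
# mini_clean("RT check this out URL #qanon 👍 :)")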
#%%
# trying to vectorize
# see https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.replace.html and https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html
# df.replace(to_replace={"text" : econ_table})
#%%
def extract_emotes(tokens):
"""Extract emojis and emoticons from a tokenized string.
Args:
tokens (list): A list of strings (tokens)
Returns:
tuple: A tuple of two lists (emojis, emoticons) to be unpacked.
"""
# Using default dicts to count right away
#emojis = defaultdict(int)
#econs = defaultdict(int)
# Alternative, two in one.
#matches = {"emojis" : defaultdict(int),
# "emoticons" : defaultdict(int)}
# Using lists. Handle counting later in stats.
emojis = []
econs = []
## UNCOMMENT ##
# Regexp for most common emojis. #Moved to global variable
#emoj_pat = re.compile(emoji.get_emoji_regexp())
# Set of specific emoticons.
#econ_set = {econ for econ in emoticons.emoticons.keys()} # moved to global variable.
## UNCOMMENT ##
### OLD CODE
# Regexp for emoticons
# grasp.EMOTICON # much sparser than ekphrasis
#econ_pat = re.compile(r"[:;=B\*\-\(\)\[\]x0oOpPdD\#\<\>8\.'|\{\}\@=;:]+(?:(?=\s))", flags= re.U)
# Alternative econ_pat, not as robust , adatped from https://stackoverflow.com/questions/28077049/regex-matching-emoticons:
# (\:\w+\:|\<[\/\\]?3|[\(\)\\\D|\*\$][\-\^]?[\:\;\=]|[\:\;\=B8Xx]'?[\-\^]?[3CcDdOoPpSs\@\$\*\\\)\(\/\|])(?=\s|[\!\.\?]|$)
### accessed 16-03-2021
### OLD CODE ###
for token in tokens:
if re.match(emoj_pat , token):
#emojis[token] += 1
emojis.append(token)
if token in econ_set:
econs.append(token)
#econs[token] += 1
#emojis = re.findall(emoj_pat , string)
#econs = re.findall(econ_pat , string)
# if re.match()
# emoji.emoji_count(emoj_pat)
# make dict with emote:count
return emojis , econs
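# Commented usage sketch for extract_emotes (illustrative tokens only); it expects already
# tokenized text, e.g. the output of ek_tok_inc:
# emojis, econs = extract_emotes(ek_tok_inc("nice :) 👍"))
# # roughly: emojis -> ['👍'], econs -> [':)'] (counting is handled later in the stats code)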
# %% ANCHOR: PREPROCESS()
def preprocess(instr, normalize=True):
"""Preprocess a string.
Args:
instr (str): A string to be processed.
normalize (bool, optional): Indicate whether to apply (aggressive) text normalization or not. Defaults to True.
Returns:
str: A preprocessed copy of the input string.
"""
if normalize == False:
tokenized = ek_tok_inc(instr)
#tokenized_clean = ek_tok_ex(instr)
text_clean = mini_clean(instr)
# Extract hashtags
hashtags = [token.lower() for token in tokenized if token.startswith('#')]
# Extract emojis and emoticons
emotes = extract_emotes(tokenized)
# Apply aggressive normalization for topic modeling.
if normalize == True:
# TODO, disable this / rewrite for consistency with read_data(strict_lang).
# detect language
#if detect(instr) == 'nl':
# nlp = dutch
# stopwords = nl_stopwords
#else:
nlp = english
stopwords = en_stopwords
## Reduce repeated characters to max 3 using grasp.deflood(). Here instead under 'normalization' for computational reasons.
deflooded = deflood(instr , n=3)
## Tokenize
#tokenized = word_tokenize(no_punct.lower())
## Spacy ##
doc = nlp(deflooded) # This can be optimized.
tokenized = [token for token in doc]
## Remove stopwords
## NLTK ##
#rm_swrd = [word for word in tokenized if word not in stopwords]
rm_swrd = [token for token in tokenized if token.norm_ not in stopwords]
## Normalization
## Remove punctuation
#no_punct = instr.translate(translator) # not as robust as re.sub
#word_pat = re.compile(r'[^\w\s]' , re.M)
#no_punct = re.sub(word_pat , ' ' , instr) # NOTE: The whitespace used to sub here is interpreted differently by NLTK versus spacy during tokenization.
no_punct = [token for token in rm_swrd if token.norm_ not in punctuation]
# Rejoin tokenized and normalized string.
#text_norm = ' '.join([token.norm_ for token in no_punct if not token.norm_.startswith('#')])
## Spelling correction
# Lemmatize
## NLTK ##
#lemmatized = [lemmatizer.lemmatize(word) for word in rm_swrd]
##
# Repository: pombredanne/docker-scripts
import datetime
import docker
import hashlib
import json
import logging
import os
import re
import shutil
import six
import tarfile
import tempfile
import threading
from docker_squash.errors import SquashError, SquashUnnecessaryError
if not six.PY3:
import docker_squash.lib.xtarfile
class Chdir(object):
""" Context manager for changing the current working directory """
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
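# Usage sketch (hypothetical path): Chdir temporarily switches the working directory and
# restores the previous one on exit, which _tar_image below relies on.
#
#   with Chdir("/tmp/some-image-dir"):
#       ...  # paths are now relative to /tmp/some-image-dir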
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None, comment=""):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.comment = comment
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warning(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError(
"Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError(
"Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(
((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
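# Illustrative sketch (hypothetical layer IDs): with layers ordered oldest-first as
# ['base', 'a', 'b', 'c'] and from_layer='a', everything above 'a' gets squashed:
#
#   to_squash, to_leave = self._layers_to_squash(['base', 'a', 'b', 'c'], 'a')
#   # to_squash == ['b', 'c'], to_leave == ['base', 'a']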
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly, which could be used to read from
self.log.debug(
"Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug(
"Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(
target=self._extract_tar, args=(r, directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warning(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers,
of <TEXT>
data = data.replace("<TEXT>", "⁂")
data = TAG_PATTERN.sub("", data)
min_offset = max(0, data.find("⁂"))
data = data.replace("⁂", "")
# Extract sentences from chunks
chunk_offset = 0
sentences = []
for chunk in chunks:
lines = chunk.split("\n")
current_sentence = []
start = offset = 0
for line in lines:
offset += len(line) + 1
if line.strip():
current_sentence.append(line)
else:
# empty line
if current_sentence:
sentence = " ".join(current_sentence)
if start + chunk_offset >= min_offset:
sentences.append(
(
sentence,
start + chunk_offset,
start + chunk_offset + len(sentence),
)
)
current_sentence = []
start = offset
if current_sentence:
sentence = " ".join(current_sentence)
if start + chunk_offset >= min_offset:
sentences.append(
(
sentence,
start + chunk_offset,
start + chunk_offset + len(sentence),
)
)
chunk_offset += len(chunk)
# Re-tokenize sentences
sentences = [
s for sent in sentences for s in sent_tokenize(sent, language=language)
]
return sentences
def read_apf_file(
path: str, time_and_val: bool = False
) -> Tuple[str, str, List[Entity], List[Relation], List[Event]]:
"""Reads an APF file.
Args:
path (str): path to the input file.
time_and_val (bool): extract times and values or not.
Returns:
doc_id (str): document ID.
source (str): document source.
entity_list (List[Entity]): a list of Entity instances.
relation_list (List[Relation]): a list of Relation instances.
event_list (List[Event]): a list of Events instances.
"""
data = open(path, "r", encoding="utf-8").read()
soup = BeautifulSoup(data, "lxml-xml")
# metadata
root = soup.find("source_file")
source = root["SOURCE"]
doc = root.find("document")
doc_id = doc["DOCID"]
entity_list, relation_list, event_list = [], [], []
# entities: nam, nom, pro
for entity in doc.find_all("entity"):
entity_id = entity["ID"]
entity_type = entity["TYPE"]
entity_subtype = entity["SUBTYPE"]
for entity_mention in entity.find_all("entity_mention"):
mention_id = entity_mention["ID"]
mention_type = entity_mention["TYPE"]
head = entity_mention.find("head").find("charseq")
start, end, text = int(head["START"]), int(head["END"]), head.text
entity_list.append(
Entity(
start,
end,
text,
entity_id,
mention_id,
entity_type,
entity_subtype,
mention_type,
)
)
if time_and_val:
# entities: value
for entity in doc.find_all("value"):
entity_id = entity["ID"]
entity_type = entity["TYPE"]
entity_subtype = entity.get("SUBTYPE", None)
for entity_mention in entity.find_all("value_mention"):
mention_id = entity_mention["ID"]
mention_type = "VALUE"
extent = entity_mention.find("extent").find("charseq")
start, end, text = int(extent["START"]), int(extent["END"]), extent.text
entity_list.append(
Entity(
start,
end,
text,
entity_id,
mention_id,
entity_type,
entity_subtype,
mention_type,
)
)
# entities: timex
for entity in doc.find_all("timex2"):
entity_id = entity["ID"]
entity_type = entity_subtype = "TIME"
value = entity.get("VAL", None)
for entity_mention in entity.find_all("timex2_mention"):
mention_id = entity_mention["ID"]
mention_type = "TIME"
extent = entity_mention.find("extent").find("charseq")
start, end, text = int(extent["START"]), int(extent["END"]), extent.text
entity_list.append(
Entity(
start,
end,
text,
entity_id,
mention_id,
entity_type,
entity_subtype,
mention_type,
value=value,
)
)
# relations
for relation in doc.find_all("relation"):
# relation_id = relation["ID"]
relation_type = relation["TYPE"]
if relation_type == "METONYMY":
continue
relation_subtype = relation["SUBTYPE"]
for relation_mention in relation.find_all("relation_mention"):
mention_id = relation_mention["ID"]
arg1 = arg2 = None
for arg in relation_mention.find_all("relation_mention_argument"):
arg_mention_id = arg["REFID"]
arg_role = arg["ROLE"]
arg_text = arg.find("extent").find("charseq").text
if arg_role == "Arg-1":
arg1 = RelationArgument(arg_mention_id, arg_role, arg_text)
elif arg_role == "Arg-2":
arg2 = RelationArgument(arg_mention_id, arg_role, arg_text)
if arg1 and arg2:
relation_list.append(
Relation(mention_id, relation_type, relation_subtype, arg1, arg2)
)
# events
for event in doc.find_all("event"):
event_id = event["ID"]
event_type = event["TYPE"]
event_subtype = event["SUBTYPE"]
# event_modality = event["MODALITY"]
# event_polarity = event["POLARITY"]
# event_genericity = event["GENERICITY"]
# event_tense = event["TENSE"]
for event_mention in event.find_all("event_mention"):
mention_id = event_mention["ID"]
trigger = event_mention.find("anchor").find("charseq")
trigger_start, trigger_end = int(trigger["START"]), int(trigger["END"])
trigger_text = trigger.text
event_args = []
for arg in event_mention.find_all("event_mention_argument"):
arg_mention_id = arg["REFID"]
arg_role = arg["ROLE"]
arg_text = arg.find("extent").find("charseq").text
event_args.append(EventArgument(arg_mention_id, arg_role, arg_text))
event_list.append(
Event(
event_id,
mention_id,
event_type,
event_subtype,
Span(trigger_start, trigger_end + 1, trigger_text),
event_args,
)
)
# remove heading/tailing spaces
for entity in entity_list:
entity.remove_space()
for event in event_list:
event.trigger.remove_space()
return doc_id, source, entity_list, relation_list, event_list
def process_entities(
entities: List[Entity], sentences: List[Tuple[str, int, int]]
) -> List[List[Entity]]:
"""Cleans entities and splits them into lists
Args:
entities (List[Entity]): a list of Entity instances.
sentences (List[Tuple[str, int, int]]): a list of sentences.
Returns:
List[List[Entity]]: a list of sentence entity lists.
"""
sentence_entities = [[] for _ in range(len(sentences))]
# assign each entity to the sentence where it appears
for entity in entities:
start, end = entity.start, entity.end
for i, (_, s, e) in enumerate(sentences):
if start >= s and end <= e:
sentence_entities[i].append(entity)
# assigned = True
break
# remove overlapping entities
sentence_entities_cleaned = [[] for _ in range(len(sentences))]
for i, entities in enumerate(sentence_entities):
if not entities:
continue
# prefer longer entities
entities.sort(key=lambda x: (x.end - x.start), reverse=True)
chars = [0] * max([x.end for x in entities])
for entity in entities:
overlap = False
for j in range(entity.start, entity.end):
if chars[j] == 1:
overlap = True
break
if not overlap:
chars[entity.start : entity.end] = [1] * (entity.end - entity.start)
sentence_entities_cleaned[i].append(entity)
sentence_entities_cleaned[i].sort(key=lambda x: x.start)
return sentence_entities_cleaned
def process_events(
events: List[Event],
sentence_entities: List[List[Entity]],
sentences: List[Tuple[str, int, int]],
) -> List[List[Event]]:
"""Cleans and assigns events.
Args:
events (List[Event]): A list of Event objects
sentence_entities (List[List[Entity]]): A list of sentence entity lists.
sentences (List[Tuple[str, int, int]]): A list of sentences.
Returns:
List[List[Event]]: a list of sentence event lists.
"""
sentence_events = [[] for _ in range(len(sentences))]
# assign each event mention to the sentence where it appears
for event in events:
start, end = event.trigger.start, event.trigger.end
for i, (_, s, e) in enumerate(sentences):
sent_entities = sentence_entities[i]
if start >= s and end <= e:
# clean the argument list
arguments = []
for argument in event.arguments:
# entity_id = argument.entity_id
mention_id = argument.mention_id
for entity in sent_entities:
if entity.mention_id == mention_id:
arguments.append(argument)
break
event_cleaned = Event(
event.event_id,
event.mention_id,
event.event_type,
event.event_subtype,
trigger=event.trigger.copy(),
arguments=arguments,
)
sentence_events[i].append(event_cleaned)
# remove overlapping events
sentence_events_cleaned = [[] for _ in range(len(sentences))]
for i, events in enumerate(sentence_events):
if not events:
continue
events.sort(key=lambda x: (x.trigger.end - x.trigger.start), reverse=True)
chars = [0] * max([x.trigger.end for x in events])
for event in events:
overlap = False
for j in range(event.trigger.start, event.trigger.end):
if chars[j] == 1:
overlap = True
break
if not overlap:
chars[event.trigger.start : event.trigger.end] = [1] * (
event.trigger.end - event.trigger.start
)
sentence_events_cleaned[i].append(event)
sentence_events_cleaned[i].sort(key=lambda x: x.trigger.start)
return sentence_events_cleaned
def process_relation(
relations: List[Relation],
sentence_entities: List[List[Entity]],
sentences: List[Tuple[str, int, int]],
) -> List[List[Relation]]:
"""Cleans and assigns relations
Args:
relations (List[Relation]): a list of Relation instances.
sentence_entities (List[List[Entity]]): a list of sentence entity lists.
sentences (List[Tuple[str, int, int]]): a list of sentences.
Returns:
List[List[Relation]]: a list of sentence relation lists.
"""
sentence_relations = [[] for _ in range(len(sentences))]
for relation in relations:
mention_id1 = relation.arg1.mention_id
mention_id2 = relation.arg2.mention_id
for i, entities in enumerate(sentence_entities):
arg1_in_sent = any([mention_id1 == e.mention_id for e in entities])
arg2_in_sent = any([mention_id2 == e.mention_id for e in entities])
if arg1_in_sent and arg2_in_sent:
sentence_relations[i].append(relation)
break
elif arg1_in_sent != arg2_in_sent:
break
return sentence_relations
def tokenize(
sentence: Tuple[str, int, int],
entities: List[Entity],
events: List[Event],
language: str = "english",
) -> List[Tuple[int, int, str]]:
"""Tokenizes a sentence.
Each sentence is first split into chunks that are entity/event spans or words
between two spans. After that, word tokenization is performed on each chunk.
Args:
sentence (Tuple[str, int, int]): Sentence tuple (text, start, end)
entities (List[Entity]): A list of Entity instances.
events (List[Event]): A list of Event instances.
Returns:
List[Tuple[int, int, str]]: a list of token tuples. Each tuple consists
of three elements, start offset, end offset, and token text.
"""
text, start, end = sentence
text = mask_escape(text)
# split the sentence into chunks
splits = {0, len(text)}
for entity in entities:
splits.add(entity.start - start)
splits.add(entity.end - start)
for event in events:
splits.add(event.trigger.start - start)
splits.add(event.trigger.end - start)
splits = sorted(list(splits))
chunks = [
(splits[i], splits[i + 1], text[splits[i] : splits[i + 1]])
for i in range(len(splits) - 1)
]
# tokenize each chunk
chunks = [(s, e, t, wordpunct_tokenize(t, language=language)) for s, e, t in chunks]
# merge chunks and add word offsets
tokens = []
for chunk_start, chunk_end, chunk_text, chunk_tokens in chunks:
last = 0
chunk_tokens_ = []
for token in chunk_tokens:
token_start = chunk_text[last:].find(token)
if token_start == -1:
raise ValueError("Cannot find token {} in {}".format(token, text))
token_end = token_start + len(token)
chunk_tokens_.append(
(
token_start + start + last + chunk_start,
token_end + start + last + chunk_start,
unmask_escape(token),
)
)
last += token_end
tokens.extend(chunk_tokens_)
return tokens
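# Illustrative sketch (made-up sentence and offsets): for a sentence tuple
# ("He met Obama.", 100, 113) with an entity span covering "Obama", the chunks are
# "He met ", "Obama" and ".", and every returned token keeps document-level offsets,
# e.g. (100, 102, "He"), ..., (107, 112, "Obama").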
def convert(
| |
# Repository: raphaeltinarrage/velour
'''
velour -> persistence
(list of functions)
Combinatorics of simplicial complexes:
- GetPowerSet
- CopySimplexTree
- GetVerticesSimplexTree
- GetSimplicesSimplexTree
- GetNeighborsSimplexTree
- IsFilteredSimplicialMap
- BarycentricSubdivisionSimplex
- BarycentricSubdivisionSimplexTree
- MappingCylinderFiltration
- MappingConeFiltration
- MappingTorusFiltration
- GetWeakSimplicialApproximation
Persistence:
- GetBettiCurves
- RipsComplex
- AlphaComplex
- WeightedRipsFiltrationValue
- WeightedRipsFiltration
- DTMFiltration
- AlphaDTMFiltration
Bundle filtrations:
- BundleFiltrationMaximalValue
- TriangulateProjectiveSpace
- GetFaceMapBundleFiltration
- ComputeLifebar
'''
import gudhi
import numpy as np
import random
import itertools
from sklearn.metrics.pairwise import euclidean_distances
from .geometry import DTM, VectorToProjection, ProjectionToVector, IntersectionLineHyperplane
def GetPowerSet(L):
'''
Returns the powerset of the list L.
Input:
L (list).
Output:
L2 (list): the power set of L.
Example:
L = range(3)
velour.GetPowerSet(L)
[(), (0,), (1,), (2,), (0, 1), (0, 2), (1, 2), (0, 1, 2)]
'''
L2 = list(itertools.chain.from_iterable(itertools.combinations(L, r) for r in range(len(L)+1)))
return L2
def CopySimplexTree(st):
'''
Hard copy of a simplex tree.
Input:
st (gudhi.SimplexTree): the simplex tree to be copied.
Output:
st1 (gudhi.SimplexTree): a copy of the simplex tree.
Example:
st = gudhi.SimplexTree()
print(st)
---> <gudhi.SimplexTree object at 0x7fded6968e30>
velour.CopySimplexTree(st)
---> <gudhi.SimplexTree at 0x7fded6968d90>
'''
st2 = gudhi.SimplexTree()
for filtr in st.get_filtration():
st2.insert(filtr[0], filtr[1])
return st2
def GetVerticesSimplexTree(st):
'''
Returns the list of vertices of the simplex tree st.
Input:
st (gudhi.SimplexTree): the simplex tree.
Output:
Vertices (list of int): the vertices of st.
Example:
st = gudhi.SimplexTree()
st.insert([0,2,258],0)
velour.GetVerticesSimplexTree(st)
---> [0, 2, 258]
'''
Vertices = [simplex[0][0] for simplex in st.get_skeleton(0)]
return Vertices
def GetSimplicesSimplexTree(st, dim):
'''
Returns the list of simplices of the simplex tree st of dimension dim (i.e.,
of length dim+1).
Input:
st (gudhi.SimplexTree): the simplex tree.
Output:
Simplices (list of list of int): the simplices of st of dimension dim.
Example:
st = gudhi.SimplexTree()
st.insert(range(3),0)
velour.GetSimplicesSimplexTree(st, dim=1)
---> [[0, 1], [0, 2], [1, 2]]
'''
Simplices = [simplex[0] for simplex in st.get_skeleton(dim) if len(simplex[0])==dim+1]
return Simplices
def GetNeighborsSimplexTree(st, v, t=np.inf, closed = True):
'''
Returns the list of neighbors of the vertex v in the simplex tree st,
at time t. If closed == True, v itself is considered as a neighbor.
Input:
st (gudhi.SimplexTree): the simplex tree.
v (int): the vertex.
t (float, optional): the time at which we consider the simplicial complex.
closed (bool, optional): whether to count v itself in the neighbors.
Output:
Neighbors (list of int): the neighbors of v.
Example:
st = gudhi.SimplexTree()
st.insert([0,1],0)
st.insert([1,2],0)
v = 0
velour.GetNeighborsSimplexTree(st, v)
---> [1, 0]
'''
Neighbors = []
Edges = st.get_cofaces([v],1) #get the edges containing v
for filtr in Edges:
if filtr[1] <= t:
simplex = filtr[0]
simplex.remove(v)
Neighbors.append(simplex[0])
if closed:
Neighbors.append(v)
return Neighbors
def IsFilteredSimplicialMap(st1,st2,g):
'''
Tests whether a map g: st1 --> st2 between vertex sets of two simplex trees is a simplicial map.
Input:
st1 (gudhi.SimplexTree): simplex tree, domain of g.
st2 (gudhi.SimplexTree): simplex tree, codomain of g.
g (dict int:int): a map between vertex sets of st1 and st2.
'''
time_not_simplicial = -1
for filtr in st1.get_filtration():
face = filtr[0]
gface = [g[v] for v in face]
gface = list(np.unique(gface))
t = st2.filtration(gface)
if t == np.inf:
time_not_simplicial = filtr[1]
break
if time_not_simplicial == -1:
print('The map is simplicial all along the filtration.', flush=True)
else:
print('The map is not simplicial from t = '+repr(time_not_simplicial)+'.', flush=True)
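# Example sketch (constructed here for illustration, in the spirit of the docstring
# examples of the other functions):
# st1 = gudhi.SimplexTree(); st1.insert([0, 1], 0)
# st2 = gudhi.SimplexTree(); st2.insert([0, 1], 0)
# velour.IsFilteredSimplicialMap(st1, st2, {0: 0, 1: 1})
# ---> The map is simplicial all along the filtration.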
def BarycentricSubdivisionSimplex(Vertices):
'''
Barycentrically subdivides the simplex spanned by Vertices. The resulting simplicial complex
has 2^n-1 vertices and n! maximal faces.
The vertices of the subdivision are the elements of the power set of the
input vertices (geometrically, such a vertex represents the barycenter of the vertices).
The maximal faces of the subdivision are given as permutations of the input
vertices. The permutation 021 corresponds to the simplex [(0), (0,2), (0,2,1)].
Input:
Vertices (list of int): length n, representing a (n-1)-simplex.
Output:
MaximalFaces (list of list of int): length n!, representing the maximal faces of
the barycentric subdivision of the simplex.
NewVertices (list of tuple): length 2^n-1. If v is a vertex of the subdivision,
NewVertices[v] is the tuple of input vertices
that represents v.
Example:
Vertices = range(3) #vertices of a triangle
MaximalFaces, NewVertices = velour.BarycentricSubdivisionSimplex(Vertices)
MaximalFaces
---> [[0, 3, 6], [0, 4, 6], [1, 3, 6], [1, 5, 6], [2, 4, 6], [2, 5, 6]]
NewVertices
---> [(0,), (1,), (2,), (0, 1), (0, 2), (1, 2), (0, 1, 2)]
'''
NewVertices = GetPowerSet(Vertices)
del NewVertices[0]
# the vertices of the subdivision are the elements of the power set of the input vertices
DictNewVertices = {tuple(NewVertices[i]):i for i in range(len(NewVertices))}
# a dict that indexes the new vertices. (0,): 0, (1,): 1, (0, 1): 2, ...
Permutations = list(itertools.permutations(Vertices))
# the maximal faces of the subdivision are given as permutations of the input vertices.
MaximalFaces = []
for perm in Permutations:
face = [tuple(sorted(perm[:i+1])) for i in range(len(perm))]
facedict=[DictNewVertices[v] for v in face]
MaximalFaces.append(facedict)
# we compute the index (in NewVertices) of the vertices of each maximal face
# elements of MaximalFaces are [0, 2, 6], [0, 4, 6], ...
return MaximalFaces, NewVertices
def BarycentricSubdivisionSimplexTree(st, X=[], bool_dict=False):
'''
Barycentrically subdivides the simplex tree st, by subdividing
each of its simplices.
If bool_dict == True, returns a dict that gives the power-set form of the
vertices (see function BarycentricSubdivisionSimplex).
Input:
st (gudhi.simplex_tree): the simplex tree to subdivise.
X (np.array, optional): size NxM, representing the coordinates of the
vertices of the simplicial complex.
bool_dict (bool, optional): whether to return the dict.
Output:
st_sub (gudhi.simplex_tree): the subdivised simplex tree.
dictNewVertices (dict tuples:int)
Y (optional, if X!=[]): a N'xM np.array, representing the vertices of
the new simplicial complex, according to the shadow map.
Example:
st = gudhi.SimplexTree()
st.insert([0,1,2], 0)
st.remove_maximal_simplex([0,1,2]) #st is a circle
st_sub, dictNewVertices = velour.BarycentricSubdivisionSimplexTree(st, bool_dict = True)
st_sub.num_vertices()
---> 6
dictNewVertices
---> {(0,): 0, (1,): 1, (0, 1): 2, (2,): 3, (0, 2): 4, (1, 2): 5}
Example:
st = gudhi.SimplexTree()
st.insert([0,1], 0)
X = np.array([[1,0],[0,1]])
st_sub, Y = velour.BarycentricSubdivisionSimplexTree(st, X=X)
---> The new simplex tree has 3 vertices (previously 2).
Y
---> array([[1. , 0. ],
[0. , 1. ],
[0.5, 0.5]])
'''
st_sub = gudhi.SimplexTree()
dictNewVertices = {}
l=0
for filtr in st.get_filtration():
simplex = filtr[0]
time = filtr[1]
MaximalFaces, NewVertices = BarycentricSubdivisionSimplex(simplex)
for v in NewVertices:
if tuple(v) not in dictNewVertices:
dictNewVertices[tuple(v)] = l
l+=1
for face in MaximalFaces:
faceNew = [NewVertices[w] for w in face]
faceDict = [dictNewVertices[tuple(w)] for w in faceNew]
st_sub.insert(faceDict, time)
if bool_dict:
return st_sub, dictNewVertices
elif np.size(X)==0:
return st_sub
else:
dictNewVerticesInv = {dictNewVertices[v]:v for v in dictNewVertices}
Y = np.zeros((len(dictNewVerticesInv), np.shape(X)[1]))
for v in dictNewVerticesInv:
Y[v] = np.mean([X[i,:] for i in dictNewVerticesInv[v]],0)
result_str = 'Subdivised Complex is of dimension ' + repr(st_sub.dimension()) + ' (previously '+ repr(st.dimension()) + ') - ' + \
repr(st_sub.num_simplices()) + ' simplices (previously '+ repr(st.num_simplices())+ ') - ' + \
repr(st_sub.num_vertices()) + ' vertices (previously '+ repr(st.num_vertices())+ ').'
print(result_str, flush=True)
return st_sub, Y
def MappingCylinderFiltration(st_X, st_Y, g):
'''
Creates a filtration of the mapping cylinder of g: st_X --> st_Y.
Input:
st_X (gudhi.SimplexTree): simplex tree, domain of g.
st_Y (gudhi.SimplexTree): simplex tree, codomain of g.
g (dict int:int): a simplicial map { (vertices of st_X):(vertices of st_Y)}.
Output:
st_cyl (gudhi.SimplexTree): the mapping cylinder of g.
Example:
st_X = gudhi.SimplexTree()
st_X.insert([0,1,2], 0)
st_X.remove_maximal_simplex([0,1,2]) #st_X is a circle
g = {0:0,1:1,2:2}
st_cyl = velour.MappingCylinderFiltration(st_X, st_X, g) #st_cyl is a cylinder
st_cyl.persistence(persistence_dim_max=True, homology_coeff_field = 2)
---> [(1, (0.0, inf)), (0, (0.0, inf))]
'''
NewVerticesList = [tuple([v,0]) for v in GetVerticesSimplexTree(st_X)]+[tuple([v,1]) for v in GetVerticesSimplexTree(st_Y)]
NewVertices = {NewVerticesList[i]:i for i in range(len(NewVerticesList))}
st_cyl = gudhi.SimplexTree()
#insert st_X
for filtr in st_X.get_filtration():
simplex = filtr[0]
simplex_cyl = [NewVertices[tuple([v,0])] for v in simplex]
st_cyl.insert(simplex_cyl, filtr[1])
#insert st_Y
for filtr in st_Y.get_filtration():
simplex = filtr[0]
simplex_cyl =
# Repository: InDeepShip/InDeepShip
from django.shortcuts import render
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from rest_framework.response import Response
# from rest_framework.authentication import BaseAuthentication
from rest_framework import status
from aft import settings
from .models import Vessel, Port, Propulsion, ReservedName, Registration, Surveyor, MerchantVessel
from django.core import serializers
from users import models as user_models
from rest_framework.permissions import AllowAny
# from users.models import Broker, PrivateUser
import requests
import json
from django.http import HttpResponse, JsonResponse
from datetime import datetime, date, timedelta
@api_view(['GET'])
@permission_classes([AllowAny])
def api_overview(request):
"""
# DESCRIPTION
Returns a list of all DRS APIs
"""
api_urls = {
"Login": "/api/users/",
"Sign up": "/api/users/signup/",
"List users": "/api/users/all/",
"Password reset": "/api/users/password_reset/",
"Password reset confirm": "/api/users/password_reset/confirm/",
"Password change": "/api/users/password_change/",
"Login": "/api/users/login/",
"Logout": "/api/users/logout/",
"Password change": "/api/users/password/change/",
"Bug report": "/api/bugreport/",
"Name look up": "/api/vessel_lookup/",
}
return Response(data=api_urls)
@api_view(['POST'])
@permission_classes([AllowAny])
def imo_lookup(request):
"""
Function used to determine if a IMO already exists in the database
"""
data = request.data
imo = data['imo']
try:
vessel = Vessel.objects.get(imo=int(imo))
except Exception as e:
# Some error occurred trying to get query Vessels
return Response({ "exists": False }, status=200)
return Response({ "exists": True }, status=200)
@api_view(['POST'])
@permission_classes([AllowAny])
def vessel_lookup(request):
"""
# DESCRIPTION
# USAGE
1) Accepts a post request with vesselName parameter and portName param set, for example with the payload:
```
{
"vesselName" : "<NAME>",
"portName" : "Miami"
}
```
This will query the backend db and check to see if there is a vessel with that name and port attached.
Return message:
The server will return a message to the client letting them know the status of that ship name.
2) Accepts a post request with only vesselName parameter set, for example with the payload:
```
{
"vesselName" : "<NAME>",
}
```
This will query the backend db and check to see what ports are available for that name.
Return message:
The server will return a message to the client letting them know the status of that ship name.
"""
ship_name = request.data.get("vesselName", "")
port_name = request.data.get("portName", "")
if port_name != "":
try:
ships_with_name = Vessel.objects.get(
name=ship_name, port__name=port_name)
message = f"There is already a vessel with the name {ship_name} in the port {port_name}."
name_available = False
except Vessel.DoesNotExist:
message = f"The name {ship_name} is available in the port {port_name}."
name_available = True
return Response(data={"message": message, "available": name_available}, status=200)
else:
port_names = [port.name for port in Port.objects.all()]
try:
vessels = Vessel.objects.filter(name=ship_name)
for v in vessels:
port_names.remove(v.port.name)
except Vessel.DoesNotExist:
pass
try:
reserve_names = ReservedName.objects.filter(name=ship_name)
for n in reserve_names:
if n.port.name in port_names:
port_names.remove(n.port.name)
except ReservedName.DoesNotExist:
pass
if len(port_names) == 0:
name_available = False
message = f"{ship_name} is not availble at any of our ports."
else:
port_string = ""
for p in port_names:
port_string += p + ", "
print(ship_name)
print(port_names)
message = f"{ship_name} is available at {port_string.strip()[:-1]}"
name_available = True
return Response(data={"message": message, "available": name_available, "ports": port_names}, status=200)
@api_view(['POST'])
@permission_classes([AllowAny])
def bug_report(request):
"""
# DESCRIPTION
`POST /api/bugreport` satisfies the user story of [bug report](https://www.notion.so/User-Stories-6b653ed6007841e099e42e82aa6ff8e8) by allowing clients and users to report any bugs with DRS.
# USAGE
Accepts a post request with message parameter and currentPage param set, for example with the payload:
```
{
"message" : "this is a bug report",
"currentPage" : "http://206.189.218.111/services"
}
```
and sends a bug report with that message to the `#bug-report` Slack channel.
It returns a message with the status code of the post request to Slack, for example:
```
{
"message" : "bug report submitted to Slack with status code: 200"
}
```
Otherwise it returns an error message with the status code set.
"""
request_message = request.data.get("message", "")
currentPage = request.data.get("currentPage", "")
if request_message.strip() != '':
message = "BUG REPORT: " + request_message + "\n\n" + "PAGE: " + currentPage
url = settings.SLACK_WEBHOOK
myobj = {"text": message}
ret = requests.post(url=url, json=myobj)
data = {
"message": "bug report submitted to Slack with status code: %s" % str(ret.status_code)
}
ret_status = status.HTTP_200_OK
else:
data = {
"error": "message param was empty"
}
ret_status = status.HTTP_400_BAD_REQUEST
return Response(data=data, status=ret_status)
@api_view(['GET'])
@permission_classes([AllowAny])
def ports(request):
# get all ports from the database
port_names = [port.name for port in Port.objects.all()]
data = {"ports": port_names}
return Response(data=data, status=200)
@api_view(['GET'])
@permission_classes([AllowAny])
def propulsion_methods(request):
# get all propulsion methods from the database
propulsion_names = [
propulsion.name for propulsion in Propulsion.objects.all()]
data = {"propulsion_methods": propulsion_names}
return Response(data=data, status=200)
@api_view(["POST"])
def reserve_name(request):
data = request.data
email = data.get("email")
# get the email of the user submitting the app
user_with_email = user_models.CustomUser.objects.get(email=email)
name_to_reserve = data.get("name")
port_to_reserve = data.get("port")
try:
port_to_reserve = Port.objects.get(name=port_to_reserve)
except Port.DoesNotExist:
print("The submitted port does not exist.")
return HttpResponse(status=400)
new_name_object = ReservedName(name=name_to_reserve,
port=port_to_reserve,
reserving_user=user_with_email)
# save the new reserved name object
new_name_object.save()
print("Name reserved.")
return HttpResponse(status=200)
@api_view(["GET"])
def get_user_vessels(request):
user_email = request.GET.get("email", "")
if user_email == "":
message = "There is no email attached to the request."
return Response(
data={"message": message},
status=200)
try:
user = user_models.CustomUser.objects.get(email=user_email)
except user_models.CustomUser.DoesNotExist:
message = "There is no user in the database with that email."
return Response(
data={"message": message},
status=200)
vessels = Vessel.objects.filter(owner__email=user_email)
data = {"vessels": vessels}
return Response(data=data, status=200)
@api_view(["GET"])
def get_registrations(request):
user_email = request.GET.get("email", "")
if user_email == "":
message = "There is no email attached to the request."
return Response(
data={"message": message},
status=200)
try:
user = user_models.CustomUser.objects.get(email=user_email)
except user_models.CustomUser.DoesNotExist:
message = "There is no user in the database with that email."
return Response(
data={"message": message},
status=200)
regs = Registration.objects.filter(owner__email=user_email)
data = {"registrations": regs}
return Response(data=data, status=200)
@api_view(["GET"])
def get_single_registration(request):
given_imo = request.GET.get("imo", "")
user_email = request.GET.get("email", "")
if user_email == "":
message = "There is no email attached to the request."
return Response(
data={"message": message},
status=200)
if given_imo == "":
message = "There is no imo attached to the request."
return Response(
data={"message": message},
status=200)
try:
user = user_models.CustomUser.objects.get(email=user_email)
ship = Vessel.objects.get(imo=given_imo)
except user_models.CustomUser.DoesNotExist:
message = "There is no user in the database with that email."
return Response(
data={"message": message},
status=200)
except Vessel.DoesNotExist:
message = "There is no ship in the database with that imo."
return Response(
data={"message": message},
status=200)
results = Registration.objects.filter(vessel__imo=given_imo, owner__email=user_email)
if len(results) == 0:
data = {"message": "Not found"}
status = 404
else:
regs = []
for reg in results.values():
# print(reg)
reg['vessel'] = Vessel.objects.filter(id=reg['vessel_id']).values()[0]
reg['port'] = Port.objects.filter(id=reg['port_id']).values()[0]
reg['propulsion'] = Port.objects.filter(id=reg['propulsion_id']).values()[0]
reg['owner'] = user_models.CustomUser.objects.filter(id=reg['owner_id']).values()[0]
print(reg)
regs.append(reg)
status = 200
return Response(data=regs, status=status)
@api_view(["GET"])
@authentication_classes([])
@permission_classes([AllowAny])
def get_merchant_vessels(request):
'''
The surveyor api accepts an `api-key` header assigned to a Surveyor. Returns an array of assigned merchant vessel ships.
'''
api_key = request.headers.get("api-key", "")
# print(api_key)
# TODO check formatting
if api_key == "" or len(api_key) != 36:
# message = "Invalid or missing API Key"
data = {"message": "Invalid or missing API Key"}
status = 405
else:
results = MerchantVessel.objects.filter(api_key=api_key)
if len(results) == 0:
# message = "Not found"
data = {"message": "Not found"}
status = 404
else:
def del_api(v): del v["api_key"]; return v
vessels = [del_api(v) for v in results.values()]
# message = "Success"
data = {"message": "Success", "vessels": vessels}
status = 200
return Response(data=data, status=status)
@api_view(["GET"])
@authentication_classes([])
@permission_classes([AllowAny])
def get_all_merchant_vessels(request):
'''
Returns an array of all merchant vessels.
'''
results = MerchantVessel.objects.filter()
def del_api(v): del v["api_key"]; return v
vessels = [del_api(v) for v in results.values()]
# message = "Success"
data = {"message": "Success", "vessels": vessels}
status = 200
return Response(data=data, status=status)
@api_view(["GET"])
def get_statuses(request):
user_email = request.GET.get("email", "")
if user_email == "":
message = "There is no email attached to the request."
return Response(
data={"message": message},
status=200)
try:
user = user_models.CustomUser.objects.get(email=user_email)
except user_models.CustomUser.DoesNotExist:
message = "There is no user in the database with that email."
return Response(
data={"message": message},
status=200)
try:
regs = Registration.objects.filter(owner_id=user.id)
except user_models.CustomUser.DoesNotExist:
message = "There is no registration in the database under that user id."
return Response(data={"message": message}, status=200)
ships = []
for reg in regs:
try:
vessel = Vessel.objects.get(id=reg.vessel_id)
except user_models.CustomUser.DoesNotExist:
message = "There is no vessel in the database under that registration id."
return Response(data={"message": message}, status=200)
try:
port = Port.objects.get(id=vessel.port_id)
except user_models.CustomUser.DoesNotExist:
message = "There is no port in the database under that port id."
return Response(data={"message": message}, status=200)
port = port.name
name = vessel.name
imo = vessel.imo
start_date = reg.start_date
expiration_date = reg.expiration_date
# old expirations with no expiration date default to registered
if | |
y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzxyz(x,y,z):
return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzxzx(x,y,z):
return 15*x*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzxzy(x,y,z):
return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzxzz(x,y,z):
return 15*z*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzyxx(x,y,z):
return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txzyxy(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzyxz(x,y,z):
return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzyyx(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzyyy(x,y,z):
return 315*x*y*z*(-3*y**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txzyyz(x,y,z):
return 15*x*(-63*y**2*z**2/(x**2 + y**2 + z**2)**2 + 7*y**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzyzx(x,y,z):
return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzyzy(x,y,z):
return 15*x*(-63*y**2*z**2/(x**2 + y**2 + z**2)**2 + 7*y**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzyzz(x,y,z):
return 315*x*y*z*(-3*z**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txzzxx(x,y,z):
return 15*x*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzzxy(x,y,z):
return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzzxz(x,y,z):
return 15*z*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzzyx(x,y,z):
return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzzyy(x,y,z):
return 15*x*(-63*y**2*z**2/(x**2 + y**2 + z**2)**2 + 7*y**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzzyz(x,y,z):
return 315*x*y*z*(-3*z**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txzzzx(x,y,z):
return 15*z*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzzzy(x,y,z):
return 315*x*y*z*(-3*z**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txzzzz(x,y,z):
return 45*x*(-21*z**4/(x**2 + y**2 + z**2)**2 + 14*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxxxx(x,y,z):
return 45*y*(-21*x**4/(x**2 + y**2 + z**2)**2 + 14*x**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxxxy(x,y,z):
return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxxxz(x,y,z):
return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Tyxxyx(x,y,z):
return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxxyy(x,y,z):
return 15*y*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxxyz(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxxzx(x,y,z):
return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Tyxxzy(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxxzz(x,y,z):
return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxyxx(x,y,z):
return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxyxy(x,y,z):
return 15*y*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxyxz(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxyyx(x,y,z):
return 15*y*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxyyy(x,y,z):
return 45*x*(-21*y**4/(x**2 + y**2 + z**2)**2 + 14*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxyyz(x,y,z):
return 315*x*y*z*(-3*y**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Tyxyzx(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxyzy(x,y,z):
return 315*x*y*z*(-3*y**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Tyxyzz(x,y,z):
return 15*x*(-63*y**2*z**2/(x**2 + y**2 + z**2)**2 + 7*y**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxzxx(x,y,z):
return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Tyxzxy(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxzxz(x,y,z):
return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxzyx(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxzyy(x,y,z):
return 315*x*y*z*(-3*y**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Tyxzyz(x,y,z):
return 15*x*(-63*y**2*z**2/(x**2 + y**2 + z**2)**2 + 7*y**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxzzx(x,y,z):
return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxzzy(x,y,z):
return 15*x*(-63*y**2*z**2/(x**2 + y**2 + z**2)**2 + 7*y**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyxzzz(x,y,z):
return 315*x*y*z*(-3*z**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Tyyxxx(x,y,z):
return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyyxxy(x,y,z):
return 15*y*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyyxxz(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyyxyx(x,y,z):
return 15*y*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyyxyy(x,y,z):
return 45*x*(-21*y**4/(x**2 + y**2 + z**2)**2 + 14*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Tyyxyz(x,y,z):
return 315*x*y*z*(-3*y**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Tyyxzx(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def | |
l2= (f_v[2]-isect).length
w0 = (l1+l2)
w1 = (l0+l2)
w2 = (l1+l0)
totw= w0 + w1 + w2
w0=w0/totw
w1=w1/totw
w2=w2/totw
return f, side, w0, w1, w2
def pickMeshGroupWeight(me, act_group, orig, rdir):
f, side, w0, w1, w2= pickMeshRayFaceWeight(me, orig, rdir)
if f==None:
return None
f_v= f.v
if side==0:
f_vi= (f_v[0].index, f_v[1].index, f_v[2].index)
else:
f_vi= (f_v[0].index, f_v[2].index, f_v[3].index)
vws= [0.0,0.0,0.0]
for i in xrange(3):
try: vws[i]= me.getVertsFromGroup(act_group, 1, [f_vi[i],])[0][1]
except: pass
return w0*vws[0] + w1*vws[1] + w2*vws[2]
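# Example (illustrative sketch): sample the weight of a vertex group named 'Group'
# where a ray from `orig` along `rdir` hits the mesh; returns None if nothing is hit.
#   weight= pickMeshGroupWeight(me, 'Group', orig, rdir)
#   if weight != None: print 'picked weight:', weight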
def pickMeshGroupVCol(me, orig, rdir):
Vector= Blender.Mathutils.Vector
f, side, w0, w1, w2= pickMeshRayFaceWeight(me, orig, rdir)
if f==None:
return None
def col2vec(c):
return Vector(c.r, c.g, c.b)
if side==0:
idxs= 0,1,2
else:
idxs= 0,2,3
f_c= f.col
f_colvecs= [col2vec(f_c[i]) for i in idxs]
return f_colvecs[0]*w0 + f_colvecs[1]*w1 + f_colvecs[2]*w2
def edge_face_users(me):
'''
Takes a mesh and returns a list aligned with the mesh's edges.
Each item is a list of the faces that use the edge.
This would be the equivalent of having ed.face_users as a property.
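Example (illustrative sketch, assuming `me` is a Blender Mesh):
ed_users= edge_face_users(me)
for ed in me.edges:
print ed.index, len(ed_users[ed.index]) # 1 for boundary edges, 2 for manifold edges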
'''
face_edges_dict= dict([(ed.key, (ed.index, [])) for ed in me.edges])
for f in me.faces:
fvi= [v.index for v in f]# face vert idx's
for edkey in f.edge_keys:
face_edges_dict[edkey][1].append(f)
face_edges= [None] * len(me.edges)
for ed_index, ed_faces in face_edges_dict.itervalues():
face_edges[ed_index]= ed_faces
return face_edges
def face_edges(me):
'''
Returns a list aligned to the mesh's faces.
Each item is a list of lists, that is:
face_edges -> indexed by face index
face_edges[i] -> list aligned to the face's local edges (verts 1,2,3 and/or 4)
face_edges[i][j] -> list of the faces that share this edge (including face i itself).
Tricky to explain; see the example below.
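Example (illustrative): for a face index fi and its local edge j,
fe= face_edges(me)
fe[fi][j] # -> list of the faces sharing that edge (including me.faces[fi] itself)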
'''
face_edges= [ [-1] * len(f) for f in me.faces ]
face_edges_dict= dict([(ed.key, []) for ed in me.edges])
for fidx, f in enumerate(me.faces):
for i, edkey in enumerate(f.edge_keys):
edge_face_users= face_edges_dict[edkey]
edge_face_users.append(f)
face_edges[fidx][i]= edge_face_users
return face_edges
def facesPlanerIslands(me):
def roundvec(v):
return round(v[0], 4), round(v[1], 4), round(v[2], 4)
face_props= [(cent, no, roundvec(no), cent.dot(no)) for f in me.faces for no, cent in ((f.no, f.cent),)]
face_edge_users= face_edges(me)
islands= []
used_faces= [0] * len(me.faces)
while True:
new_island= False
for i, used_val in enumerate(used_faces):
if used_val==0:
island= [i]
new_island= True
used_faces[i]= 1
break
if not new_island:
break
island_growing= True
while island_growing:
island_growing= False
for fidx1 in island[:]:
if used_faces[fidx1]==1:
used_faces[fidx1]= 2
face_prop1= face_props[fidx1]
for ed in face_edge_users[fidx1]:
for f2 in ed:
fidx2= f2.index
if fidx1 != fidx2 and used_faces[fidx2]==0:
island_growing= True
face_prop2= face_props[fidx2]
# normals are the same?
if face_prop1[2]==face_prop2[2]:
if abs(face_prop1[3] - face_prop1[1].dot(face_prop2[0])) < 0.000001:
used_faces[fidx2]= 1
island.append(fidx2)
islands.append([me.faces[i] for i in island])
return islands
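# Example (illustrative): group faces into coplanar islands and report their sizes.
#   for island in facesPlanerIslands(me):
#       print len(island), 'coplanar faces'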
def facesUvIslands(me, PREF_IMAGE_DELIMIT=True):
def roundvec(v):
return round(v[0], 4), round(v[1], 4)
if not me.faceUV:
return [ list(me.faces), ]
# make a list of uv dicts
face_uvs= [ [roundvec(uv) for uv in f.uv] for f in me.faces]
# key - face uv || value - list of face idxs
uv_connect_dict= dict([ (uv, [] ) for f_uvs in face_uvs for uv in f_uvs])
for i, f_uvs in enumerate(face_uvs):
for uv in f_uvs: # loops through rounded uv values
uv_connect_dict[uv].append(i)
islands= []
used_faces= [0] * len(me.faces)
while True:
new_island= False
for i, used_val in enumerate(used_faces):
if used_val==0:
island= [i]
new_island= True
used_faces[i]= 1
break
if not new_island:
break
island_growing= True
while island_growing:
island_growing= False
for fidx1 in island[:]:
if used_faces[fidx1]==1:
used_faces[fidx1]= 2
for uv in face_uvs[fidx1]:
for fidx2 in uv_connect_dict[uv]:
if fidx1 != fidx2 and used_faces[fidx2]==0:
if not PREF_IMAGE_DELIMIT or me.faces[fidx1].image==me.faces[fidx2].image:
island_growing= True
used_faces[fidx2]= 1
island.append(fidx2)
islands.append([me.faces[i] for i in island])
return islands
#def faceUvBounds(me, faces= None):
def facesUvRotate(me, deg, faces= None, pivot= (0,0)):
'''
Faces can be None and all faces will be used.
pivot is just the x/y point we rotate about.
A positive deg value gives a clockwise rotation.
'''
if faces==None: faces= me.faces
pivot= Blender.Mathutils.Vector(pivot)
rotmat= Blender.Mathutils.RotationMatrix(-deg, 2)
for f in faces:
f.uv= [((uv-pivot)*rotmat)+pivot for uv in f.uv]
def facesUvScale(me, sca, faces= None, pivot= (0,0)):
'''
Faces can be None and all faces will be used.
pivot is just the x/y point we scale about.
sca can be either an int/float or a vector if you want to
scale x/y separately.
A sca of (1.0, 1.0) will do nothing.
'''
def vecmulti(v1,v2):
'''V2 is unchanged'''
v1[:]= (v1.x*v2.x, v1.y*v2.y)
return v1
sca= Blender.Mathutils.Vector(sca)
if faces==None: faces= me.faces
pivot= Blender.Mathutils.Vector(pivot)
for f in faces:
f.uv= [vecmulti(uv-pivot, sca)+pivot for uv in f.uv]
def facesUvTranslate(me, tra, faces= None, pivot= (0,0)):
'''
Faces can be None and all faces will be used.
The pivot argument is accepted for API consistency but is not used for translation.
'''
if faces==None: faces= me.faces
tra= Blender.Mathutils.Vector(tra)
for f in faces:
f.uv= [uv+tra for uv in f.uv]
def edgeFaceUserCount(me, faces= None):
'''
Return an edge-aligned list with the count of all the faces that use that edge.
You can specify a subset of the faces, so only those will be counted.
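Example (illustrative): count the boundary edges of a mesh (edges used by exactly one face).
users= edgeFaceUserCount(me)
boundary_count= len([c for c in users if c==1])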
'''
if faces==None:
faces= me.faces
max_vert= len(me.verts)
else:
# find the highest vert index
pass
edge_users= [0] * len(me.edges)
edges_idx_dict= dict([(ed.key, ed.index) for ed in me.edges])
for f in faces:
for edkey in f.edge_keys:
edge_users[edges_idx_dict[edkey]] += 1
return edge_users
#============================================================================#
# Takes a face, and a pixel x/y on the image and returns a worldspace x/y/z #
# will return none if the pixel is not inside the faces UV #
#============================================================================#
def getUvPixelLoc(face, pxLoc, img_size = None, uvArea = None):
TriangleArea= Blender.Mathutils.TriangleArea
Vector= Blender.Mathutils.Vector
if not img_size:
w,h = face.image.size
else:
w,h= img_size
scaled_uvs= [Vector(uv.x*w, uv.y*h) for uv in face.uv]
if len(scaled_uvs)==3:
indicies= ((0,1,2),)
else:
indicies= ((0,1,2), (0,2,3))
for i1,i2,i3 in indicies:
# IS a point inside our triangle?
# UVArea could be cached?
uv_area = TriangleArea(scaled_uvs[i1], scaled_uvs[i2], scaled_uvs[i3])
area0 = TriangleArea(pxLoc, scaled_uvs[i2], scaled_uvs[i3])
area1 = TriangleArea(pxLoc, scaled_uvs[i1], scaled_uvs[i3])
area2 = TriangleArea(pxLoc, scaled_uvs[i1], scaled_uvs[i2])
if area0 + area1 + area2 > uv_area + 1: # 1 px bleed/error margin.
pass # if we're a quad the other side may contain the pixel, so keep looking.
else:
# We know the point is in the tri
area0 /= uv_area
area1 /= uv_area
area2 /= uv_area
# New location
return Vector(\
face.v[i1].co[0]*area0 + face.v[i2].co[0]*area1 + face.v[i3].co[0]*area2,\
face.v[i1].co[1]*area0 + face.v[i2].co[1]*area1 + face.v[i3].co[1]*area2,\
face.v[i1].co[2]*area0 + face.v[i2].co[2]*area1 + face.v[i3].co[2]*area2\
)
return None
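# Example (illustrative sketch): map image pixel (32, 64) on a face back into world space;
# the result is None when the pixel is not covered by the face's UVs.
#   loc= getUvPixelLoc(face, Blender.Mathutils.Vector(32.0, 64.0))
#   if loc: print 'world space location:', loc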
# Used for debugging ngon
"""
def draw_loops(loops):
me= Blender.Mesh.New()
for l in loops:
#~ me= Blender.Mesh.New()
i= len(me.verts)
me.verts.extend([v[0] for v in l])
try:
me.verts[0].sel= 1
except:
pass
me.edges.extend([ (j-1, j) for j in xrange(i+1, len(me.verts)) ])
# Close the edge?
me.edges.extend((i, len(me.verts)-1))
#~ ob= Blender.Object.New('Mesh')
#~ ob.link(me)
#~ scn= Blender.Scene.GetCurrent()
#~ scn.link(ob)
#~ ob.Layers= scn.Layers
#~ ob.sel= 1
# Fill
#fill= Blender.Mathutils.PolyFill(loops)
#me.faces.extend(fill)
ob= Blender.Object.New('Mesh')
ob.link(me)
scn= Blender.Scene.GetCurrent()
scn.link(ob)
ob.Layers= scn.Layers
ob.sel= 1
Blender.Window.RedrawAll()
"""
def ngon(from_data, indices, PREF_FIX_LOOPS= True):
'''
Takes a polyline of indices (fgon)
and returns a list of face index lists.
Designed to be used for importers that need indices for an fgon to create from existing verts.
from_data: either a mesh, or a list/tuple of vectors.
indices: a list of indices to use; this list is the ordered closed polyline to fill, and can be a subset of the data given.
PREF_FIX_LOOPS: If this is enabled, polylines that use loops to make multiple polylines are dealt with correctly.
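Example (illustrative sketch): triangulate a closed polyline of mesh vert indices.
tris= ngon(me, poly_indices) # each entry indexes into poly_indices, not the mesh
faces= [[poly_indices[i] for i in tri] for tri in tris]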
'''
if not set: # Need sets for this, otherwise do a normal fill.
PREF_FIX_LOOPS= False
Vector= Blender.Mathutils.Vector
if not indices:
return []
# return []
def rvec(co): return round(co.x, 6), round(co.y, 6), round(co.z, 6)
def mlen(co): return abs(co[0])+abs(co[1])+abs(co[2]) # manhatten length of a vector, faster then length
def vert_treplet(v, i):
return v, rvec(v), i, mlen(v)
def ed_key_mlen(v1, v2):
if v1[3] > v2[3]:
return v2[1], v1[1]
else:
return v1[1], v2[1]
if not PREF_FIX_LOOPS:
'''
Normal single concave loop filling
'''
if type(from_data) in (tuple, list):
verts= [Vector(from_data[i]) for ii, i in enumerate(indices)]
else:
verts= [from_data.verts[i].co for ii, i in enumerate(indices)]
for i in xrange(len(verts)-1, 0, -1): # same as reversed(xrange(1, len(verts))):
if verts[i][1]==verts[i-1][0]:
verts.pop(i-1)
fill= Blender.Geometry.PolyFill([verts])
else:
'''
Separate this loop into multiple loops by finding edges that are used twice.
This is used by lightwave LWO files a lot.
'''
if type(from_data) in (tuple, list):
verts= [vert_treplet(Vector(from_data[i]), ii) for ii, i in enumerate(indices)]
else:
verts= [vert_treplet(from_data.verts[i].co, ii) for ii, i in enumerate(indices)]
edges= [(i, i-1) for i in xrange(len(verts))]
if edges:
edges[0]= (0,len(verts)-1)
if not verts:
return []
edges_used= set()
edges_doubles= set()
# We need to check if any edges are used twice location based.
for ed in edges:
edkey= ed_key_mlen(verts[ed[0]], verts[ed[1]])
if edkey in edges_used:
edges_doubles.add(edkey)
else:
edges_used.add(edkey)
# Store a list of unconnected loop segments split by double edges.
# will join later
loop_segments= []
v_prev= verts[0]
context_loop= [v_prev]
loop_segments= [context_loop]
for v in verts:
if v!=v_prev:
# Are we crossing an edge we removed?
if ed_key_mlen(v, v_prev) in edges_doubles:
context_loop= [v]
loop_segments.append(context_loop)
else:
if context_loop and context_loop[-1][1]==v[1]:
#raise "as"
pass
else:
context_loop.append(v)
v_prev= v
# Now join loop segments
def join_seg(s1,s2):
if s2[-1][1]==s1[0][1]: #
s1,s2= s2,s1
elif s1[-1][1]==s2[0][1]:
pass
else:
return False
# If we're still here, s1 and s2 are 2 segments in the same polyline
s1.pop() # remove the last vert from s1
s1.extend(s2) # add segment 2 to segment 1
if s1[0][1]==s1[-1][1]: # remove endpoints double
s1.pop()
s2[:]= [] # Empty this segment s2 so we dont use it again.
return True
joining_segments= True
while joining_segments:
joining_segments= False
segcount= len(loop_segments)
for j in xrange(segcount-1, -1, -1): #reversed(xrange(segcount)):
seg_j= loop_segments[j]
if seg_j:
for k in xrange(j-1, -1, -1): # reversed(xrange(j)):
if not seg_j:
break
seg_k= loop_segments[k]
if seg_k and join_seg(seg_j, seg_k):
joining_segments= True
loop_list= loop_segments
for verts in loop_list:
while verts and verts[0][1]==verts[-1][1]:
verts.pop()
loop_list= [verts for verts in loop_list if len(verts)>2]
# DONE DEALING WITH LOOP FIXING
# vert mapping
vert_map= [None]*len(indices)
ii=0
for verts in loop_list:
if len(verts)>2:
for i, vert in enumerate(verts):
vert_map[i+ii]= vert[2]
ii+=len(verts)
fill= Blender.Geometry.PolyFill([ [v[0] for v in loop] for loop in loop_list ])
#draw_loops(loop_list)
#raise 'done loop'
# map to original indicies
fill= [[vert_map[i] for i in reversed(f)] for f in fill]
if not fill:
print 'Warning: Cannot scanfill, falling back on a triangle fan.'
fill= [ [0, i-1, i] for i in xrange(2, len(indices)) ]
else:
# Use real scanfill.
# See if its flipped the wrong way.
flip= None
for fi in fill:
if flip != None:
break
for i, vi in enumerate(fi):
if vi==0 and fi[i-1]==1:
flip= False
break
elif vi==1 and fi[i-1]==0:
flip= True
break
if not flip:
for i, fi in enumerate(fill):
fill[i]= tuple([ii for ii in reversed(fi)])
return fill
# EG
'''
scn= Scene.GetCurrent()
me = scn.getActiveObject().getData(mesh=1)
ind= [v.index for v | |
iteration, using the parameters
from the previous fit as the starting point and use the intrinsic
scatter estimates provided by :func:`disk_fit_reject`. Covariance is
still ignored.
#. Reject outliers in both velocity and velocity dispersion (if the
latter is being fit) using :func:`disk_fit_reject`. The rejection
sigma used is the *third* element in the provided list. Then refit
the data, but fix or free the center and inclination based on the
provided keywords (``fix_cen`` and ``fix_inc``). Also, as in all
previous iterations, the covariance is ignored in the outlier
rejection and intrinsic scatter determination; however, the
covariance *is* used by the fit, as available and if ``ignore_covar``
is False.
#. Redo the previous iteration in exactly the same way, except outlier
rejection and intrinsic-scatter determination now use the covariance,
as available and if ``ignore_covar`` is False. The rejection sigma
used is the *fourth* element in the provided list.
#. If a lower inclination threshold is set (see ``low_inc``) and the
best-fitting inclination is below this value (assuming the
inclination is freely fit), a final iteration refits the data by
fixing the inclination at the value set by
:func:`~nirvana.data.meta.GlobalPar.guess_inclination`. The code
issues a warning, and the global fit-quality bit is set to include
the ``LOWINC`` bit.
.. todo::
- Enable more rotation curve and dispersion profile functions.
- Allow guess RC and DC parameters and bounds to be input, or switch to
requiring the 1D model class instances to be provided, like in
:class:`~nirvana.models.axisym.AxisymmetricDisk`.
Args:
galmeta (:class:`~nirvana.data.meta.GlobalPar`):
Object with metadata for the galaxy to be fit.
kin (:class:`~nirvana.data.kinematics.Kinematics`):
Object with the data to be fit
rctype (:obj:`str`, optional):
Functional form for the rotation curve. Must be "HyperbolicTangent"
or "PolyEx".
dctype (:obj:`str`, optional):
Functional form for the dispersion profile. Must be "Exponential",
"ExpBase", or "Const".
fitdisp (:obj:`bool`, optional):
Fit the velocity dispersion data if it is available in ``kin``.
ignore_covar (:obj:`bool`, optional):
If ``kin`` provides the covariance between measurements, ignore it
and fit the data assuming there is no covariance.
assume_posdef_covar (:obj:`bool`, optional):
If ``kin`` provides the covariance between measurements, assume the
covariance matrices are positive definite.
max_vel_err (:obj:`float`, optional):
Mask measurements with velocity errors larger than this value. If
None, there is no upper limit on the allowed velocity error.
max_sig_err (:obj:`float`, optional):
Mask measurements with velocity dispersion errors larger than this
value. If None, there is no upper limit on the allowed velocity
dispersion error.
min_vel_snr (:obj:`float`, optional):
Mask velocity measurements for spectra below this S/N. If None,
there is no lower S/N limit on the allowed velocities.
min_sig_snr (:obj:`float`, optional):
Mask velocity dispersion measurements for spectra below this S/N.
If None, there is no lower S/N limit on the allowed velocity
dispersions.
vel_sigma_rej (:obj:`float`, :obj:`list`, optional):
Sigma values used for rejection of velocity measurements. Must be a
single float or a *four-element* list. If None, no rejections are
performed. The description above provides which value is used in
each iteration.
sig_sigma_rej (:obj:`float`, :obj:`list`, optional):
Sigma values used for rejection of dispersion measurements. Must be
a single float or a *four-element* list. If None, no rejections are
performed. The description above provides which value is used in
each iteration.
fix_cen (:obj:`bool`, optional):
Fix the dynamical center of the fit to 0,0 in the final fit
iteration.
fix_inc (:obj:`bool`, optional):
Fix the kinematic inclination of the fit to estimate provided by the
:func:`~nirvana.data.meta.GlobalPar.guess_inclination` method of
``galmeta``.
low_inc (scalar-like, optional):
If the inclination is free and the best-fitting inclination from the
final fit iteration is below this value, flag the global bitmask of
the fit as having a low inclination and refit the data using a fixed
inclination set by
:func:`~nirvana.data.meta.GlobalPar.guess_inclination` (i.e., this
is the same as when setting ``fix_inc`` to True). If None, no
minimum is set on the viable inclination (apart from the fit
boundaries).
min_unmasked (:obj:`int`, optional):
The minimum number of velocity measurements (and velocity dispersion
measurements, if they are available and being fit) required to
proceed with the fit, after applying all masking.
select_coherent (:obj:`bool`, optional):
After masking the measurement values, mask any measurements that are
not included in the largest coherent region of adjacent
measurements. See
:func:`~nirvana.data.util.find_largest_coherent_region`.
analytic_jac (:obj:`bool`, optional):
Use the analytic calculation of the Jacobian matrix during the fit
optimization. If False, the Jacobian is calculated using
finite-differencing methods provided by
`scipy.optimize.least_squares`_.
fit_scatter (:obj:`bool`, optional):
Model the intrinsic scatter in the data about the model during the
fit optimization.
verbose (:obj:`int`, optional):
Verbosity level: 0=only status output written to terminal; 1=show
fit result QA plot; 2=full output
Returns:
:obj:`tuple`: Returns 5 objects: (1) the
:class:`~nirvana.models.axisym.AxisymmetricDisk` instance used during
the fit, (2) a `numpy.ndarray`_ with the input guess parameters, (3) a
boolean `numpy.ndarray`_ selecting the parameters that were fixed during
the fit, (4) a `numpy.ndarray`_ with the bad-pixel mask for the velocity
measurements used in the fit, and (5) a `numpy.ndarray`_ with the
bad-pixel mask for the velocity dispersion measurements used in the fit.
"""
# Running in "debug" mode
debug = verbose > 1
# Check input
_vel_sigma_rej = None if vel_sigma_rej is None else list(vel_sigma_rej)
if _vel_sigma_rej is not None and len(_vel_sigma_rej) == 1:
_vel_sigma_rej *= 4
if _vel_sigma_rej is not None and len(_vel_sigma_rej) != 4:
raise ValueError('Length of vel_sigma_rej list must be 4!')
_sig_sigma_rej = None if sig_sigma_rej is None else list(sig_sigma_rej)
if _sig_sigma_rej is not None and len(_sig_sigma_rej) == 1:
_sig_sigma_rej *= 4
if _sig_sigma_rej is not None and len(_sig_sigma_rej) != 4:
raise ValueError('Length of sig_sigma_rej list must be 4!')
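# Note (illustrative): passing, e.g., vel_sigma_rej=[10.] is expanded above to
# [10., 10., 10., 10.], i.e. the same rejection sigma is applied in all four
# rejection iterations described in the docstring.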
#---------------------------------------------------------------------------
# Get the guess parameters and the model parameterizations
print('Setting up guess parameters and parameterization classes.')
# - Geometry
pa, vproj = galmeta.guess_kinematic_pa(kin.grid_x, kin.grid_y, kin.remap('vel'),
return_vproj=True)
p0 = np.array([0., 0., pa, galmeta.guess_inclination(lb=1., ub=89.), 0.])
# - Rotation Curve
rc = None
if rctype == 'HyperbolicTangent':
# TODO: Maybe want to make the guess hrot based on the effective radius...
p0 = np.append(p0, np.array([min(900., vproj), 1.]))
rc = HyperbolicTangent(lb=np.array([0., 1e-3]),
ub=np.array([1000., max(5., kin.max_radius())]))
elif rctype == 'PolyEx':
p0 = np.append(p0, np.array([min(900., vproj), 1., 0.1]))
rc = PolyEx(lb=np.array([0., 1e-3, -1.]),
ub=np.array([1000., max(5., kin.max_radius()), 1.]))
else:
raise ValueError(f'Unknown RC parameterization: {rctype}')
# - Dispersion profile
dc = None
if fitdisp:
sig0 = galmeta.guess_central_dispersion(kin.grid_x, kin.grid_y, kin.remap('sig'))
# For disks, 1 Re = 1.7 hr (hr = disk scale length). The dispersion
# e-folding length is ~2 hr, meaning that I use a guess of 2/1.7 Re for
# the dispersion e-folding length.
if dctype == 'Exponential':
p0 = np.append(p0, np.array([sig0, 2*galmeta.reff/1.7]))
dc = Exponential(lb=np.array([0., 1e-3]), ub=np.array([1000., 3*galmeta.reff]))
elif dctype == 'ExpBase':
p0 = np.append(p0, np.array([sig0, 2*galmeta.reff/1.7, 1.]))
dc = ExpBase(lb=np.array([0., 1e-3, 0.]), ub=np.array([1000., 3*galmeta.reff, 100.]))
elif dctype == 'Const':
p0 = np.append(p0, np.array([sig0]))
dc = Const(lb=np.array([0.]), ub=np.array([1000.]))
else:
raise ValueError(f'Unknown DC parameterization: {dctype}')
# Report
print(f'Rotation curve parameterization class: {rc.__class__.__name__}')
if fitdisp:
print(f'Dispersion profile parameterization class: {dc.__class__.__name__}')
print('Input guesses:')
print(f' Position angle: {pa:.1f}')
print(f' Inclination: {p0[3]:.1f}')
print(f' Projected Rotation Speed: {vproj:.1f}')
if fitdisp:
print(f' Central Velocity Dispersion: {sig0:.1f}')
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Define the fitting object
disk = AxisymmetricDisk(rc=rc, dc=dc)
# Constrain the center to be in the middle third of the map relative to the
# photometric center. The mean in the calculation is to mitigate that some
# galaxies can be off center, but the detail here and how well it works
# hasn't been well tested.
# TODO: Should this use grid_x instead, so that it's more uniform for all
# IFUs? Or should this be set as a fraction of Reff?
dx = np.mean([abs(np.amin(kin.x)), abs(np.amax(kin.x))])
dy = np.mean([abs(np.amin(kin.y)), abs(np.amax(kin.y))])
lb, ub = disk.par_bounds(base_lb=np.array([-dx/3, -dy/3, -350., 1., -500.]),
base_ub=np.array([dx/3, dy/3, 350., 89., 500.]))
print(f'If free, center constrained within +/- {dx/3:.1f} in X and +/- {dy/3:.1f} in Y.')
# TODO: Handle these issues instead of faulting
if np.any(np.less(p0, lb)):
raise ValueError('Parameter lower bounds cannot accommodate initial guess value!')
if np.any(np.greater(p0, ub)):
raise ValueError('Parameter upper bounds cannot accommodate initial guess value!')
#---------------------------------------------------------------------------
| |
# encoding: utf-8
import MySQLdb as mdb
import time
import datetime
import logging
import json
from django.conf import settings
from django.http import HttpResponse, JsonResponse
from django.views.generic import CreateView, DeleteView, ListView, View
from .models import Picture
from .response import JSONResponse, response_mimetype
from .serialize import serialize
import canales as can
logger = logging.getLogger(__name__)
database = settings.DATABASES['default']
regex = r'(^[^;]+)(.*)(?:\);)(.*$)?'
class PictureCreateView(CreateView):
model = Picture
fields = "__all__"
def form_valid(self, form):
self.object = form.save()
files = [serialize(self.object)]
data = {'files': files}
response = JSONResponse(data, mimetype=response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
def form_invalid(self, form):
data = json.dumps(form.errors)
return HttpResponse(content=data, status=400,
content_type='application/json')
class PictureDeleteView(DeleteView):
model = Picture
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
response = JSONResponse(True, mimetype=response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
class PictureListView(ListView):
model = Picture
def render_to_response(self, context, **response_kwargs):
files = [serialize(p) for p in self.get_queryset()]
data = {'files': files}
response = JSONResponse(data, mimetype=response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
class ProcessCsvView(View):
def get(self, request, *args, **kwargs):
resource_id = request.path.split('/')[2]
csv = Picture.objects.get(pk=resource_id)
ts = csv.timestamp.strftime("%Y%m%d%H%M%S")
tableName = 'nw' + csv.file_type + ts
response = can.process(settings.MEDIA_ROOT + '/' + csv.__str__(),
tableName)
return JsonResponse(response, safe=False)
class PostProcessView(View):
def __init__(self):
pass
def get(self, request, *args, **kwargs):
salida = {}
files = request.GET.copy()
TABLECAS = ""
TABLEPAT = ""
TABLEPATCAS = ""
TS = datetime.datetime.fromtimestamp(
time.time()).strftime('%Y%m%d%H%M%S')
DATEFORMATTED = datetime.datetime.fromtimestamp(
time.time()).strftime('%B %d, %Y at %H:%M:%S')
REPORTDIFF = "DIFFERENCES - "+DATEFORMATTED
REPORTFULL = "FULL REPORT - "+DATEFORMATTED
sqlReport = ("INSERT INTO `explorer_query` "
"VALUES (@MAXID,'@REPORTNAME', "
"'SELECT\r\n /* Req */ p.First_Name as `*First Name`, substring(p.Middle_Name,1,1) as `Middle Initial`, \r\n "
"/* Req */p.Last_Name as `*Last Name`, p.Date_of_Birth as `*DOB`,\r\n "
"/* Req */CASE p.Sex WHEN \"Female\" THEN \"F\" WHEN \"Male\" THEN \"M\" ELSE \"U\" END AS `*Gender`,\r\n "
"/* Req */p.Social_Security_Number as SSN, p.Street_1 as `*Address 1`, \r\n p.Street_2 as `Address 2`,\r\n "
"/* Req */p.City as `*City`, p.State as `*State`, p.Zip_Code as `*ZIP`, \r\n "
"IF(c.Marital_Status IS NULL, \"Single\", CASE c.Marital_Status WHEN \"Unknown\" THEN \"Single\" \r\n "
"WHEN \"\" THEN \"Single\" WHEN \"Divorced\" THEN \"D(Divorced)\" WHEN \"Widowed\" THEN \"W(Widowed)\" \r\n "
"ELSE c.Marital_Status END) AS `Marital Status`, CASE p.Employment_Status WHEN \"Full time\" THEN \"Employed\" \r\n "
"WHEN \"Not employed\" THEN \"Unemployed\" ELSE \"Other\" END AS `Employment Status`, \r\n "
"p.Chart_Number as `Chart No.`, p.Signature_on_File as `Signature on File`, \r\n "
"/* Req */coalesce(p.Phone_1, p.Contact_Phone_1, \"000-000-0000\") as `*Home Phone`, \r\n "
"p.Work_Phone as `Work Phone`, \"\" as `Cell Phone`, p.EMail, \r\n "
"CASE p.Race WHEN \"W\" THEN \"White\" WHEN \"I\" THEN \"American Indian or Alaska Native\" \r\n "
"WHEN \"E\" THEN \"Patient Declined\" ELSE \"Hispanic or Latino\" END AS Race, \r\n "
"CASE p.Ethnicity WHEN \"H\" THEN \"Hispanic or Latino\" \r\n "
"ELSE \"Hispanic or Latino\" END AS Ethnicity,\r\n \r\n "
### INSURANCE CARRIER ONE
"/* INSURANCE NUMBER 1 DATA */\r\n "
"Coalesce(i1.INTERNAL_ID_JUAN, IF(c.Insurance_Carrier_1 IS NULL, NULL,\r\n "
"CONCAT(\"OLD \",c.Insurance_Carrier_1))) as `HF Payer ID (P)`,\r\n "
"/*Falta*/ \"\" as `Insurance Type (P)`, \r\n "
"c.Group_Name_1 as `Group Name (P)`, c.Group_Number_1 as `Group Number (P)`, \r\n "
"c.Policy_Number_1 as `Insured''s I D Number (P)`, c.Copayment_Amount as `Co-Pay (P)`, \r\n "
"c.Accept_Assignment_1 as `Insured''s Accept Assignment (P)`, c.Insured_Relationship_1 as `Patient''s Relationship (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE p1.First_Name END as `Insured''s First Name (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE p1.Middle_Initial END as `Insured''s Middle Initial (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE p1.Last_Name END as `Insured''s Last Name (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE p1.Date_of_Birth END as `Insured''s DOB (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE CASE p1.Sex WHEN \"Female\" THEN \"F\" WHEN \"Male\" THEN \"M\" ELSE \"U\" END END AS `Insured''s Gender (P)`,\r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_1 IS NULL, NULL, COALESCE(p1.Street_1, p.Street_1)) END as `Insured''s Address 1 (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_1 IS NULL, NULL, COALESCE(p1.Street_2, p.Street_2)) END as `Insured''s Address 2 (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_1 IS NULL, NULL, COALESCE(p1.City, p.City)) END as `Insured''s City (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_1 IS NULL, NULL, COALESCE(p1.State,p.State)) END as `Insured''s State (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_1 IS NULL, NULL, COALESCE(p1.Zip_Code, p.Zip_Code)) END as `Insured''s Zip (P)`, \r\n "
"case c.Insured_Relationship_1 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_1 IS NULL, NULL, COALESCE(p1.Phone_1, p.Phone_1, \"000-000-0000\")) END as `Insured''s Phone (P)`,\r\n "
"c.Prior_Authorization_No as `Authorization No (P)`, \r\n \r\n "
### EMERGENCY
"/* EMERGENCY CONTACT DATA */\r\n "
"LEFT(p.Contact_Name, length(p.Contact_Name) - locate(\" \", reverse(p.Contact_Name))) as `Emergency First Name`, \r\n "
"SUBSTRING_INDEX(p.Contact_Name, \" \", -1) as `Emergency Last Name`, \r\n "
"COALESCE(p.Contact_Phone_1, p.Contact_Phone_2) as `Emergency Phone`, \"O\" as `Emergency Relation`,\r\n \r\n "
### INSURANCE CARRIER TWO
"/* INSURANCE NUMBER 2 DATA */\r\n "
"Coalesce(i2.INTERNAL_ID_JUAN, IF(c.Insurance_Carrier_2 IS NULL, NULL,\r\n "
"CONCAT(\"OLD \",c.Insurance_Carrier_2))) as `HF Payer ID (S)`,\r\n "
"/*Falta*/ \"\" as `Insurance Type (S)`, \r\n "
"c.Group_Name_2 as `Group Name (S)`, c.Group_Number_2 as `Group Number (S)`, \r\n "
"c.Policy_Number_2 as `Insured''s ID Number (S)`, /*Falta*/ \"\" as `Co-Pay (S)`, \r\n "
"c.Accept_Assignment_2 as `Insured''s Accept Assignment (S)`, c.Insured_Relationship_2 as `Patient''s Relationship (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE p2.First_Name END as `Insured''s First Name (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE p2.Middle_Initial END as `Insured''s Middle Initial (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE p2.Last_Name END as `Insured''s Last Name (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE p2.Date_of_Birth END as `Insured''s DOB (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE CASE p2.Sex WHEN \"Female\" THEN \"F\" WHEN \"Male\" THEN \"M\" ELSE \"U\" END END AS `Insured''s Gender (S)`,\r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_2 IS NULL, NULL, COALESCE(p2.Street_1, p.Street_1)) END as `Insured''s Address 1 (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_2 IS NULL, NULL, COALESCE(p2.Street_2, p.Street_2)) END as `Insured''s Address 2 (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_2 IS NULL, NULL, COALESCE(p2.City, p.City)) END as `Insured''s City (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_2 IS NULL, NULL, COALESCE(p2.State,p.State)) END as `Insured''s State (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_2 IS NULL, NULL, COALESCE(p2.Zip_Code, p.Zip_Code)) END as `Insured''s Zip (S)`, \r\n "
"case c.Insured_Relationship_2 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_2 IS NULL, NULL, COALESCE(p2.Phone_1, p.Phone_1, \"000-000-0000\")) END as `Insured''s Phone (S)`,\r\n "
"/*Falta*/ \"\" as `Authorization No (S)`,\r\n \r\n "
### INSURANCE CARRIER THREE
"/* INSURANCE NUMBER 3 DATA */\r\n "
"Coalesce(i3.INTERNAL_ID_JUAN, IF(c.Insurance_Carrier_3 IS NULL, NULL,\r\n "
"CONCAT(\"OLD \",c.Insurance_Carrier_3))) as `HF Payer ID (T)`,\r\n "
"/*Falta*/ \"\" as `Insurance Type (T)`, \r\n "
"c.Group_Name_3 as `Group Name (T)`, c.Group_Number_3 as `Group Number (T)`, \r\n "
"c.Policy_Number_3 as `Insured''s ID Number (T)`, /*Falta*/ \"\" as `Co-Pay (T)`, \r\n "
"c.Accept_Assignment_3 as `Insured''s Accept Assignment (T)`, c.Insured_Relationship_3 as `Patient''s Relationship (T)`, \r\n "
"case c.Insured_Relationship_3 WHEN \"Self\" Then \"\" ELSE p3.First_Name END as `Insured''s First Name (T)`, \r\n "
"case c.Insured_Relationship_3 WHEN \"Self\" Then \"\" ELSE p3.Middle_Initial END as `Insured''s Middle Initial (T)`, \r\n "
"case c.Insured_Relationship_3 WHEN \"Self\" Then \"\" ELSE p3.Last_Name END as `Insured''s Last Name (T)`, \r\n "
"case c.Insured_Relationship_3 WHEN \"Self\" Then \"\" ELSE p3.Date_of_Birth END as `Insured''s DOB (T)`, \r\n "
"case c.Insured_Relationship_3 WHEN \"Self\" Then \"\" ELSE CASE p3.Sex WHEN \"Female\" THEN \"F\" WHEN \"Male\" THEN \"M\" ELSE \"U\" END END AS `Insured''s Gender (T)`,\r\n "
"case c.Insured_Relationship_3 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_3 IS NULL, NULL, COALESCE(p3.Street_1, p.Street_1)) END as `Insured''s Address 1 (T)`, \r\n "
"case c.Insured_Relationship_3 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_3 IS NULL, NULL, COALESCE(p3.Street_2, p.Street_2)) END as `Insured''s Address 2 (T)`, \r\n "
"case c.Insured_Relationship_3 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_3 IS NULL, NULL, COALESCE(p3.City, p.City)) END as `Insured''s City (T)`, \r\n "
"case c.Insured_Relationship_3 WHEN \"Self\" Then \"\" ELSE IF(c.Insured_3 IS NULL, | |
# Source file: grblocalization/GRBToyModel3D.py
###################################################################################################
#
# GRBToyModel.py
#
# Copyright (C) by <NAME>, <NAME> & <NAME>.
# All rights reserved.
#
# Please see the file LICENSE in the main repository for the copyright-notice.
#
###################################################################################################
###################################################################################################
import tensorflow as tf
import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
#import matplotlib.pyplot as plt
import random
import signal
import sys
import time
import math
import csv
import os
from datetime import datetime
from functools import reduce
import ROOT as M
# Load MEGAlib into ROOT so that it is usable
M.gSystem.Load("$(MEGALIB)/lib/libMEGAlib.so")
print("\nGRB localization toy model (tensorflow based) \n")
###################################################################################################
# Step 1: Input parameters
###################################################################################################
# User input parameters
NumberOfComptonEvents = 2000
NumberOfBackgroundEvents = 2000
NumberOfTrainingLocations = 32*1024
NumberOfTestLocations = 1024
MaxBatchSize = 128
ResolutionInDegrees = 5
OneSigmaNoiseInRadians = math.radians(0.0)
OutputDirectory = "Output"
# Set derived parameters
NumberOfTrainingBatches= (int) (NumberOfTrainingLocations / MaxBatchSize)
TrainingBatchSize = (int) (NumberOfTrainingLocations / NumberOfTrainingBatches)
if TrainingBatchSize > MaxBatchSize:
print("Error: Training batch size larger than {}: {}".format(MaxBatchSize, TrainingBatchSize))
sys.exit(0)
NumberOfTestingBatches= (int) (NumberOfTestLocations / MaxBatchSize)
TestingBatchSize = (int) (NumberOfTestLocations / NumberOfTestingBatches)
if TestingBatchSize > MaxBatchSize:
print("Error: Testing batch size larger than {}: {}".format(MaxBatchSize, TestingBatchSize))
sys.exit(0)
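# Worked example with the defaults above: NumberOfTrainingLocations = 32*1024 = 32768 and
# MaxBatchSize = 128 give 256 training batches of 128 locations each; NumberOfTestLocations = 1024
# gives 8 testing batches of 128.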
ThetaMin = 0
ThetaMax = np.pi
ThetaBins = int(180 / ResolutionInDegrees)
ChiMin = 0
ChiMax = np.pi
ChiBins = int(180 / ResolutionInDegrees)
PsiMin = -np.pi
PsiMax = +np.pi
PsiBins = int(360 / ResolutionInDegrees)
InputDataSpaceSize = ThetaBins * ChiBins * PsiBins
OutputDataSpaceSize = 2
if os.path.exists(OutputDirectory):
Now = datetime.now()
OutputDirectory += Now.strftime("_%Y%m%d_%H%M%S")
os.makedirs(OutputDirectory)
###################################################################################################
# Step 2: Global functions
###################################################################################################
# Take care of Ctrl-C
Interrupted = False
NInterrupts = 0
def signal_handler(signal, frame):
global Interrupted
Interrupted = True
global NInterrupts
NInterrupts += 1
if NInterrupts >= 2:
print("Aborting!")
sys.exit(0)
print("You pressed Ctrl+C - waiting for graceful abort, or press Ctrl-C again, for quick exit.")
signal.signal(signal.SIGINT, signal_handler)
###################################################################################################
# Step 3: Create some training, test & verification data sets
###################################################################################################
print("Info: Creating {:,} Compton events".format((NumberOfTrainingLocations + NumberOfTestLocations) * (NumberOfComptonEvents + NumberOfBackgroundEvents)))
def KleinNishina(Ei, phi):
if Ei <= 0:
#print("Error: Invalid input: Ei <= 0")
return 0
if phi < 0 or phi > math.pi:
#print("Error: Invalid input: phi < 0 or phi > math.pi")
return 0
Radius = 2.8E-15
E0 = 510.998910
sinphi = math.sin(phi)
Eg = -E0*Ei/(math.cos(phi)*Ei-Ei-E0)
return 0.5*Radius*Radius*Eg*Eg/Ei/Ei*(Eg/Ei+Ei/Eg-sinphi*sinphi)*sinphi
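# Note: the expression above is the (unnormalized) Klein-Nishina differential cross-section
# dsigma/dOmega multiplied by sin(phi), i.e. it is proportional to the probability density of
# the Compton scatter angle phi for an incoming photon energy Ei given in keV.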
def ComptonScatterAngle(Eg, Ee):
E0 = 510.998910
Value = 1 - E0 * (1.0/Eg - 1.0/(Ee + Eg))
if Value <= -1 or Value >= 1:
#print("Error: Invalid input: Value <= -1 or Value >= 1")
return 0
return math.acos(Value)
def Create(Ei, Rotation):
# Simulate the gamma ray according to Butcher & Messel: Nuc Phys 20(1960), 15
Ei_m = Ei / 510.998910
Epsilon = 0.0
EpsilonSquare = 0.0
OneMinusCosTheta = 0.0
SinThetaSquared = 0.0
Epsilon0 = 1./(1. + 2.*Ei_m)
Epsilon0Square = Epsilon0*Epsilon0
Alpha1 = - math.log(Epsilon0)
Alpha2 = 0.5*(1.- Epsilon0Square)
Reject = 0.0
while True:
if Alpha1/(Alpha1+Alpha2) > random.random():
Epsilon = math.exp(-Alpha1*random.random())
EpsilonSquare = Epsilon*Epsilon
else:
EpsilonSquare = Epsilon0Square + (1.0 - Epsilon0Square)*random.random()
Epsilon = math.sqrt(EpsilonSquare)
OneMinusCosTheta = (1.- Epsilon)/(Epsilon*Ei_m)
SinThetaSquared = OneMinusCosTheta*(2.-OneMinusCosTheta)
Reject = 1.0 - Epsilon*SinThetaSquared/(1.0 + EpsilonSquare)
if Reject < random.random():
break
CosTheta = 1.0 - OneMinusCosTheta
# Set the new photon parameters --- direction is random since we didn't give a start direction
Theta = np.arccos(1 - 2*random.random()) # Compton scatter angle since on axis
Phi = 2.0 * np.pi * random.random()
Dg = M.MVector()
Dg.SetMagThetaPhi(1.0, Theta, Phi)
Dg = Rotation * Dg
Chi = Dg.Theta()
Psi = Dg.Phi()
Eg = Epsilon*Ei
Ee = Ei - Eg
#print(Psi, Chi, Theta, Eg+Ee)
return Chi, Psi, Theta, Eg+Ee
# Dummy noising of the data
def Noise(Chi, Psi, Theta, NoiseOneSigmaInRadians):
NoisedChi = sys.float_info.max
while NoisedChi < 0 or NoisedChi > math.pi:
NoisedChi = np.random.normal(Chi, NoiseOneSigmaInRadians)
#print("Chi: {} {}".format(Chi, NoisedChi))
NoisedPsi = sys.float_info.max
while NoisedPsi < -math.pi or NoisedPsi > math.pi:
NoisedPsi = np.random.normal(Psi, NoiseOneSigmaInRadians)
#print("Psi {} {}".format(Psi, NoisedPsi))
NoisedTheta = sys.float_info.max
while NoisedTheta < 0 or NoisedTheta > math.pi:
NoisedTheta = np.random.normal(Theta, NoiseOneSigmaInRadians)
#print("Theta {} {}".format(Theta, NoisedTheta))
return NoisedChi, NoisedPsi, NoisedTheta
def GenerateOneDataSet(Index):
DataSet = np.zeros(shape=(ThetaBins, ChiBins, PsiBins, 1))
if Index > 0 and Index % 1024 == 0:
print("Created data sets: {}".format(Index))
# Create a random rotation matrix
V = M.MVector()
V.SetMagThetaPhi(1, np.arccos(1 - 2*random.random()), 2.0 * np.pi * random.random())
Angle = 2.0 * np.pi * random.random()
'''
if random.random() < 0.25:
V.SetMagThetaPhi(1, 0.4, 0.1)
Angle = 0.6
elif random.random() < 0.5:
V.SetMagThetaPhi(1, 0.9, 0.3)
Angle = 4.6
elif random.random() < 0.75:
V.SetMagThetaPhi(1, 0.4, 0.8)
Angle = 2.6
else:
V.SetMagThetaPhi(1, 0.2, 0.6)
Angle = 0.2
'''
Rotation = M.MRotation(Angle, V)
# Retrieve the origin of the gamma rays
Origin = M.MVector(0, 0, 1)
Origin = Rotation*Origin
# Create the input source events
for e in range(0, NumberOfComptonEvents):
Chi, Psi, Theta, Energy = Create(511, Rotation)
#print("{}, {}, {}".format(Chi, Psi, Theta))
if OneSigmaNoiseInRadians > 0:
Chi, Psi, Theta = Noise(Chi, Psi, Theta, OneSigmaNoiseInRadians)
ChiBin = (int) (((Chi - ChiMin) / (ChiMax - ChiMin)) * ChiBins)
PsiBin = (int) (((Psi - PsiMin) / (PsiMax - PsiMin)) * PsiBins)
ThetaBin = (int) (((Theta - ThetaMin) / (ThetaMax - ThetaMin)) * ThetaBins)
DataSet[ThetaBin, ChiBin, PsiBin] += 1
# Create input background events
for e in range(0, NumberOfBackgroundEvents):
ChiBin = random.randint(0, ChiBins-1)
PsiBin = random.randint(0, PsiBins-1)
ThetaBin = random.randint(0, ThetaBins-1)
DataSet[ThetaBin, ChiBin, PsiBin] += 1
return Origin.Theta(), Origin.Phi(), DataSet
# Parallelizing using Pool.starmap()
import multiprocessing as mp
# Create data sets
pool = mp.Pool(mp.cpu_count())
DataSetTrain = pool.map(GenerateOneDataSet, [l for l in range(0, NumberOfTrainingLocations)])
pool.close()
print("Info: Created {:,} training data sets. Now prepping them for Tensorflow.".format(NumberOfTrainingLocations))
# Convert the data set into training and testing data
XTrain = np.zeros(shape=(NumberOfTrainingLocations, ThetaBins, ChiBins, PsiBins, 1))
YTrain = np.zeros(shape=(NumberOfTrainingLocations, OutputDataSpaceSize))
for l in range(0, NumberOfTrainingLocations):
YTrain[l, 0] = DataSetTrain[l][0]
YTrain[l, 1] = DataSetTrain[l][1]
XTrain[l] = DataSetTrain[l][2]
'''
print(type(XTrain[l]))
print(XTrain[l].shape)
if YTrain[l, 1] > 3.0:
# Plot the first test data point
print("Pos {}, {}".format(YTrain[l, 0], YTrain[l, 1]))
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
adds = 0
for t in range(0, ThetaBins):
for c in range(0, ChiBins):
for p in range(0, PsiBins):
if XTrain[l, t, c, p] > 0:
ax.scatter(math.degrees(PsiMin) + p * ResolutionInDegrees, math.degrees(ChiMin) + c * ResolutionInDegrees, math.degrees(ThetaMin) + t * ResolutionInDegrees, XTrain[l, t, c, p])
print("{}, {}, {}".format(math.degrees(PsiMin) + p * ResolutionInDegrees, math.degrees(ChiMin) + c * ResolutionInDegrees, math.degrees(ThetaMin) + t * ResolutionInDegrees))
adds += XTrain[l, t, c, p]
print("Adds: {}".format(adds))
plt.show()
plt.pause(0.001)
input("Press [enter] to EXIT")
sys.exit()
'''
del DataSetTrain
#for l in range(NumberOfTrainingLocations, NumberOfTrainingLocations + NumberOfTestLocations):
#YTest[l - NumberOfTrainingLocations, 0] = DataSet[l][0]
#YTest[l - NumberOfTrainingLocations, 1] = DataSet[l][1]
#XTest[l - NumberOfTrainingLocations] = DataSet[l][2]
# Create data sets
pool = mp.Pool(mp.cpu_count())
DataSetTest = pool.map(GenerateOneDataSet, [l for l in range(0, NumberOfTestLocations)])
pool.close()
print("Info: Created {:,} testing data sets. Now prepping them for Tensorflow.".format(NumberOfTestLocations))
XTest = np.zeros(shape=(NumberOfTestLocations, ThetaBins, ChiBins, PsiBins, 1))
YTest = np.zeros(shape=(NumberOfTestLocations, OutputDataSpaceSize))
for l in range(NumberOfTestLocations):
YTest[l, 0] = DataSetTest[l][0]
YTest[l, 1] = DataSetTest[l][1]
XTest[l] = DataSetTest[l][2]
del DataSetTest
###################################################################################################
# Step 4: Setting up the neural network
###################################################################################################
print("Info: Setting up neural network...")
# Placeholders
print(" ... placeholders ...")
X = tf.placeholder(tf.float32, [None, ThetaBins, ChiBins, PsiBins, 1], name="X")
Y = tf.placeholder(tf.float32, [None, OutputDataSpaceSize], name="Y")
L = tf.layers.conv3d(X, 64, 5, 2, 'VALID')
#L = tf.layers.batch_normalization(L, training=tf.placeholder_with_default(True, shape=None))
#L = tf.maximum(L, 0.1*L)
L = tf.layers.conv3d(L, 64, 3, 1, 'VALID')
#L = tf.layers.batch_normalization(L, training=tf.placeholder_with_default(True, shape=None))
#L = tf.maximum(L, 0.1*L)
L = tf.layers.conv3d(L, 128, 2, 2, 'VALID')
#L = tf.layers.batch_normalization(L, training=tf.placeholder_with_default(True, shape=None))
#L = tf.maximum(X, 0.1*X)
L = tf.layers.conv3d(L, 128, 2, 2, 'VALID')
#L = tf.layers.batch_normalization(L, training=tf.placeholder_with_default(True, shape=None))
#L = tf.maximum(L, 0.1*L)
L = tf.layers.dense(tf.reshape(L, [-1, reduce(lambda a,b:a*b, L.shape.as_list()[1:])]), 128)
#L = tf.layers.batch_normalization(L, training=tf.placeholder_with_default(True, shape=None))
L = tf.nn.relu(L)
print(" ... output layer ...")
Output = tf.layers.dense(tf.reshape(L, [-1, reduce(lambda a,b:a*b, L.shape.as_list()[1:])]), OutputDataSpaceSize)
#tf.print("Y: ", Y, output_stream=sys.stdout)
# Loss function - simple linear distance between output and ideal results
print(" ... loss function ...")
LossFunction = tf.reduce_sum(tf.abs(Output - Y)/NumberOfTestLocations)
#LossFunction = tf.reduce_sum(tf.pow(Output - Y, 2))/NumberOfTestLocations
#LossFunction = tf.losses.mean_squared_error(Output, Y)
# Minimizer
print(" ... minimizer ...")
Trainer = tf.train.AdamOptimizer().minimize(LossFunction)
# Create and initialize the session
print(" ... session ...")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(" ... listing uninitialized variables if there are any ...")
print(tf.report_uninitialized_variables())
print(" ... writer ...")
writer = tf.summary.FileWriter(OutputDirectory, sess.graph)
writer.close()
# Add ops to save and restore all the variables.
print(" ... saver ...")
Saver = tf.train.Saver()
###################################################################################################
# Step 5: Training and evaluating the network
###################################################################################################
print("Info: Training and evaluating the network")
# Train the network
MaxTimesNoImprovement = 1000
TimesNoImprovement = 0
BestMeanSquaredError = sys.float_info.max
BestMeanAngularDeviation = sys.float_info.max
BestRMSAngularDeviation = sys.float_info.max
BestLoss = sys.float_info.max
IterationOutputInterval = 10
CheckPointNum = 0
print("Creating progress file")
with open(OutputDirectory + "/Progress.txt", 'w') as f:
f.write('')
def CheckPerformance():
| |
# Source file: hera_sim/simulate.py (from the hughbg/hera_sim repository)
"""Module containing a high-level interface for :mod:`hera_sim`.
This module defines the :class:`Simulator` class, which provides the user
with a high-level interface to all of the features provided by :mod:`hera_sim`.
For detailed instructions on how to manage a simulation using the
:class:`Simulator`, please refer to the tutorials.
"""
from cached_property import cached_property
import functools
import inspect
import warnings
import yaml
import time
from pathlib import Path
from deprecation import deprecated
import numpy as np
from pyuvdata import UVData
from astropy import constants as const
from typing import Type, Union, Tuple, Sequence, Optional, Dict
from . import io
from . import utils
from .defaults import defaults
from . import __version__
from .components import SimulationComponent, get_model, list_all_components
_add_depr = deprecated(
deprecated_in="1.0", removed_in="2.0", details="Use the :meth:`add` method instead."
)
# Define some commonly used types for typing purposes.
AntPairPol = Tuple[int, int, str]
AntPair = Tuple[int, int]
AntPol = Tuple[int, str]
Component = Union[str, Type[SimulationComponent], SimulationComponent]
# wrapper for the run_sim method, necessary for part of the CLI
def _generator_to_list(func, *args, **kwargs):
@functools.wraps(func)
def new_func(*args, **kwargs):
result = list(func(*args, **kwargs))
return None if result == [] else result
return new_func
class Simulator:
"""Simulate visibilities and/or instrumental effects for an entire array.
Parameters
----------
data
:class:`pyuvdata.UVData` object to use for the simulation or path to a
UVData-supported file.
defaults_config
Path to defaults configuration, seasonal keyword, or configuration
dictionary for setting default simulation parameters. See tutorial
on setting defaults for further information.
redundancy_tol
Position tolerance for finding redundant groups, in meters. Default is
1 meter.
kwargs
Parameters to use for initializing UVData object if none is provided.
If ``data`` is a file path, then these parameters are used when reading
the file. Otherwise, the parameters are used in creating a ``UVData``
object using :func:`~.io.empty_uvdata`.
Attributes
----------
data : :class:`pyuvdata.UVData` instance
Object containing simulated visibilities and metadata.
extras : dict
Dictionary to use for storing extra parameters.
antpos : dict
Dictionary pairing antenna numbers to ENU positions in meters.
lsts : np.ndarray of float
Observed LSTs in radians.
freqs : np.ndarray of float
Observed frequencies in GHz.
times : np.ndarray of float
Observed times in JD.
pols : list of str
Polarization strings.
red_grps : list of list of int
Redundant baseline groups. Each entry is a list containing the baseline
integer for each member of that redundant group.
red_vecs : list of :class:`numpy.ndarray` of float
Average of all the baselines for each redundant group.
red_lengths : list of float
Length of each redundant baseline.
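Examples
--------
A minimal, illustrative sketch. The keyword names below are forwarded to
:func:`~.io.empty_uvdata` and are assumptions here, not a fixed API::

    sim = Simulator(Nfreqs=100, Ntimes=20, array_layout={0: (0.0, 0.0, 0.0)})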
"""
def __init__(
self,
*,
data: Optional[Union[str, UVData]] = None,
defaults_config: Optional[Union[str, dict]] = None,
redundancy_tol: float = 1.0,
**kwargs,
):
# TODO: add ability for user to specify parameter names to look for on
# parsing call signature
# Create some utility dictionaries.
self._components = {}
self._seeds = {}
self._antpairpol_cache = {}
self._filter_cache = {
"delay": {},
"fringe": {},
}
# apply and activate defaults if specified
if defaults_config:
self.apply_defaults(defaults_config)
# actually initialize the UVData object stored in self.data
self._initialize_data(data, **kwargs)
self._calculate_reds(tol=redundancy_tol)
self.extras = self.data.extra_keywords
for param in ("Ntimes", "Nfreqs", "Nblts", "Npols", "Nbls"):
setattr(self, param, getattr(self.data, param))
self.Nants = len(self.antpos)
for attr in ("data", "flags", "antpairs", "antpairpols"):
setattr(
self,
f"get_{attr}",
getattr(self.data, f"get_{attr}"),
)
@cached_property
def antpos(self):
"""Mapping between antenna numbers and ENU positions in meters."""
antpos, ants = self.data.get_ENU_antpos(pick_data_ants=True)
return dict(zip(ants, antpos))
@cached_property
def lsts(self):
"""Observed Local Sidereal Times in radians."""
# This process retrieves the unique LSTs while respecting phase wraps.
_, unique_inds = np.unique(self.data.time_array, return_index=True)
return self.data.lst_array[unique_inds]
@cached_property
def freqs(self):
"""Frequencies in GHz."""
return np.unique(self.data.freq_array) / 1e9
@cached_property
def times(self):
"""Simulation times in JD."""
return np.unique(self.data.time_array)
@cached_property
def pols(self):
"""Array of polarization strings."""
return self.data.get_pols()
def apply_defaults(self, config: Optional[Union[str, dict]], refresh: bool = True):
"""
Apply the provided default configuration.
Equivalent to calling :meth:`~hera_sim.defaults.set` with the same parameters.
Parameters
----------
config
If given, either a path pointing to a defaults configuration
file, a string identifier of a particular config (e.g. 'h1c')
or a dictionary of configuration parameters
(see :class:`~.defaults.Defaults`).
refresh
Whether to refresh the defaults.
"""
defaults.set(config, refresh=refresh)
def calculate_filters(
self,
*,
delay_filter_kwargs: Optional[Dict[str, Union[float, str]]] = None,
fringe_filter_kwargs: Optional[Dict[str, Union[float, str, np.ndarray]]] = None,
):
"""
Pre-compute fringe-rate and delay filters for the entire array.
Parameters
----------
delay_filter_kwargs
Extra parameters necessary for generating a delay filter. See
:func:`utils.gen_delay_filter` for details.
fringe_filter_kwargs
Extra parameters necessary for generating a fringe filter. See
:func:`utils.gen_fringe_filter` for details.
"""
delay_filter_kwargs = delay_filter_kwargs or {}
fringe_filter_kwargs = fringe_filter_kwargs or {}
self._calculate_delay_filters(**delay_filter_kwargs)
self._calculate_fringe_filters(**fringe_filter_kwargs)
def add(
self,
component: Component,
*,
add_vis: bool = True,
ret_vis: bool = False,
seed: Optional[Union[str, int]] = None,
vis_filter: Optional[Sequence] = None,
component_name: Optional[str] = None,
**kwargs,
) -> Optional[Union[np.ndarray, Dict[int, np.ndarray]]]:
"""
Simulate an effect then apply and/or return the result.
Parameters
----------
component
Effect to be simulated. This can either be an alias of the effect,
or the class (or instance thereof) that simulates the effect.
add_vis
Whether to apply the effect to the simulated data. Default is True.
ret_vis
Whether to return the simulated effect. Nothing is returned by default.
seed
How to seed the random number generator. Can either directly provide
a seed as an integer, or use one of the supported keywords. See
tutorial for using the :class:`Simulator` for supported seeding modes.
Default is to use a seed based on the current random state.
vis_filter
Iterable specifying which antennas/polarizations for which the effect
should be simulated. See tutorial for using the :class:`Simulator` for
details of supported formats and functionality.
component_name
Name to use when recording the parameters used for simulating the effect.
Default is to use the name of the class used to simulate the effect.
**kwargs
Optional keyword arguments for the provided ``component``.
Returns
-------
effect
The simulated effect; only returned if ``ret_vis`` is set to ``True``.
If the simulated effect is multiplicative, then a dictionary mapping
antenna numbers to the per-antenna effect (as a ``np.ndarray``) is
returned. Otherwise, the effect for the entire array is returned with
the same structure as the ``pyuvdata.UVData.data_array`` that the
data is stored in.
"""
# Obtain a callable reference to the simulation component model.
model = self._get_component(component)
model_key = (
component_name if component_name else self._get_model_name(component)
)
if not isinstance(model, SimulationComponent):
model = model(**kwargs)
self._sanity_check(model) # Check for component ordering issues.
self._antpairpol_cache[model_key] = [] # Initialize this model's cache.
if seed is None and add_vis:
warnings.warn(
"You have not specified how to seed the random state. "
"This effect might not be exactly recoverable."
)
# Simulate the effect by iterating over baselines and polarizations.
data = self._iteratively_apply(
model,
add_vis=add_vis,
ret_vis=ret_vis,
vis_filter=vis_filter,
antpairpol_cache=self._antpairpol_cache[model_key],
seed=seed,
**kwargs,
) # This is None if ret_vis is False
if add_vis:
# Record the component simulated and the parameters used.
if defaults._override_defaults:
for param in getattr(model, "kwargs", {}):
if param not in kwargs and param in defaults():
kwargs[param] = defaults(param)
self._update_history(model, **kwargs)
if seed:
kwargs["seed"] = seed
self._update_seeds(model_key)
if vis_filter is not None:
kwargs["vis_filter"] = vis_filter
self._components[model_key] = kwargs
self._components[model_key]["alias"] = component
else:
del self._antpairpol_cache[model_key]
return data
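# A hedged usage sketch for ``add`` (comments only; the component aliases
# "thermal_noise" and "gains" and the seed modes "initial" and "once" are
# examples and depend on the components registered with hera_sim):
#     sim = Simulator(data="observation.uvh5")  # hypothetical file name
#     noise = sim.add("thermal_noise", seed="initial", ret_vis=True)
#     sim.add("gains", seed="once")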
def get(
self,
component: Component,
key: Optional[Union[int, str, AntPair, AntPairPol]] = None,
) -> Union[np.ndarray, Dict[int, np.ndarray]]:
"""
Retrieve an effect that was previously simulated.
Parameters
----------
component
Effect that is to be retrieved. See :meth:`add` for more details.
key
Key for retrieving simulated effect. Possible choices are as follows:
An integer may specify either a single antenna (for per-antenna
effects) or be a ``pyuvdata``-style baseline integer.
A string specifying a polarization can be used to retrieve the
effect for every baseline for the specified polarization.
A length-2 tuple of integers can be used to retrieve the effect
for that baseline for all polarizations.
A length-3 tuple specifies a particular baseline and polarization
for which to retrieve the effect.
Not specifying a key results in the effect being returned for all
baselines (or antennas, if the effect is per-antenna) and polarizations.
Returns
-------
effect
The simulated effect appropriate for the provided key. Return type
depends on the effect being simulated and the provided key. See the
tutorial Jupyter notebook | |
userMeans, userStdDevs = normalizeRatings(denseRatings, boolViewings)
similarities, user_commonViewings = buildUsersGraph(normRatings, boolViewings, verbose=verbose)
neighbors = buildNeighbors(user_commonViewings)
sorted_neighbors = sortNeighbors(neighbors, similarities, useTuples=True, verbose=verbose)
model = {'nF': nF, 'nU': nU, 'boolViewings': boolViewings, 'denseRatings': denseRatings, 'normRatings': normRatings,
'userMeans': userMeans, 'userStdDevs': userStdDevs, 'sortedNeighbors': sorted_neighbors}
return model
def buildFilmsModel(ratings, verbose=True):
"""
Does the whole model building process from the films point of view given the initial sparse ratings matrix
:param ratings: The initial sparse ratings matrix
:param verbose: If set to True, status prints are made during the execution of the code
:return: The built model, as a dictionary containing the following entries:
- 'nF' : The number of films (ie number of rows in the ratings matrix)
- 'nU' : The number of users (ie number of columns in the ratings matrix)
- 'boolViewings' : The boolean (0 or 1) matrix indicating which user has seen which film
- 'denseRatings' : The ratings matrix in a dense form (numpy array)
- 'normRatings' : A dense matrix containing the normalized (per user) ratings
- 'userMeans' : The vector of mean grades given by users
- 'userStdDevs' : The vector of standard deviations of the grades given by users
- 'similarities' : The matrix of similarities between films
- 'sortedNeighbors' : The list of lists of the neighbors of each film, in decreasing order of similarity
"""
nF, nU = ratings.shape
if verbose:
print('Preprocessing data')
boolViewings = booleanViewings(ratings)
denseRatings = sparseToDense(ratings)
normRatings, userMeans, userStdDevs = normalizeRatings(denseRatings, boolViewings)
if verbose:
print('Building graph')
similarities, film_commonViewings = buildFilmsGraph(normRatings, boolViewings, verbose=verbose)
if verbose:
print('Extracting neighbors')
neighbors = buildNeighbors(film_commonViewings)
if verbose:
print('Sorting neighbors')
sorted_neighbors = sortNeighbors(neighbors, similarities, useTuples=False, verbose=verbose)
model = {'nF': nF, 'nU': nU, 'boolViewings': boolViewings, 'denseRatings': denseRatings, 'normRatings': normRatings,
'userMeans': userMeans, 'userStdDevs': userStdDevs, 'similarities': similarities,
'sortedNeighbors': sorted_neighbors}
if verbose:
print('Model built')
return model
def usersPrediction(f, u, k, model, verbose=True):
"""
Predicts the grade of a user for one film given the (users point of view based) model and the parameter k
:param f: The film index
:param u: The user index
:param k: The parameter k of the k nearest neighbors algorithm
:param model: The prediction model, as built by buildUsersModel()
:param verbose: If set to True, status prints are made during the execution of the code
:return: An estimation as a floating point number of the grade the user would give to the film
The prediction is made by taking as many neighbors as possible of u, but at most k, who have seen the film f, chosen
by decreasing order of similarity with u.
The average of their normalized ratings, weighted by their similarities with u, is then computed as a prediction of
the normalized grade of u for f.
The grade is then multiplied by u's standard deviation, added to u's mean grade, and culled if necessary between 1
and 5, to give the final estimate.
If no neighbors of u with a positive similarity with u have seen the film f, the prediction is simply the average
grade given by u.
"""
count = 0
ref_neighbors = []
n_neighbors = len(model['sortedNeighbors'][u])
while len(ref_neighbors) < k and count < n_neighbors:
n, sim = model['sortedNeighbors'][u][count]
if model['boolViewings'][f, n] == 1 and sim > 0:
ref_neighbors.append((n, sim))
count += 1
if len(ref_neighbors) == 0:
if verbose:
print("No (correlated) neighbors have seen the film, returning average grade")
return model['userMeans'][u]
meanGrade = 0
totWeights = 0
for n, sim in ref_neighbors:
meanGrade += (model['userMeans'][u] + model['userStdDevs'][u] * model['normRatings'][f, n]) * sim
totWeights += sim
meanGrade /= totWeights
meanGrade = min(meanGrade, 5)
meanGrade = max(meanGrade, 1)
return meanGrade
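# A minimal usage sketch (illustrative only): calling usersPrediction with a tiny
# hand-built model dictionary. All values below are made up to show the expected
# structure; real models should come from buildUsersModel().
if __name__ == "__main__":
    _toy_model = {
        'nF': 2, 'nU': 3,
        'boolViewings': np.array([[1, 1, 0], [0, 1, 1]]),
        'denseRatings': np.array([[5., 3., 0.], [0., 4., 2.]]),
        'normRatings': np.array([[1., -1., 0.], [0., 1., -1.]]),
        'userMeans': np.array([5., 3.5, 2.]),
        'userStdDevs': np.array([1., .5, 1.]),
        # For user 0, user 1 is the only neighbor, with similarity 0.8.
        'sortedNeighbors': [[(1, .8)], [(0, .8), (2, .2)], [(1, .2)]],
    }
    # Predict user 0's grade for film 1 (seen by users 1 and 2): the weighted
    # estimate exceeds 5, so it is culled to the maximum grade of 5.
    print(usersPrediction(f=1, u=0, k=2, model=_toy_model, verbose=False))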
def filmsPrediction(f, u, k, model, verbose=True):
"""
Predicts the grade of a user for one film given the (films point of view based) model and the parameter k
:param f: The film index
:param u: The user index
:param k: The parameter k of the k nearest neighbors algorithm
:param model: The prediction model, as built by buildFilmsModel()
:param verbose: If set to True, status prints are made during the execution of the code
:return: An estimation as a floating point number of the grade the user would give to the film
The prediction is made by taking as many neighbors as possible of f, but at most k, who have been graded by u,
chosen by decreasing order of similarity with f.
The average of their normalized grades by u, weighted by their similarities with f, is then computed as a prediction
of the normalized grade of u for f.
The grade is then multiplied by u's standard deviation, added to u's mean grade, and culled if necessary between 1
and 5, to give the final estimate.
If no neighbors of f with a positive similarity with f have been graded by u, the prediction is simply the average
grade given by u.
"""
count = 0
ref_neighbors = []
n_neighbors = len(model['sortedNeighbors'][f])
while len(ref_neighbors) < k and count < n_neighbors:
n = model['sortedNeighbors'][f][count]
sim = model['similarities'][f, n]
if model['boolViewings'][n, u] == 1 and sim > 0:
ref_neighbors.append((n, sim))
count += 1
if len(ref_neighbors) == 0:
if verbose:
print("No (correlated) neighbors have been seen by the user")
return model['userMeans'][u]
meanGrade = 0
totWeights = 0
for n, sim in ref_neighbors:
meanGrade += model['denseRatings'][n, u] * sim
totWeights += sim
meanGrade /= totWeights
meanGrade = min(meanGrade, 5)
meanGrade = max(meanGrade, 1)
return meanGrade
def usersModel_predictionErrorsOverk(k_list, model, testSet=[], verbose=True):
"""
Computes the prediction train error, and optionally test error, for different values of the parameter k using a user
point of view based model.
:param k_list: List of values of k to use
:param model: Users based model for making predictions
:param testSet: Optional testSet, given as a sparse matrix whose non zero values are used as ground truth
:param verbose: If set to True, status prints are made during the execution of the code
:return: The list of train errors over k, and the (possibly empty) list of test errors over k
"""
tr_errors = []
te_errors = []
for k in k_list:
if verbose:
print('k={}:'.format(k))
count = 0
mse = 0
for u in range(model['nU']):
if verbose and u % 100 == 0:
print('User #{}'.format(u + 1))
for f in range(model['nF']):
if model['boolViewings'][f, u] == 1:
mse += (model['denseRatings'][f, u] - usersPrediction(f, u, k, model, verbose=verbose)) ** 2
count += 1
tr_rmse = np.sqrt(mse / count)
tr_errors.append(tr_rmse)
if verbose:
print("Train RMSE : {}".format(tr_rmse))
if len(testSet) > 0:
count = 0
mse = 0
nnz_rows, nnz_cols = testSet.nonzero()
for f, u in list(zip(nnz_rows, nnz_cols)):
mse += (testSet[f, u] - usersPrediction(f, u, k, model, verbose=verbose)) ** 2
count += 1
te_rmse = np.sqrt(mse / count)
te_errors.append(te_rmse)
if verbose:
("Test RMSE : {}".format(te_rmse))
return tr_errors, te_errors
def filmsModel_predictionErrorsOverk(k_list, model, testSet=[], verbose=False):
"""
Computes the prediction train error, and optionally test error, for different values of the parameter k using a
film point of view based model.
:param k_list: List of values of k to use
:param model: Films based model for making predictions
:param testSet: Optional testSet, given as a sparse matrix whose non zero values are used as ground truth
:param verbose: If set to True, status prints are made during the execution of the code
:return: The list of train errors over k, and the (possibly empty) list of test errors over k
"""
tr_errors = []
te_errors = []
for k in k_list:
if verbose:
print('k={}:'.format(k))
count = 0
mse = 0
for u in range(model['nU']):
if verbose and u % 100 == 0:
print('User #{}'.format(u + 1))
for f in range(model['nF']):
if model['boolViewings'][f, u] == 1:
mse += (model['denseRatings'][f, u] - filmsPrediction(f, u, k, model, verbose=verbose)) ** 2
count += 1
tr_rmse = np.sqrt(mse / count)
tr_errors.append(tr_rmse)
if verbose:
print("Train RMSE : {}".format(tr_rmse))
if len(testSet) > 0:
count = 0
mse = 0
nnz_rows, nnz_cols = testSet.nonzero()
for | |
(Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
back (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
"""
lib.TCOD_console_set_color_control(con, fore, back)
@deprecate("Check the `con.default_bg` attribute instead.")
def console_get_default_background(con: tcod.console.Console) -> Color:
"""Return this consoles default background color.
.. deprecated:: 8.5
Use :any:`Console.default_bg` instead.
"""
return Color._new_from_cdata(
lib.TCOD_console_get_default_background(_console(con))
)
@deprecate("Check the `con.default_fg` attribute instead.")
def console_get_default_foreground(con: tcod.console.Console) -> Color:
"""Return this consoles default foreground color.
.. deprecated:: 8.5
Use :any:`Console.default_fg` instead.
"""
return Color._new_from_cdata(
lib.TCOD_console_get_default_foreground(_console(con))
)
@deprecate("Directly access a consoles background color with `console.bg`")
def console_get_char_background(
con: tcod.console.Console, x: int, y: int
) -> Color:
"""Return the background color at the x,y of this console.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.bg`.
"""
return Color._new_from_cdata(
lib.TCOD_console_get_char_background(_console(con), x, y)
)
@deprecate("Directly access a consoles foreground color with `console.fg`")
def console_get_char_foreground(
con: tcod.console.Console, x: int, y: int
) -> Color:
"""Return the foreground color at the x,y of this console.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.fg`.
"""
return Color._new_from_cdata(
lib.TCOD_console_get_char_foreground(_console(con), x, y)
)
@deprecate("Directly access a consoles characters with `console.ch`")
def console_get_char(con: tcod.console.Console, x: int, y: int) -> int:
"""Return the character at the x,y of this console.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.ch`.
"""
return lib.TCOD_console_get_char(_console(con), x, y) # type: ignore
@pending_deprecate()
def console_set_fade(fade: int, fadingColor: Tuple[int, int, int]) -> None:
lib.TCOD_console_set_fade(fade, fadingColor)
@pending_deprecate()
def console_get_fade() -> int:
return int(lib.TCOD_console_get_fade())
@pending_deprecate()
def console_get_fading_color() -> Color:
return Color._new_from_cdata(lib.TCOD_console_get_fading_color())
# handling keyboard input
@deprecate("Use the tcod.event.wait function to wait for events.")
def console_wait_for_keypress(flush: bool) -> Key:
"""Block until the user presses a key, then returns a new Key.
Args:
flush (bool): If True then the event queue is cleared before waiting
for the next event.
Returns:
Key: A new Key instance.
.. deprecated:: 9.3
Use the :any:`tcod.event.wait` function to wait for events.
"""
key = Key()
lib.TCOD_console_wait_for_keypress_wrapper(key.key_p, flush)
return key
@deprecate("Use the tcod.event.get function to check for events.")
def console_check_for_keypress(flags: int = KEY_RELEASED) -> Key:
"""
.. deprecated:: 9.3
Use the :any:`tcod.event.get` function to check for events.
"""
key = Key()
lib.TCOD_console_check_for_keypress_wrapper(key.key_p, flags)
return key
@pending_deprecate()
def console_is_key_pressed(key: int) -> bool:
return bool(lib.TCOD_console_is_key_pressed(key))
# using offscreen consoles
@deprecate("Create a console using `tcod.console.Console(...)` instead.")
def console_new(w: int, h: int) -> tcod.console.Console:
"""Return an offscreen console of size: w,h.
.. deprecated:: 8.5
Create new consoles using :any:`tcod.console.Console` instead of this
function.
"""
return tcod.console.Console(w, h)
def console_from_file(filename: str) -> tcod.console.Console:
"""Return a new console object from a filename.
The file format is automatically determined. This can load REXPaint `.xp`,
ASCII Paint `.apf`, or Non-delimited ASCII `.asc` files.
Args:
filename (Text): The path to the file, as a string.
Returns: A new :any:`Console` instance.
"""
return tcod.console.Console._from_cdata(
lib.TCOD_console_from_file(filename.encode("utf-8"))
)
@deprecate("Call the `Console.blit` method instead.")
def console_blit(
src: tcod.console.Console,
x: int,
y: int,
w: int,
h: int,
dst: tcod.console.Console,
xdst: int,
ydst: int,
ffade: float = 1.0,
bfade: float = 1.0,
) -> None:
"""Blit the console src from x,y,w,h to console dst at xdst,ydst.
.. deprecated:: 8.5
Call the :any:`Console.blit` method instead.
"""
lib.TCOD_console_blit(
_console(src), x, y, w, h, _console(dst), xdst, ydst, ffade, bfade
)
@deprecate(
"Pass the key color to `Console.blit` instead of calling this function."
)
def console_set_key_color(
con: tcod.console.Console, col: Tuple[int, int, int]
) -> None:
"""Set a consoles blit transparent color.
.. deprecated:: 8.5
Pass the key color to :any:`tcod.console.Console.blit` instead of
calling this function.
"""
lib.TCOD_console_set_key_color(_console(con), col)
if hasattr(con, "set_key_color"):
con.set_key_color(col)
def console_delete(con: tcod.console.Console) -> None:
"""Closes the window if `con` is the root console.
libtcod objects are automatically garbage collected once they go out of
scope.
This function exists for backwards compatibility.
.. deprecated:: 9.3
This function is not needed for normal :any:`tcod.console.Console`'s.
The root console should be used in a with statement instead to ensure
that it closes.
"""
con = _console(con)
if con == ffi.NULL:
lib.TCOD_console_delete(con)
warnings.warn(
"Instead of this call you should use a with statement to ensure"
" the root console closes, for example:"
"\n with tcod.console_init_root(...) as root_console:"
"\n ...",
DeprecationWarning,
stacklevel=2,
)
else:
warnings.warn(
"You no longer need to make this call, "
"Console's are deleted when they go out of scope.",
DeprecationWarning,
stacklevel=2,
)
@deprecate("Assign to the console.fg array instead.")
def console_fill_foreground(
con: tcod.console.Console,
r: Sequence[int],
g: Sequence[int],
b: Sequence[int],
) -> None:
"""Fill the foregound of a console with r,g,b.
Args:
con (Console): Any Console instance.
r (Sequence[int]): An array of integers with a length of width*height.
g (Sequence[int]): An array of integers with a length of width*height.
b (Sequence[int]): An array of integers with a length of width*height.
.. deprecated:: 8.4
You should assign to :any:`tcod.console.Console.fg` instead.
"""
if len(r) != len(g) or len(r) != len(b):
raise TypeError("R, G and B must all have the same size.")
if (
isinstance(r, np.ndarray)
and isinstance(g, np.ndarray)
and isinstance(b, np.ndarray)
):
# numpy arrays, use numpy's ctypes functions
r_ = np.ascontiguousarray(r, dtype=np.intc)
g_ = np.ascontiguousarray(g, dtype=np.intc)
b_ = np.ascontiguousarray(b, dtype=np.intc)
cr = ffi.cast("int *", r_.ctypes.data)
cg = ffi.cast("int *", g_.ctypes.data)
cb = ffi.cast("int *", b_.ctypes.data)
else:
# otherwise convert using ffi arrays
cr = ffi.new("int[]", r)
cg = ffi.new("int[]", g)
cb = ffi.new("int[]", b)
lib.TCOD_console_fill_foreground(_console(con), cr, cg, cb)
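# A hedged sketch (illustrative only) of the array-based replacement recommended in
# the deprecation note above: `Console.fg` is a (height, width, 3) numpy array, so a
# whole-console fill is a single broadcast assignment.
if __name__ == "__main__":
    _example_console = tcod.console.Console(80, 50)
    _example_console.fg[:] = (255, 0, 0)       # fill every tile's foreground with red
    _example_console.fg[5, 10] = (0, 255, 0)   # single tile at row 5, column 10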
@deprecate("Assign to the console.bg array instead.")
def console_fill_background(
con: tcod.console.Console,
r: Sequence[int],
g: Sequence[int],
b: Sequence[int],
) -> None:
"""Fill the backgound of a console with r,g,b.
Args:
con (Console): Any Console instance.
r (Sequence[int]): An array of integers with a length of width*height.
g (Sequence[int]): An array of integers with a length of width*height.
b (Sequence[int]): An array of integers with a length of width*height.
.. deprecated:: 8.4
You should assign to :any:`tcod.console.Console.bg` instead.
"""
if len(r) != len(g) or len(r) != len(b):
raise TypeError("R, G and B must all have the same size.")
if (
isinstance(r, np.ndarray)
and isinstance(g, np.ndarray)
and isinstance(b, np.ndarray)
):
# numpy arrays, use numpy's ctypes functions
r_ = np.ascontiguousarray(r, dtype=np.intc)
g_ = np.ascontiguousarray(g, dtype=np.intc)
b_ = np.ascontiguousarray(b, dtype=np.intc)
cr = ffi.cast("int *", r_.ctypes.data)
cg = ffi.cast("int *", g_.ctypes.data)
cb = ffi.cast("int *", b_.ctypes.data)
else:
# otherwise convert using ffi arrays
cr = ffi.new("int[]", r)
cg = ffi.new("int[]", g)
cb = ffi.new("int[]", b)
lib.TCOD_console_fill_background(_console(con), cr, cg, cb)
@deprecate("Assign to the console.ch array instead.")
def console_fill_char(con: tcod.console.Console, arr: Sequence[int]) -> None:
"""Fill the character tiles of a console with an array.
`arr` is an array of integers with a length of the consoles width and
height.
.. deprecated:: 8.4
You should assign to :any:`tcod.console.Console.ch` instead.
"""
if isinstance(arr, np.ndarray):
# numpy arrays, use numpy's ctypes functions
np_array = np.ascontiguousarray(arr, dtype=np.intc)
carr = ffi.cast("int *", np_array.ctypes.data)
else:
# otherwise convert using the ffi module
carr = ffi.new("int[]", arr)
lib.TCOD_console_fill_char(_console(con), carr)
@pending_deprecate()
def console_load_asc(con: tcod.console.Console, filename: str) -> bool:
"""Update a console from a non-delimited ASCII `.asc` file."""
return bool(
lib.TCOD_console_load_asc(_console(con), filename.encode("utf-8"))
)
@pending_deprecate()
def console_save_asc(con: tcod.console.Console, filename: str) -> bool:
"""Save a console to a non-delimited ASCII `.asc` file."""
return bool(
lib.TCOD_console_save_asc(_console(con), filename.encode("utf-8"))
)
@pending_deprecate()
def console_load_apf(con: tcod.console.Console, filename: str) -> bool:
"""Update a console from an ASCII Paint `.apf` file."""
return bool(
lib.TCOD_console_load_apf(_console(con), filename.encode("utf-8"))
)
@pending_deprecate()
def console_save_apf(con: tcod.console.Console, filename: str) -> bool:
"""Save a console to an ASCII Paint `.apf` file."""
return bool(
lib.TCOD_console_save_apf(_console(con), filename.encode("utf-8"))
)
def console_load_xp(con: tcod.console.Console, filename: str) -> bool:
"""Update a console from a REXPaint `.xp` file."""
return bool(
lib.TCOD_console_load_xp(_console(con), filename.encode("utf-8"))
)
def console_save_xp(
con: tcod.console.Console, filename: str, compress_level: int = 9
) -> bool:
"""Save a console to a REXPaint `.xp` file."""
return bool(
lib.TCOD_console_save_xp(
_console(con), filename.encode("utf-8"), compress_level
)
)
def console_from_xp(filename: str) -> tcod.console.Console:
"""Return a single console from a REXPaint `.xp` file."""
return tcod.console.Console._from_cdata(
lib.TCOD_console_from_xp(filename.encode("utf-8"))
)
def console_list_load_xp(
filename: str
) -> Optional[List[tcod.console.Console]]:
"""Return a list of consoles from a REXPaint `.xp` file."""
tcod_list = lib.TCOD_console_list_from_xp(filename.encode("utf-8"))
if tcod_list == ffi.NULL:
return None
try:
python_list = []
lib.TCOD_list_reverse(tcod_list)
while not lib.TCOD_list_is_empty(tcod_list):
python_list.append(
tcod.console.Console._from_cdata(lib.TCOD_list_pop(tcod_list))
)
return python_list
finally:
lib.TCOD_list_delete(tcod_list)
def console_list_save_xp(
console_list: Sequence[tcod.console.Console],
filename: str,
compress_level: int = 9,
) -> bool:
"""Save a list of consoles to a REXPaint `.xp` file."""
tcod_list = lib.TCOD_list_new()
try:
for console in console_list:
lib.TCOD_list_push(tcod_list, _console(console))
return bool(
lib.TCOD_console_list_save_xp(
tcod_list, | |
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-09 09:53:18.676780
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.setting_controller import SettingController
class CacheSettingAtt(SettingController):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| System.SettingController
| CacheSettingAtt
|
| Represents the base object to handle the parameters of the
| cache.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.cache_setting_att = com_object
@property
def activation_mode(self):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780)
| o Property ActivationMode() As boolean
|
| Returns or sets the activation state of cache.
| Role: Returns or sets the value of cache activation.
:return: bool
"""
return self.cache_setting_att.ActivationMode
@activation_mode.setter
def activation_mode(self, value):
"""
:param bool value:
"""
self.cache_setting_att.ActivationMode = value
@property
def cache_max_size_mo(self):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780)
| o Property CacheMaxSizeMo() As long
|
| Returns or sets the value of the cache maximum size.
| Role: Returns or sets the value of the maximum allowed cache size in Mo
:return: int
"""
return self.cache_setting_att.CacheMaxSizeMo
@cache_max_size_mo.setter
def cache_max_size_mo(self, value):
"""
:param int value:
"""
self.cache_setting_att.CacheMaxSizeMo = value
@property
def lod_mode(self):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780)
| o Property LODMode() As boolean
|
| Returns or sets the LOD generation mode parameter.
| Role: Returns or sets the value of the LOD generation mode.
:return: bool
"""
return self.cache_setting_att.LODMode
@lod_mode.setter
def lod_mode(self, value):
"""
:param bool value:
"""
self.cache_setting_att.LODMode = value
@property
def local_path(self):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780)
| o Property LocalPath() As CATBSTR
|
| Retrieves or sets the cache local path.
| Role: Retrieves or sets the value of the cache local path. If the local
| path is defined with environment variables then this method returns the
| unexpanded form.
:return: str
"""
return self.cache_setting_att.LocalPath
@local_path.setter
def local_path(self, value):
"""
:param str value:
"""
self.cache_setting_att.LocalPath = value
@property
def released_voxel(self):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780)
| o Property ReleasedVoxel() As float
|
| Returns or sets the released voxel parameter.
| Role: Returns or sets the value of the released voxel parameter.
:return: float
"""
return self.cache_setting_att.ReleasedVoxel
@released_voxel.setter
def released_voxel(self, value):
"""
:param float value:
"""
self.cache_setting_att.ReleasedVoxel = value
@property
def size_control(self):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780)
| o Property SizeControl() As boolean
|
| Returns or sets the cache size control.
| Role: Returns or sets the cache size control. The cache uses this parameter
| in conjunction with the maximum allowed cache size. If it is turned off, the
| cache size has no limit.
:return: bool
"""
return self.cache_setting_att.SizeControl
@size_control.setter
def size_control(self, value):
"""
:param bool value:
"""
self.cache_setting_att.SizeControl = value
@property
def timestamp_mode(self):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780)
| o Property TimestampMode() As boolean
|
| Retrieves or sets the timestamp control.
| Role: If the timestamp control is turned on, the cache will verify if the
| cached object is up to date with the master object. If not, a new cached view will
| be generated.
| If the timestamp control is turned off, the cache will consider that the
| cached views are always up to date with their master object.
:return: bool
"""
return self.cache_setting_att.TimestampMode
@timestamp_mode.setter
def timestamp_mode(self, value):
"""
:param bool value:
"""
self.cache_setting_att.TimestampMode = value
@property
def utc_time_format(self):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780)
| o Property UTCTimeFormat() As boolean
|
| Retrieves or sets the cache timestamp format.
| Role: If the timestamp format is set to TRUE, then the time used as
| timestamp by the cache is expressed in UTC format (GMT); otherwise the
| local time is used. The default format is local time.
:return: bool
"""
return self.cache_setting_att.UTCTimeFormat
@utc_time_format.setter
def utc_time_format(self, value):
"""
:param bool value:
"""
self.cache_setting_att.UTCTimeFormat = value
def get_activation_mode_info(self, admin_level, o_locked):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetActivationModeInfo(CATBSTR AdminLevel,
| CATBSTR oLocked) As boolean
|
| Retrieves informations about the Cache activation mode.
| Refer to SettingController for a detailed description.
:param str admin_level:
:param str o_locked:
:return: None
"""
return self.cache_setting_att.GetActivationModeInfo(admin_level, o_locked)
def get_cache_max_size_mo_info(self, admin_level, o_locked):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetCacheMaxSizeMoInfo(CATBSTR AdminLevel,
| CATBSTR oLocked) As boolean
|
| Retrieves environment informations for the Cache maximum
| size.
| Refer to SettingController for a detailed description.
:param str admin_level:
:param str o_locked:
:return: None
"""
return self.cache_setting_att.GetCacheMaxSizeMoInfo(admin_level, o_locked)
def get_lod_mode_info(self, admin_level, o_locked):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetLODModeInfo(CATBSTR AdminLevel,
| CATBSTR oLocked) As boolean
|
| Retrieves environment informations for the LOD generation
| mode.
| Refer to SettingController for a detailed description.
:param str admin_level:
:param str o_locked:
:return: None
"""
return self.cache_setting_att.GetLODModeInfo(admin_level, o_locked)
def get_local_path_info(self, admin_level, o_locked):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetLocalPathInfo(CATBSTR AdminLevel,
| CATBSTR oLocked) As boolean
|
| Retrieves environment informations for the Cache local
| path.
| Refer to SettingController for a detailed description.
:param str admin_level:
:param str o_locked:
:return: None
"""
return self.cache_setting_att.GetLocalPathInfo(admin_level, o_locked)
def get_release_path(self):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetReleasePath() As CATSafeArrayVariant
|
| Retrieves the cache release paths.
| Role: Retrieves the cache release paths in a symbolic format.
|
| Parameters:
|
| ioRelPath
| a CATSafeArrayVariant of CATBSTR.
|
| Returns:
| Legal values:
| S_OK : on Success
| E_FAIL: on failure
:return: tuple
"""
return tuple(self.cache_setting_att.GetReleasePath())
def get_release_path_info(self, admin_level, locked):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetReleasePathInfo(CATBSTR AdminLevel,
| CATBSTR Locked) As boolean
|
| Retrieves environment informations for the Cache release
| path.
| Refer to SettingController for a detailed description.
:param str admin_level:
:param str locked:
:return: None
"""
return self.cache_setting_att.GetReleasePathInfo(admin_level, locked)
def get_released_voxel_info(self, admin_level, o_locked):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetReleasedVoxelInfo(CATBSTR AdminLevel,
| CATBSTR oLocked) As boolean
|
| Retrieves environment informations for the Cache released
| voxel.
| Refer to SettingController for a detailed description.
:param str admin_level:
:param str o_locked:
:return: None
"""
return self.cache_setting_att.GetReleasedVoxelInfo(admin_level, o_locked)
def get_size_control_info(self, admin_level, o_locked):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetSizeControlInfo(CATBSTR AdminLevel,
| CATBSTR oLocked) As boolean
|
| Retrieves environment informations for the size control
| mode.
| Refer to SettingController for a detailed description.
:param str admin_level:
:param str o_locked:
:return: None
"""
return self.cache_setting_att.GetSizeControlInfo(admin_level, o_locked)
def get_timestamp_mode_info(self, admin_level, o_locked):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetTimestampModeInfo(CATBSTR AdminLevel,
| CATBSTR oLocked) As boolean
|
| Retrieves environment informations for the timestamp control
| mode.
| Refer to SettingController for a detailed description.
:param str admin_level:
:param str o_locked:
:return: None
"""
return self.cache_setting_att.GetTimestampModeInfo(admin_level, o_locked)
def get_utc_time_format_info(self, admin_level, o_locked):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-09 09:53:18.676780))
| o Func GetUTCTimeFormatInfo(CATBSTR AdminLevel,
| CATBSTR oLocked) As boolean
|
| Retrieves | |
== f"Level {level} not available. Number of available levels: 1"
)
@pytest.mark.parametrize(
"properties",
(
(
{
"openslide.objective-power": "20",
"openslide.level[1].downsample": "4.003",
"openslide.level[2].downsample": "16",
}
),
),
)
def it_knows_its_magnification(self, request, properties, tmpdir):
slide, _ = base_test_slide(tmpdir, PILIMG.RGBA_COLOR_500X500_155_249_240)
property_mock(request, Slide, "properties", return_value=properties)
property_mock(request, Slide, "levels", return_value=[0, 1, 2])
assert slide.level_magnification_factor(1) == "5.0X"
assert slide.level_magnification_factor(2) == "1.25X"
@pytest.mark.parametrize(
"properties, error",
(
(
{
"openslide.objective-power": "20",
},
"Downsample factor for level 1 not available. Available slide "
"properties: ['openslide.objective-power']",
),
(
{
"openslide.level[1].downsample": "4.003",
"openslide.level[2].downsample": "16",
},
"Native magnification not available. Available slide properties: "
"['openslide.level[1].downsample', 'openslide.level[2].downsample']",
),
),
)
def but_it_raises_an_exception_if_metadata_are_unavailable(
self, request, properties, error, tmpdir
):
slide, _ = base_test_slide(tmpdir, PILIMG.RGBA_COLOR_500X500_155_249_240)
property_mock(request, Slide, "properties", return_value=properties)
property_mock(request, Slide, "levels", return_value=[0, 1, 2])
with pytest.raises(SlidePropertyError) as err:
slide.level_magnification_factor(1)
assert isinstance(err.value, SlidePropertyError)
assert str(err.value) == error
@pytest.mark.parametrize(
"properties",
(
(
{
"openslide.objective-power": "20",
"openslide.level[1].downsample": "4.003",
"openslide.level[2].downsample": "16",
},
),
),
)
def and_it_raises_an_exception_if_level_in_incorrect(
self, request, properties, tmpdir
):
slide, _ = base_test_slide(tmpdir, PILIMG.RGBA_COLOR_500X500_155_249_240)
property_mock(request, Slide, "properties", return_value=properties)
property_mock(request, Slide, "levels", return_value=[0, 1, 2])
with pytest.raises(LevelError) as err:
slide.level_magnification_factor(4)
assert isinstance(err.value, LevelError)
assert str(err.value) == "Level 4 not available. Number of available levels: 3"
@pytest.mark.parametrize("level", (1, -3))
def it_raises_an_exception_when_magnification_factor_is_unavailable(
self, level, tmpdir
):
slide, _ = base_test_slide(tmpdir, PILIMG.RGBA_COLOR_500X500_155_249_240)
with pytest.raises(LevelError) as err:
slide.level_magnification_factor(level=level)
assert isinstance(err.value, LevelError)
assert (
str(err.value)
== f"Level {level} not available. Number of available levels: 1"
)
def it_raises_an_exception_when_native_magnification_in_unavailable(self, tmpdir):
slide, _ = base_test_slide(tmpdir, PILIMG.RGBA_COLOR_500X500_155_249_240)
with pytest.raises(SlidePropertyError) as err:
slide.level_magnification_factor()
assert isinstance(err.value, SlidePropertyError)
assert (
str(err.value)
== "Native magnification not available. Available slide properties: []"
)
@pytest.mark.parametrize(
"coords, expected_result",
(
(CP(0, 40, 0, 40), True), # point
(CP(0, 0, 48, 50), True), # valid box
(CP(800000, 90000, 8000010, 90010), False), # out of bounds box
(CP(800000, 90000, -1, 90010), False), # negative coordinates
),
)
def it_knows_if_coords_are_valid(self, coords, expected_result, tmpdir):
slide, _ = base_test_slide(tmpdir, PILIMG.RGBA_COLOR_49X51_155_0_0)
_are_valid = slide._has_valid_coords(coords)
assert type(_are_valid) == bool
assert _are_valid == expected_result
def it_knows_its_levels(self, tmpdir):
slide, _ = base_test_slide(tmpdir, PILIMG.RGBA_COLOR_500X500_155_249_240)
levels = slide.levels
assert type(levels) == list
assert levels == [0]
def it_can_access_to_its_properties(self, request):
slide = Slide("path", "processed")
properties = property_mock(request, Slide, "properties")
properties.return_value = {"foo": "bar"}
assert slide.properties == {"foo": "bar"}
@pytest.mark.parametrize("level, expected_value", ((-1, 8), (-2, 7), (-9, 0)))
def it_can_remap_negative_level_indices(self, level, expected_value, levels_prop):
levels_prop.return_value = [0, 1, 2, 3, 4, 5, 6, 7, 8]
slide = Slide("path", "processed")
assert slide._remap_level(level) == expected_value
def but_it_raises_a_level_error_when_it_cannot_be_mapped(self, tmpdir, levels_prop):
levels_prop.return_value = [0, 1, 2, 3, 4, 5, 6, 7, 8]
slide, _ = base_test_slide(tmpdir, PILIMG.RGB_RANDOM_COLOR_500X500)
with pytest.raises(LevelError) as err:
slide._remap_level(-10)
assert isinstance(err.value, LevelError)
assert (
str(err.value) == "Level -10 not available. Number of available levels: 1"
)
# fixture components ---------------------------------------------
@pytest.fixture
def levels_prop(self, request):
return property_mock(request, Slide, "levels")
@pytest.fixture
def resampled_dims_(self, request):
return method_mock(request, Slide, "_resampled_dimensions")
@pytest.fixture
def dimensions_(self, request):
return property_mock(request, Slide, "dimensions")
class Describe_Slideset:
def it_constructs_from_args(self, request):
_init_ = initializer_mock(request, SlideSet)
_slides_path = "/foo/bar/"
_processed_path = "/foo/bar/wsislides/processed"
_valid_extensions = [".svs", ".tiff"]
_keep_slides = ["mywsi.svs"]
_slide_kwargs = {"use_largeimage": True}
slideset = SlideSet(
_slides_path,
_processed_path,
_valid_extensions,
_keep_slides,
_slide_kwargs,
)
_init_.assert_called_once_with(
ANY,
_slides_path,
_processed_path,
_valid_extensions,
_keep_slides,
_slide_kwargs,
)
assert isinstance(slideset, SlideSet)
def it_can_construct_slides(self, request, tmpdir, Slide_):
tmp_path_ = tmpdir.mkdir("myslide")
slides_ = method_mock(request, SlideSet, "__iter__")
slides_.return_value = [Slide_ for _ in range(10)]
slideset = SlideSet(tmp_path_, os.path.join(tmp_path_, "b"), [".svs"])
slides = slideset.__iter__()
slides_.assert_called_once_with(slideset)
assert len(slides) == 10
def it_knows_its_slides(self, tmpdir):
tmp_path_ = tmpdir.mkdir("myslide")
image = PILIMG.RGBA_COLOR_500X500_155_249_240
image.save(os.path.join(tmp_path_, "mywsi1.svs"), "TIFF")
image.save(os.path.join(tmp_path_, "mywsi2.svs"), "TIFF")
slideset = SlideSet(tmp_path_, "proc", [".svs"])
assert len(slideset) == 2
# it can keep a subset of slides
slideset = SlideSet(tmp_path_, "proc", [".svs"], keep_slides=["mywsi1.svs"])
assert len(slideset) == 1
slideset = SlideSet(None, "proc", [".svs"])
assert len(slideset) == 0
with pytest.raises(FileNotFoundError) as err:
slideset = SlideSet("fake/path", "proc", [".svs"])
list(slideset)
assert isinstance(err.value, FileNotFoundError)
assert err.value.errno == errno.ENOENT
@pytest.mark.parametrize("slide_kwargs", (({"use_largeimage": True}), ({})))
def it_creates_its_slides_with_the_correct_parameters(
self, tmpdir, request, slide_kwargs
):
slide_init_ = initializer_mock(request, Slide)
tmp_path_ = tmpdir.mkdir("myslide")
image = PILIMG.RGBA_COLOR_500X500_155_249_240
image.save(os.path.join(tmp_path_, "mywsi1.svs"), "TIFF")
slideset = SlideSet(tmp_path_, "proc", [".svs"], slide_kwargs=slide_kwargs)
slideset.__iter__()
slide_init_.assert_called_once_with(
ANY, os.path.join(tmp_path_, "mywsi1.svs"), "proc", **slide_kwargs
)
def it_can_access_directly_to_the_slides(self, request, Slide_):
slideset = instance_mock(request, SlideSet)
slideset.__iter__.side_effect = iter([Slide_])
slideset[0]
slideset.__getitem__.assert_called_once_with(0)
def and_it_is_exaclty_what_expected(self, tmpdir):
tmp_path_ = tmpdir.mkdir("myslide")
image = PILIMG.RGBA_COLOR_500X500_155_249_240
image.save(os.path.join(tmp_path_, "mywsi.svs"), "TIFF")
slideset = SlideSet(tmp_path_, "proc", [".svs"])
slide = slideset[0]
np.testing.assert_array_almost_equal(
slide.resampled_array(), slideset[0].resampled_array()
)
def it_constructs_its_sequence_of_slides_to_help(self, tmpdir):
tmp_path_ = tmpdir.mkdir("myslide")
image = PILIMG.RGBA_COLOR_500X500_155_249_240
image.save(os.path.join(tmp_path_, "mywsi.svs"), "TIFF")
image2 = PILIMG.RGBA_COLOR_50X50_155_0_0
image2.save(os.path.join(tmp_path_, "mywsi2.svs"), "TIFF")
slideset = SlideSet(tmp_path_, "proc", [".svs"])
expected_slides = [
Slide(os.path.join(tmp_path_, _path), "proc")
for _path in os.listdir(tmp_path_)
]
slides = slideset.__iter__()
for i, slide in enumerate(slides):
np.testing.assert_array_almost_equal(
slide.resampled_array(), expected_slides[i].resampled_array()
)
def it_knows_the_slides_dimensions(self, tmpdir):
tmp_path_ = tmpdir.mkdir("myslide")
image = PILIMG.RGBA_COLOR_500X500_155_249_240
image.save(os.path.join(tmp_path_, "mywsi.svs"), "TIFF")
image2 = PILIMG.RGBA_COLOR_50X50_155_0_0
image2.save(os.path.join(tmp_path_, "mywsi2.svs"), "TIFF")
slideset = SlideSet(tmp_path_, "proc", [".svs"])
slides_dimensions = slideset._slides_dimensions
expected_value = [
{"slide": "mywsi", "width": 500, "height": 500, "size": 250000},
{"slide": "mywsi2", "width": 50, "height": 50, "size": 2500},
]
assert dict_list_eq(slides_dimensions, expected_value) is True
def it_knows_its_slides_dimensions_list(self, tmpdir):
tmp_path_ = tmpdir.mkdir("myslide")
image = PILIMG.RGBA_COLOR_500X500_155_249_240
image.save(os.path.join(tmp_path_, "mywsi.svs"), "TIFF")
image2 = PILIMG.RGBA_COLOR_50X50_155_0_0
image2.save(os.path.join(tmp_path_, "mywsi2.svs"), "TIFF")
slideset = SlideSet(tmp_path_, "proc", [".svs"])
_slides_dimensions_list = slideset._slides_dimensions_list
assert sorted(_slides_dimensions_list) == sorted([(500, 500), (50, 50)])
def it_knows_its_total_slides(self, request, Slide_):
slides = method_mock(request, SlideSet, "__iter__")
slides.return_value = [Slide_ for _ in range(4)]
slideset = SlideSet("the/path", "proc", [".svs"])
total_slides = len(slideset)
assert total_slides == 4
def it_knows_its_avg_width_slide(self, _slides_dimensions_prop, total_slides_prop):
total_slides_prop.return_value = 2
_slides_dimensions_prop.return_value = [
{"slide": "mywsi", "width": 500, "height": 500, "size": 250000},
{"slide": "mywsi2", "width": 50, "height": 50, "size": 2500},
]
slideset = SlideSet("fake/path", "proc", [".svs"])
_avg_width_slide = slideset._avg_width_slide
assert _avg_width_slide == 275.0
assert (
_avg_width_slide
== sum(d["width"] for d in _slides_dimensions_prop.return_value) / 2
)
def it_knows_its_avg_height_slide(self, _slides_dimensions_prop, total_slides_prop):
total_slides_prop.return_value = 2
_slides_dimensions_prop.return_value = [
{"slide": "mywsi", "width": 500, "height": 100, "size": 250000},
{"slide": "mywsi2", "width": 50, "height": 50, "size": 2500},
]
slideset = SlideSet("fake/path", "proc", [".svs"])
_avg_height_slide = slideset._avg_height_slide
assert _avg_height_slide == 75.0
assert (
_avg_height_slide
== sum(d["height"] for d in _slides_dimensions_prop.return_value) / 2
)
def it_knows_its_avg_size_slide(self, _slides_dimensions_prop, total_slides_prop):
total_slides_prop.return_value = 2
_slides_dimensions_prop.return_value = [
{"slide": "mywsi", "width": 500, "height": 100, "size": 250000},
{"slide": "mywsi2", "width": 50, "height": 50, "size": 2500},
]
slideset = SlideSet("fake/path", "proc", [".svs"])
_avg_size_slide = slideset._avg_size_slide
assert _avg_size_slide == 126250.0
assert (
_avg_size_slide
== sum(d["size"] for d in _slides_dimensions_prop.return_value) / 2
)
def it_knows_its_max_height_slide(self, _slides_dimensions_prop):
_slides_dimensions_prop.return_value = [
{"slide": "mywsi", "width": 500, "height": 100, "size": 250000},
{"slide": "mywsi2", "width": 50, "height": 50, "size": 2500},
]
slideset = SlideSet("fake/path", "proc", [".svs"])
_max_height_slide = slideset._max_height_slide
assert _max_height_slide == {"slide": "mywsi", "height": 100}
def it_knows_its_max_size_slide(self, _slides_dimensions_prop):
_slides_dimensions_prop.return_value = [
{"slide": "mywsi", "width": 500, "height": 100, "size": 250000},
{"slide": "mywsi2", "width": 50, "height": 50, "size": 2500},
]
slideset = SlideSet("fake/path", "proc", [".svs"])
_max_size_slide = slideset._max_size_slide
assert _max_size_slide == {"slide": "mywsi", "size": 250000}
def it_knows_its_max_width_slide(self, _slides_dimensions_prop):
_slides_dimensions_prop.return_value = [
{"slide": "mywsi", "width": 500, "height": 100, "size": 250000},
{"slide": "mywsi2", "width": 600, "height": 50, "size": 2500},
]
slideset = SlideSet("fake/path", "proc", [".svs"])
_max_width_slide = slideset._max_width_slide
assert _max_width_slide == {"slide": "mywsi2", "width": 600}
def it_knows_its_min_width_slide(self, _slides_dimensions_prop):
_slides_dimensions_prop.return_value = [
{"slide": "mywsi", "width": 500, "height": 100, "size": 250000},
{"slide": "mywsi2", "width": 600, "height": 50, "size": 2500},
]
slideset = SlideSet("fake/path", "proc", [".svs"])
_min_width_slide = slideset._min_width_slide
assert _min_width_slide == {"slide": "mywsi", "width": 500}
def it_knows_its_min_height_slide(self, _slides_dimensions_prop):
_slides_dimensions_prop.return_value = [
{"slide": "mywsi", "width": 500, "height": 100, "size": 250000},
{"slide": "mywsi2", "width": 600, "height": 50, "size": 2500},
]
slideset = SlideSet("fake/path", "proc", [".svs"])
_min_height_slide = slideset._min_height_slide
assert _min_height_slide == {"slide": "mywsi2", "height": 50}
def it_knows_its_min_size_slide(self, _slides_dimensions_prop):
_slides_dimensions_prop.return_value = [
{"slide": "mywsi", "width": 500, "height": 100, "size": 250000},
{"slide": "mywsi2", "width": 600, "height": 50, "size": 2500},
]
slideset = SlideSet("fake/path", "proc", [".svs"])
_min_size_slide = slideset._min_size_slide
assert _min_size_slide == {"slide": "mywsi2", "size": 2500}
def it_knows_its_scaled_slides(self, request, tmpdir):
tmp_path_ = tmpdir.mkdir("myslide")
slide1 = instance_mock(request, Slide)
slide2 = instance_mock(request, Slide)
slideset = SlideSet(tmp_path_, os.path.join(tmp_path_, "processed"), [])
slides = method_mock(request, SlideSet, "__iter__")
slides.return_value = [slide1, slide2]
slideset.scaled_images(32, 2)
slide1.scaled_image.assert_called_once_with(32)
slide2.scaled_image.assert_called_once_with(32)
def it_knows_its_thumbnails(self, request, tmpdir):
tmp_path_ = tmpdir.mkdir("myslide")
thumbnail_ = property_mock(request, Slide, "thumbnail")
slide1 = Slide("foo/bar", "proc")
slide2 = Slide("foo/bar", "proc")
slideset = SlideSet(tmp_path_, os.path.join(tmp_path_, "processed"), [])
slides = method_mock(request, SlideSet, | |
"""
STARDATE
=====================
This package produces posterior PDFs over age, mass, bulk metallicity,
distance and V-band extinction for stars from their spectroscopic parameters
(T_eff, log g and observed bulk metallicity), their apparent magnitudes in
a range of bandpasses, their parallaxes and rotation periods, if available.
The minimum requirements for producing age estimates are photometric colors,
however the addition of extra information improves the constraint on the
stellar properties. In particular, this method is designed to incorporate a
gyrochronology model into standard isochrone fitting. If you do not have
rotation periods for your stars, the posteriors over parameters produced by
this code will be very similar to those produced using the isochrones.py
package on its own.
"""
import numpy as np
from isochrones.mist import MIST_Isochrone
from isochrones import StarModel, get_ichrone
# mist = MIST_Isochrone(bands)
bands = ["B", "V", "J", "H", "K", "BP", "RP", "G"]
mist = get_ichrone("mist", bands=bands)
import emcee
import h5py
def gyro_model(log10_age, bv):
"""Predict a rotation period from an age and B-V colour.
Given a B-V colour and an age, predict a rotation period using the Angus
et al. (2015) gyrochronology model.
Args:
log10_age (float or array): The logarithmic age of a star or stars,
log10(age), in years.
bv (float or array): The B-V colour of a star or stars.
Returns:
The log10 rotation period in days.
"""
age_myr = (10**log10_age)*1e-6
a, b, c, n = [.4, .31, .45, .55]
if bv < c:
return 0
else:
return (n*np.log10(age_myr) + np.log10(a) + b*np.log10(bv-c))
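# A minimal usage sketch (illustrative numbers): the function above returns
# log10(period / days), so exponentiate to get a period. A roughly solar-age,
# solar-colour star (log10(age) ~ 9.66, B-V ~ 0.65) lands near the ~25 day
# rotation period expected for the Sun.
if __name__ == "__main__":
    log_p = gyro_model(np.log10(4.56e9), 0.65)
    print(10**log_p)  # rotation period in days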
def gk_rotation_model(log10_age, bprp):
"""
Predicts log10 rotation period from log10 color and log10 age.
Only applicable to GK dwarfs.
Args:
log10_age (float): The (log10) age.
bprp (float): The G_bp - G_rp color.
Returns:
log10_period (float): The period.
"""
log10_bprp = np.log10(bprp)
# Parameters with Solar bp - rp = 0.82
p = [-38.957586198640314, 28.709418579540294, -4.919056437046026,
0.7161114835620975, -4.716819674578521, 0.6470950862322454,
-13.558898318835137, 0.9359250478865809]
return np.polyval(p[:5], log10_bprp) + p[5]*log10_age
def gk_age_model(log10_period, bprp):
"""
Predicts log10 age from G_bp - G_rp color and log10 period.
Only applicable to GK dwarfs.
Args:
log10_period (array): The (log10) period array.
bprp (array): The G_bp - G_rp color array.
Returns:
log10_age (array): The (log10) age array.
"""
log10_bprp = np.log10(bprp)
# Hard-code the gyro parameters :-)
p = [-38.957586198640314, 28.709418579540294, -4.919056437046026,
0.7161114835620975, -4.716819674578521, 0.6470950862322454,
-13.558898318835137, 0.9359250478865809]
logage = (log10_period - np.polyval(p[:5], log10_bprp))/p[5]
return logage
def gyro_model_praesepe(log10_age, bprp):
"""
Predicts log10 rotation period from log10 color and log10 age.
Args:
log10_age (float): The (log10) age.
bprp (float): The G_bp - G_rp color.
Returns:
log10_period (float): The period.
"""
# The log10 rotation period is pinned at .56 if the star is very hot.
# Don't try to take log of negative number.
if bprp < 0.:
return .56
log10_bprp = np.log10(bprp)
# Hard-code the gyro parameters :-)
# c4, c3, c2, c1, c0, cA, b1, b0
# Parameters with Solar bp - rp = 0.82
p = [-38.957586198640314, 28.709418579540294, -4.919056437046026,
0.7161114835620975, -4.716819674578521, 0.6470950862322454,
-13.558898318835137, 0.9359250478865809]
# Parameters with Solar bp - rp = 0.77
# p = [-38.982347111370984, 28.706848179526098, -4.922906414784183,
# 0.7176636876966253, -5.489008990829778, 0.7347258099244045,
# -13.55785651951684, 0.16105197784241776]
if log10_bprp >= .43:
return np.polyval(p[6:], log10_bprp) + p[5]*log10_age
elif log10_bprp < -.25:
return 0.56
else:
return np.polyval(p[:5], log10_bprp) + p[5]*log10_age
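# A minimal usage sketch (illustrative only): very hot stars (bp - rp < 0) are
# pinned at log10(P) = 0.56, while stars in the calibrated colour range get a
# colour- and age-dependent prediction.
if __name__ == "__main__":
    print(gyro_model_praesepe(9.66, -0.1))      # pinned value for a hot star
    print(10**gyro_model_praesepe(9.66, 0.82))  # period in days at bp - rp = 0.82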
def age_model(log10_period, bprp):
"""
Predicts log10 age from G_bp - G_rp color and log10 period.
Args:
log10_period (array): The (log10) period array.
bprp (array): The G_bp - G_rp color array.
Returns:
log10_age (array): The (log10) age array.
"""
# If star is very hot, return the age of the Universe.
# Don't try to take the log of a negative number.
if bprp < 0:
return 10.14
log10_bprp = np.log10(bprp)
# Hard-code the gyro parameters :-)
p = [-38.957586198640314, 28.709418579540294, -4.919056437046026,
0.7161114835620975, -4.716819674578521, 0.6470950862322454,
-13.558898318835137, 0.9359250478865809]
# p = [-38.982347111370984, 28.706848179526098, -4.922906414784183,
# 0.7176636876966253, -5.489008990829778, 0.7347258099244045,
# -13.55785651951684, 0.16105197784241776]
if log10_bprp >= .43:
# return (log10_period - np.polyval(p[6:], log10_bprp))/p[5]
return 10.14 # The age of the universe
elif log10_bprp < -.25:
return 10.14
else:
logage = (log10_period - np.polyval(p[:5], log10_bprp))/p[5]
return logage
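# A minimal consistency check (illustrative only): within the calibrated colour
# range, age_model inverts gyro_model_praesepe up to floating-point error.
if __name__ == "__main__":
    _bprp, _log_age = 0.82, 9.5
    _log_p = gyro_model_praesepe(_log_age, _bprp)
    print(age_model(_log_p, _bprp))  # ~9.5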
def gyro_model_rossby(params, Ro_cutoff=2, rossby=True, model="praesepe"):
"""Predict a rotation period from parameters EEP, age, feh, distance, Av.
Args:
params (array): The stellar parameters: EEP, log10(age), [Fe/H],
distance and Av.
Ro_cutoff (float, optional): The critical Rossby number after which
stars retain their rotation period. This is 2.16 in van Saders et
al. (2016) and 2.08 in van Saders et al. (2018). We adopt a
default value of 2.
rossby (Optional[bool]): If True (default), the van Saders (2016)
weakened magnetic braking law will be implemented. If false, the
gyrochronology relation will be used unmodified.
model (Optional[str)]: The gyrochronology model. If "praesepe", the
Praesepe-based gyro model will be used (default) and if "angus15",
the Angus et al. (2015) model will be used.
Returns:
The log10(rotation period) and the period standard deviation in dex.
"""
if model == "angus15":
color = calc_bv(params)
elif model == "praesepe":
color = calc_bprp(params)
mass = mist.interp_value([params[0], params[1], params[2]], ["mass"])
# If color is nan, return nan. This should be caught by the lhf.
if not np.isfinite(color):
return np.nan, np.nan
# Calculate the additional sigma
sig = sigma(params[0], params[1], params[2], color, model=model)
log_P = period_model(color, mass, params[1], Ro_cutoff=Ro_cutoff,
rossby=rossby, model=model)
return log_P, sig
def period_model(color, mass, age, Ro_cutoff=2, rossby=True,
model="praesepe"):
"""Predict a rotation period from an age, color and mass.
Predict a rotation period from an age, color and mass using either the
Angus et al. (2019) Praesepe model or the Angus et al. (2015)
gyrochronology model with the van Saders et al. (2016) weakened magnetic
braking correction.
Args:
color (float): Either a star's Gaia G_BP - G_RP color, if using the
praesepe model, or its B-V color, if using the Angus 2015 model.
mass (float): Stellar mass in Solar units.
age (float): log10 stellar age in years.
Ro_cutoff (float, optional): The critical Rossby number after which
stars retain their rotation period. This is 2.16 in van Saders et
al. (2016) and 2.08 in van Saders et al. (2018). We adopt a
default value of 2.
        rossby (Optional[bool]): If True (default), the van Saders (2016)
            weakened magnetic braking law will be implemented. If False, the
            gyrochronology relation will be used unmodified.
        model (Optional[str]): The gyrochronology model. If "praesepe", the
Praesepe-based gyro model will be used (default) and if "angus15",
the Angus et al. (2015) model will be used.
Returns:
The log10(rotation period) and the standard deviation in dex.
"""
if not rossby: # If Rossby model is switched off
# Standard gyro model
if model == "angus15":
log_P = gyro_model(age, color)
elif model == "praesepe":
log_P = gyro_model_praesepe(age, color)
return log_P
# Otherwise the Rossby model is switched on.
# Calculate the maximum theoretical rotation period for this mass.
pmax = Ro_cutoff * convective_overturn_time(mass)
# Calculate the age this star reaches pmax, based on its B-V color.
if model == "angus15":
# Angus et al. (2015) parameters.
a, b, c, n = [.4, .31, .45, .55]
if color < c:
log10_age_thresh = 10.14 # The age of the Universe
else:
age_thresh_myr = (pmax/(a*(color-c)**b))**(1./n)
log10_age_thresh = np.log10(age_thresh_myr*1e6)
elif model == "praesepe":
log10_age_thresh = age_model(np.log10(pmax), color)
# If star younger than critical age, predict rotation from age and color.
if age < log10_age_thresh:
if model == "angus15":
log_P = gyro_model(age, color)
elif model == "praesepe":
log_P = gyro_model_praesepe(age, color)
# If star older than this age, return maximum possible rotation period.
elif age >= log10_age_thresh:
log_P = np.log10(pmax)
return log_P
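# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a self-contained
# illustration of the Rossby-cutoff behaviour implemented in period_model
# above. The spin-down law and overturn time are toy stand-ins, not the
# module's calibrated gyro_model_praesepe/convective_overturn_time functions.
def _demo_rossby_capped_period(age_gyr, tau_conv_days, ro_cutoff=2.0):
    """Return a toy rotation period (days) capped at Ro_cutoff * tau_conv."""
    # Toy Skumanich-style spin-down (P ~ sqrt(age)), scaled to the Sun.
    p_gyro = 26.0 * np.sqrt(age_gyr / 4.57)
    # Weakened-braking cap: stars stop spinning down once P reaches pmax.
    p_max = ro_cutoff * tau_conv_days
    return min(p_gyro, p_max)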
def calc_bv(mag_pars):
"""Calculate a B-V colour from stellar parameters.
Calculate B-V colour from stellar parameters [EEP, log10(age, yrs), feh,
distance (in parsecs) and extinction] using MIST isochrones.
Args:
mag_pars (list): A list containing EEP, log10(age) in years,
metallicity, distance in parsecs and V-band extinction, Av, for a
star.
Returns:
B-V color.
"""
_, _, _, bands = mist.interp_mag([*mag_pars], ["B", "V"])
B, V = bands
return B-V
def calc_bprp(mag_pars):
"""Calculate a G_bp-G_rp colour from stellar parameters.
Calculate bp-rp colour from stellar parameters [EEP, log10(age, yrs), feh,
distance (in parsecs) and extinction] using MIST isochrones.
Args:
mag_pars (list): A list containing EEP, log10(age) in years,
            metallicity, distance in parsecs and V-band extinction, Av, for a
            star.
    Returns:
        G_bp - G_rp color.
    """
    # The band names passed to mist.interp_mag below are an assumption,
    # mirroring calc_bv above.
    _, _, _, bands = mist.interp_mag([*mag_pars], ["BP", "RP"])
    bp, rp = bands
    return bp - rp
# coding=utf-8
"""
Connections to IQFeed.exe to get different types of data.
This module contains various Conn classes called XXXConn each of
which connects to IQFeed and helps to return market data from it.
Some of the XXXConn classes (like HistoryConn), which provide data
that should be available when requested, provide the data
requested as the return value of the function that requests the
data. Other XXXConn classes (like QuoteConn) which provide streaming
data, require you to implement a class that derives from one of the
Listeners in listeners.py; the data is delivered by calling callbacks
on those classes as it comes in.
All XXXConn classes send status messages to listener classes. While
a listener class is not strictly necessary when using something like
HistoryConn, if things aren't working right you may want to attach a
listener so you aren't ignoring a status message that explains what's
going wrong.
Data that you are likely to use for analysis is returned as numpy
structured arrays. Other data is normally returned as a namedtuple
specific to that message type.
FeedConn is the base class for all the XXXConn classes.
QuoteConn provides real-time tick-data and real-time news headlines.
AdminConn provides status messages about the status of the Feed etc.
HistoryConn provides historical data.
LookupConn lets you lookup symbols and option and futures chains.
TableConn provides reference data like condition codes and exchanges.
BarConn lets you request real-time interval bars instead of calculating them
yourself from the tick-data (from QuoteConn).
NewsConn lets you get news-headlines in bulk (as opposed to real-time news
which you can get from QuoteConn) and full news stories from the story id.
See http://www.iqfeed.net/dev/main.cfm for more information.
"""
import os
import datetime
import itertools
import select
import socket
import threading
import time
from collections import deque, namedtuple
from typing import Sequence, List
# noinspection PyPep8Naming
import xml.etree.ElementTree as etree
import numpy as np
from .exceptions import NoDataError, UnexpectedField, UnexpectedMessage
from .exceptions import UnexpectedProtocol, UnauthorizedError
from . import field_readers as fr
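# --------------------------------------------------------------------------
# Hedged sketch (not part of the original module): the listener pattern the
# module docstring describes. Real listeners should derive from the classes
# in listeners.py; only the feed_is_stale() callback is shown here because it
# is the one invoked in the code below, and the full callback set is assumed
# to be larger.
class _ExampleStatusListener:
    """Minimal illustrative listener that just prints feed-status changes."""

    def feed_is_stale(self) -> None:
        # Called when IQFeed.exe loses its connection to DTN's servers.
        print("IQFeed lost its connection to DTN's servers.")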
class FeedConn:
"""
FeedConn is the base class for other XXXConn classes
It handles connecting, disconnecting, sending messages to IQFeed,
reading responses from IQFeed, feed status messages etc.
"""
protocol = "5.2"
iqfeed_host = os.getenv('IQFEED_HOST') or "127.0.0.1"
quote_port = int(os.getenv('IQFEED_PORT_QUOTE') or 5009)
lookup_port = int(os.getenv('IQFEED_PORT_LOOKUP') or 9100)
depth_port = int(os.getenv('IQFEED_PORT_DEPTH') or 9200)
admin_port = int(os.getenv('IQFEED_PORT_ADMIN') or 9300)
deriv_port = int(os.getenv('IQFEED_PORT_DERIV') or 9400)
host = iqfeed_host
port = quote_port
def __init__(self, name: str, host: str, port: int):
self._host = host
self._port = port
self._name = name
self._stop = threading.Event()
self._start_lock = threading.Lock()
self._connected = False
self._reconnect_failed = False
self._pf_dict = {}
self._sm_dict = {}
self._listeners = []
self._buf_lock = threading.RLock()
self._send_lock = threading.RLock()
self._recv_buf = ""
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._read_thread = threading.Thread(group=None, target=self,
name="%s-reader" % self._name,
args=(), kwargs={}, daemon=None)
self._set_message_mappings()
def connect(self) -> None:
"""
Connect to the appropriate socket and start the reading thread.
You must call this before you start using an XXXConn class. If
        this thread is not running, no callbacks will be called and no data
        will be returned, even by functions that normally return data
        immediately.
"""
self._sock.connect((self._host, self._port))
self._set_protocol(FeedConn.protocol)
self._set_client_name(self.name())
self._send_connect_message()
self.start_runner()
def start_runner(self) -> None:
"""Called to start the reading thread."""
with self._start_lock:
self._stop.clear()
if not self.reader_running():
self._read_thread.start()
def disconnect(self) -> None:
"""
Stop the reading thread and disconnect from the socket to IQFeed.exe
Call this to ensure sockets are closed and we exit cleanly.
"""
self.stop_runner()
if self._sock:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
self._sock = None
def stop_runner(self) -> None:
"""Called to stop the reading and message processing thread."""
with self._start_lock:
self._stop.set()
if self.reader_running():
self._read_thread.join(30)
def reader_running(self) -> bool:
"""
True if the reader thread is running.
If you don't get updates for a while you "may" want to query this
function. Mainly useful for debugging during development of the
library. If the reader thread is crashing, there is likely a bug
in the library or something else is going very wrong.
"""
return self._read_thread.is_alive()
def connected(self) -> bool:
"""
Returns true if IQClient.exe is connected to DTN's servers.
It may take a few seconds after connecting to IQFeed for IQFeed to tell
us it is connected to DTN's servers. During these few seconds, this
function will return False even though it's not actually a problem.
NOTE: It's not telling you if you are connected to IQFeed.exe. It's
telling you if IQFeed.exe is connected to DTN's servers.
"""
return self._connected
def name(self) -> str:
"""Return whatever you named this conn class in the constructor"""
return self._name
def _send_cmd(self, cmd: str) -> None:
with self._send_lock:
# noinspection PyArgumentEqualDefault
self._sock.sendall(cmd.encode(encoding='latin-1', errors='strict'))
def reconnect_failed(self) -> bool:
"""
Returns true if IQClient.exe failed to reconnect to DTN's servers.
It can and does happen that IQClient.exe drops a connection to DTN's
servers and then reconnects. This is not a big problem. But if a
reconnect fails this means there is a big problem and you should
probably pause trading and figure out what's going on with your
network.
"""
return self._reconnect_failed
def __call__(self):
"""The reader thread runs this in a loop."""
while not self._stop.is_set():
if self._read_messages():
self._process_messages()
def _read_messages(self) -> bool:
"""Read raw text sent by IQFeed on socket"""
ready_list = select.select([self._sock], [], [self._sock], 5)
if ready_list[2]:
            raise RuntimeError(
                "Error condition on socket connection to IQFeed: %s" %
                self.name())
if ready_list[0]:
data_recvd = self._sock.recv(1024).decode('latin-1')
with self._buf_lock:
self._recv_buf += data_recvd
return True
return False
def _next_message(self) -> str:
"""Next complete message from buffer of delimited messages"""
with self._buf_lock:
next_delim = self._recv_buf.find('\n')
if next_delim != -1:
message = self._recv_buf[:next_delim].strip()
self._recv_buf = self._recv_buf[(next_delim + 1):]
return message
else:
return ""
def _set_message_mappings(self) -> None:
"""Creates map of message names to processing functions."""
self._pf_dict['E'] = self._process_error
self._pf_dict['T'] = self._process_timestamp
self._pf_dict['S'] = self._process_system_message
self._sm_dict["SERVER DISCONNECTED"] = \
self._process_server_disconnected
self._sm_dict["SERVER CONNECTED"] = self._process_server_connected
self._sm_dict[
"SERVER RECONNECT FAILED"] = self._process_reconnect_failed
self._sm_dict["CURRENT PROTOCOL"] = self._process_current_protocol
self._sm_dict["STATS"] = self._process_conn_stats
def _process_messages(self) -> None:
"""Process the next complete message waiting to be processed"""
message = self._next_message()
while "" != message:
fields = message.split(',')
handle_func = self._processing_function(fields)
handle_func(fields)
message = self._next_message()
def _processing_function(self, fields):
"""Returns the processing function for this specific message."""
pf = self._pf_dict.get(fields[0][0])
if pf is not None:
return pf
else:
return self._process_unregistered_message
def _process_unregistered_message(self, fields: Sequence[str]) -> None:
"""Called if we get a message we don't expect.
Appropriate action here is probably to crash.
"""
err_msg = ("Unexpected message received by %s: %s" % (
self.name(), ",".join(fields)))
raise UnexpectedMessage(err_msg)
def _process_system_message(self, fields: Sequence[str]) -> None:
"""
Called when the next message is a system message.
System messages are messages about the state of the data delivery
system, including IQConnect.exe, DTN servers and connectivity.
"""
assert len(fields) > 1
assert fields[0] == "S"
processing_func = self._system_processing_function(fields)
processing_func(fields)
def _system_processing_function(self, fields):
"""Returns the appropriate system message handling function."""
assert len(fields) > 1
assert fields[0] == "S"
spf = self._sm_dict.get(fields[1])
if spf is not None:
return spf
else:
return self._process_unregistered_system_message
def _process_unregistered_system_message(self,
fields: Sequence[str]) -> None:
"""
Called if we get a system message we don't know how to handle.
Appropriate action here is probably to crash.
"""
err_msg = ("Unexpected message received by %s: %s" % (
self.name(), ",".join(fields)))
raise UnexpectedMessage(err_msg)
def _process_current_protocol(self, fields: Sequence[str]) -> None:
"""
Process the Current Protocol Message
The first message we send IQFeed.exe upon connecting is the
set protocol message. If we get this message and the protocol
IQFeed tells us it's using does not match the expected protocol
        then we really need to shut down and fix the version mismatch by
        upgrading/downgrading IQFeed.exe and this library so they match.
"""
assert len(fields) > 2
assert fields[0] == "S"
assert fields[1] == "CURRENT PROTOCOL"
protocol = fields[2]
if protocol != FeedConn.protocol:
err_msg = ("Desired Protocol %s, Server Says Protocol %s in %s" % (
FeedConn.protocol, protocol, self.name()))
raise UnexpectedProtocol(err_msg)
def _process_server_disconnected(self, fields: Sequence[str]) -> None:
"""Called when IQFeed.exe disconnects from DTN's servers."""
assert len(fields) > 1
assert fields[0] == "S"
assert fields[1] == "SERVER DISCONNECTED"
self._connected = False
for listener in self._listeners:
listener.feed_is_stale()
def _process_server_connected(self, fields: Sequence[str]) -> None:
"""Called when IQFeed.exe connects or re-connects to DTN's servers."""
        assert len(fields) > 1
        assert fields[0] == "S"
        assert fields[1] == "SERVER CONNECTED"
        self._connected = True
import os
import numpy as np
import matplotlib.pyplot as plt
from . import helper_generic as hlp
from . import helper_hh_model as hh
from . import helper_mkz_model as mkz
from . import helper_site_response as sr
class Curve:
"""
Class implementation of a strain-dependent curve. It can be a stress-strain
curve, a G/Gmax curve as a function of strain, or a damping curve as
a function of strain.
Parameters
----------
data : numpy.ndarray
A 2D numpy array with 2 columns. Its 0th column contains the strain
array, and the 1st column contains the accompanying values (such as
stress, or G/Gmax).
strain_unit : {'1', '%'}
The unit of the strain.
interpolate : bool
Whether to interpolate the input curve or not. If ``False``, the
following several parameters (``min_strain``, ``max_strain``,
``n_pts``, ``log_scale``) have no effects.
min_strain : float
Minimum strain value of the strain array. If ``interpolate`` is ``True``,
the raw ``data`` will be internally interpolated at a strain array
defined by ``min_strain``, ``max_strain``, and ``n_pts``.
max_strain : float
Maximum strain value of the strain array. Only effective when
``interpolate`` is set to ``True``.
n_pts : int
Number of points of the desired strain array to do the interpolation.
Only effective when ``interpolate`` is set to ``True``.
log_scale : bool
Whether the strain array for interpolation is in log scale (or linear
scale). Only effective when ``interpolate`` is set to ``True``.
check_values : bool
Whether to ensure that all values in ``data`` >= 0 when a class object
is being constructed.
Attributes
----------
raw_data : numpy.ndarray
The raw data that the user passed in
strain : numpy.array
The strain array at which interpolation happens, a 1D numpy array of
shape (``n_pts``, ). The unit is percent (unit conversion happens
internally if applicable).
values : numpy.array
The interpolated values; same shape as ``strain``
"""
def __init__(
self, data, *, strain_unit='%', interpolate=False,
min_strain=0.0001, max_strain=10., n_pts=50, log_scale=True,
check_values=True,
):
hlp.check_two_column_format(data, '`curve`', ensure_non_negative=check_values)
if interpolate:
strain, values = hlp.interpolate(
min_strain, max_strain, n_pts, data[:, 0], data[:, 1],
log_scale=log_scale,
)
else:
strain, values = data[:, 0], data[:, 1]
if strain_unit not in ['1', '%']:
raise ValueError("`strain_unit` must be '1' or '%'.")
if strain_unit == '1':
strain *= 100 # strain values are internally stored in unit of %
self.raw_data = data
self.strain = strain
self.values = values
def __repr__(self):
return '%s object:\n%s' % (self.__class__, str(self.raw_data))
def plot(
self, plot_interpolated=True, fig=None, ax=None, title=None,
xlabel='Strain [%]', ylabel=None, figsize=(3, 3), dpi=100,
**kwargs_to_matplotlib,
):
"""
Plot the curve (y axis: values, x axis: strain)
Parameters
----------
plot_interpolated : bool
Whether to plot the interpolated curve or the raw data.
fig : matplotlib.figure.Figure or ``None``
Figure object. If None, a new figure will be created.
ax : matplotlib.axes._subplots.AxesSubplot or ``None``
Axes object. If None, a new axes will be created.
title : str
Title of plot.
xlabel : str
X label of plot.
ylabel : str
Y label of plot.
figsize: (float, float)
Figure size in inches, as a tuple of two numbers. The figure
size of ``fig`` (if not ``None``) will override this parameter.
dpi : float
Figure resolution. The dpi of ``fig`` (if not ``None``) will
override this parameter.
**kwargs_to_matplotlib :
Keyword arguments to be passed to ``matplotlib.pyplot.plot()``.
Returns
-------
fig : matplotlib.figure.Figure
The figure object being created or being passed into this function.
ax : matplotlib.axes._subplots.AxesSubplot
The axes object being created or being passed into this function.
"""
fig, ax = hlp._process_fig_ax_objects(fig, ax, figsize=figsize, dpi=dpi)
if plot_interpolated:
ax.semilogx(self.strain, self.values, **kwargs_to_matplotlib)
else:
ax.semilogx(
self.raw_data[:, 0], self.raw_data[:, 1], **kwargs_to_matplotlib,
)
ax.grid(ls=':')
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
if title:
ax.set_title(title)
return fig, ax
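# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the strain/value
# pairs below are made up purely to illustrate the constructor and plot().
# _demo_data = np.array([[0.0001, 1.00], [0.01, 0.95], [0.1, 0.70], [1.0, 0.30]])
# _demo_curve = Curve(_demo_data, strain_unit='%', interpolate=True, n_pts=30)
# _fig, _ax = _demo_curve.plot(ylabel='G/Gmax')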
class GGmax_Curve(Curve):
"""
Class implementation of a G/Gmax curve, as a function of shear strain.
Parameters
----------
data : numpy.ndarray
A 2D numpy array with 2 columns. Its 0th column contains the strain
array, and the 1st column contains the G/Gmax values.
strain_unit : {'1', '%'}
The unit of the strain.
interpolate : bool
Whether to interpolate the input curve or not. If ``False``, the
following several parameters (``min_strain``, ``max_strain``,
``n_pts``, ``log_scale``) have no effects.
min_strain : float
Minimum strain value of the strain array. If ``interpolate`` is ``True``,
the raw ``data`` will be internally interpolated at a strain array
defined by ``min_strain``, ``max_strain``, and ``n_pts``.
max_strain : float
Maximum strain value of the strain array. Only effective when
``interpolate`` is set to ``True``.
n_pts : int
Number of points of the desired strain array to do the interpolation.
Only effective when ``interpolate`` is set to ``True``.
log_scale : bool
Whether the strain array for interpolation is in log scale (or linear
scale). Only effective when ``interpolate`` is set to ``True``.
check_values : bool
Whether to automatically check the validity of the G/Gmax values (i.e.,
between 0 and 1).
Attributes
----------
raw_data : numpy.ndarray
The raw data that the user passed in.
strain : numpy.array
The strain array at which interpolation happens, a 1D numpy array of
shape (``n_pts``, ). The unit is percent (unit conversion happens
internally if applicable).
GGmax : numpy.array
The interpolated G/Gmax values; same shape as ``strain``.
"""
def __init__(
self, data, *, strain_unit='%', interpolate=False,
min_strain=0.0001, max_strain=10., n_pts=50, log_scale=True,
check_values=True,
):
super(GGmax_Curve, self).__init__(
data,
strain_unit=strain_unit,
interpolate=interpolate,
min_strain=min_strain,
max_strain=max_strain,
n_pts=n_pts,
log_scale=log_scale,
check_values=check_values,
)
self.GGmax = self.values
if check_values and (np.any(self.GGmax > 1) or np.any(self.GGmax < 0)):
raise ValueError('The provided G/Gmax values must be between [0, 1].')
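# Hedged example of the validity check above (values are made up): any G/Gmax
# entry outside [0, 1] raises ValueError when check_values=True (the default).
# GGmax_Curve(np.array([[0.01, 1.2], [0.1, 0.9]]))  # -> ValueError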
class Damping_Curve(Curve):
"""
Class implementation of a damping curve, as a function of shear strain.
Parameters
----------
data : numpy.ndarray
A 2D numpy array with 2 columns. Its 0th column contains the strain
        array, and the 1st column contains the damping values.
strain_unit : {'1', '%'}
The unit of the strain.
damping_unit : {'1', '%'}
The unit of damping.
interpolate : bool
Whether to interpolate the input curve or not. If ``False``, the
following several parameters (``min_strain``, ``max_strain``,
``n_pts``, ``log_scale``) have no effects.
min_strain : float
Minimum strain value of the strain array. If ``interpolate`` is ``True``,
the raw ``data`` will be internally interpolated at a strain array
defined by ``min_strain``, ``max_strain``, and ``n_pts``.
max_strain : float
Maximum strain value of the strain array. Only effective when
``interpolate`` is set to ``True``.
n_pts : int
Number of points of the desired strain array to do the interpolation.
Only effective when ``interpolate`` is set to ``True``.
log_scale : bool
Whether the strain array for interpolation is in log scale (or linear
scale). Only effective when ``interpolate`` is set to ``True``.
check_values : bool
Whether to automatically check the validity of the damping values (i.e.,
between 0 and 1).
Attributes
----------
raw_data : numpy.ndarray
The raw data that the user passed in.
strain : numpy.array
The strain array at which interpolation happens, a 1D numpy array of
shape (``n_pts``, ). The unit is percent (unit conversion happens
internally if applicable).
damping : numpy.array
The interpolated damping values; same shape as ``strain``. The unit is
percent (unit conversion happens internally if applicable).
"""
def __init__(
self, data, *, strain_unit='%', damping_unit='%',
interpolate=False, min_strain=0.0001, max_strain=10.,
n_pts=50, log_scale=True, check_values=True,
):
super(Damping_Curve, self).__init__(
data,
strain_unit=strain_unit,
interpolate=interpolate,
min_strain=min_strain,
max_strain=max_strain,
n_pts=n_pts,
log_scale=log_scale,
check_values=check_values,
)
self.damping = self.values
if damping_unit not in ['1', '%']:
raise ValueError("`damping_unit` must be '1' or '%'.")
if damping_unit == '1':
self.damping *= 100 # unit: 1 --> %
if check_values and (np.any(self.damping > 100) or np.any(self.damping < 0)):
raise ValueError('The provided damping values must be between [0, 100].')
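    # Hedged example of the unit handling above: with damping_unit='1', a
    # value such as 0.02 is stored internally as 2.0 (percent); data already
    # in percent should use damping_unit='%' (the default).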
def get_HH_x_param(
self, use_scipy=True, pop_size=800, n_gen=100,
lower_bound_power=-4, upper_bound_power=6, eta=0.1,
seed=0, show_fig=False, verbose=False, parallel=False,
n_cores=None,
):
"""
Obtain the HH_x parameters from the damping curve data, using the
genetic algorithm provided in DEAP.
Parameters
----------
use_scipy : bool
Whether to use the "differential_evolution" algorithm implemented
in scipy
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.differential_evolution.html)
to perform optimization. If ``False``, use the algorithm in the
DEAP package.
pop_size : int
The number of individuals in a generation.
n_gen : int
Number of generations that the evolution lasts.
lower_bound_power | |
# %%
import os
import sys
from collections import Counter
from datetime import datetime, timedelta
from pathlib import Path
from zipfile import ZipFile
# data wrangling
import geopandas as gpd
from numpy.lib.utils import deprecate
import pandas as pd
import json
import numpy as np
import requests
from urllib.error import HTTPError
from etl import geo_pipe as geo
import cons
# config options
timestr = datetime.now().strftime("%Y_%m_%d")
dl_home = Path(Path.home() / 'Downloads')
node_dir = Path(f'{dl_home}/Naptan_Data/{timestr}_naptan_nodes/')
nptg_dir = Path(f'{dl_home}/Naptan_Data/{timestr}_nptg/')
geojson_dir = Path(f"{dl_home}/Naptan_Geojson/{timestr}_geojson/")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 5)
# declare geo coordinates conversion type.
crs = "EPSG:4326"
# %%
def stop_area_atcocode_identification(gdf):
"""[summary] identifies if which naptan stops are part of a stop group and
which ones are not part of a stop group. A transform
Args:
gdf ([geopandas]): [description]
Raises:
NotImplementedError: [description]
NotImplementedError: [description]
e: [description]
ve: [description]
OSError: [description]
file_missing: [description]
NotImplementedError: [description]
NotImplementedError: [description]
ve: [description]
Returns:
[type]: [description]
"""
#
stop_groups = gdf['StopAreaCode'].str[3:4].str.contains("g|G")
#
gdf_subset = gdf[stop_groups]
return gdf_subset
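# Hedged illustration (not part of the pipeline): how the StopAreaCode test
# above behaves on a tiny fabricated frame.
# _demo = pd.DataFrame({'StopAreaCode': ['037G0001', '0370A001']})
# _demo['StopAreaCode'].str[3:4].str.contains("g|G")
# # -> True, False: only the first row is treated as part of a stop group.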
# %%
def define_stops_areas(gdf,
loc_codes,
admin_codes):
"""[summary] Given combining a commonname and indicator with stop area data
, you should be able to define what a stop area is. By using the ngpt data
you can cross reference that to a specific locality.
Args:
gdf ([geopandas dataframe]): [description]
loc_codes ([locality codes]): [description]
admin_codes ([admin codes]): [description]
Raises:
NotImplementedError: [description]
Returns:
[gdf]: [A geopandas dataframe with stop area definitions.]
"""
# TODO - this needs finishing, need to add nptg stop area data linkaging
# functionality.
gdf['stop_Area'] = gdf['StopPoint'] + loc_codes[''] + admin_codes['']
return gdf
raise NotImplementedError
# %%
def intersect_list_to_masks(df, col_name, value_list):
"""[summary] This is a filter function, that performs an inner join when
given a list object and returns a filtered dataframe of the values in the
given list. You must pass a valid column name and a valid list of strings
expected in that column, will filter out all values not in the list
from the given column, returning a dataframe with only the found entries,
that match the list given values in the given column.
Arguments:
colName {[str]} -- [the pandas column name, as a string]
value_list {[list]} -- [the list of strings to filter the dataframe.]
Returns:
[gdf] -- [a filtered gdf, with only the found list values within. ]
"""
# uses numpy 1d intersection to filter an entire list of strings
mask = df[col_name].apply(lambda x: np.intersect1d(x,
value_list).size > 0)
failed_nodes = df[mask]
return failed_nodes
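# Hedged usage sketch: keep only the rows whose 'LocalityName' is in the
# given list (the column name and values are fabricated for illustration).
# subset = intersect_list_to_masks(gdf, 'LocalityName', ['Leeds', 'York'])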
# %%
def file_checker(file_locale, file_names, file_ext):
"""[summary] Lists the Naptan files available at the specified location.
If some files are missing/ or can't be open this should flag a warning.
Arguments:
Args:
file_locale ([type]): [description]
file_names ([type]): [description]
file_ext ([type]): [description]
Returns:
[type]: [description]
"""
# we print out if all the expected files are found and if so in the system
# where.
# making sure we let the user know what files have been found and where
# if anything is missing we let them know that too.
try:
for expected in file_locale.iterdir():
f = Path(f'{expected}')
# check we can open the file, if this doesn't work, we know the
# files are missing.
fg = open(f)
fg.close()
            # we check the files exist and are valid files of the expected type
            if f.exists() and f.is_file() and 'nptg' in str(file_locale):
                print(f'NPTG {f.stem} ({f.suffix}) has been found.')
            # check for the node files
            elif 'nodes' in str(file_locale) and f.exists() and f.is_file():
                print(f'Naptan Node {f.stem} {f.suffix} has been located.')
            else:
                sys.exit(f'Naptan file {f.stem} is missing.')
except IOError:
sys.exit(f'{file_locale} is not accessible.')
except OSError:
sys.exit(f'Naptan files {file_names} is wrong.')
except ValueError as ve:
sys.exit(f'{ve}')
# %%
def file_verification(ext):
"""[summary] runs file verification checks on the naptan and nptg
files.
Arguments:
ext {[str]} -- [description]
Raises:
file_missing: [description]
NotImplementedError: [description]
NotImplementedError: [description]
Returns:
[type]: [description]
"""
nptg_file_names = ['AdjacentLocality',
'AdminAreas',
'Districts',
'Localities',
'LocalityAlternativeNames',
'LocalityHierarchy',
'PlusbusMapping',
'PlusbusZones',
'Regions']
naptan_file_names = ['AirReferences',
'AlternativeDescriptors',
'AreaHierarchy',
'CoachReferences',
'FerryReferences',
'Flexible',
'HailRide',
'LocalityMainAccessPoints',
'MetroReferences',
'RailReferences',
'StopAreas',
'StopAvailability',
'StopLocalities',
'StopPlusbusZones',
'Stops',
'StopsInArea']
return file_checker(node_dir,
naptan_file_names,
ext), file_checker(nptg_dir,
nptg_file_names,
ext)
# %%
def folder_creator(folder):
"""[summary] Makes a folder if one for the given paths doesn't already
exists.
Args:
folder ([Path]): [description]
Returns:
[path]: [Created folder at the given path.]
"""
try:
if not folder.exists():
Path(f'{folder}').mkdir(parents=True,
exist_ok=True)
print(f"{folder} folder has being created.")
except OSError as ose:
sys.exit(f'{ose}')
except FileExistsError:
print(f'The {folder} file has been created.')
except Exception as e:
raise e
sys.exit(f'{e} Report creation failed')
finally:
return folder
# %%
def downloader(file, url):
"""[summary] It checks the given datasource is accessible and if so downloads
the files to the users download folder.
Args:
file ([str]): [The file (usually zip) we are downloading.]
url ([str]): [The location of the file.]
Raises:
NotImplementedError: [exception support]
ve: [description]
"""
try:
# let's check if the file exists already on the local system.
if not file.exists():
print(f'Downloading the {timestr} {file} Dataset.')
else:
return(f'{file} data for {timestr} has been downloaded.')
# then defensively resolve our errors,
except ConnectionError as ce:
sys.exit(f' {ce} No internet connection was found.')
except ConnectionRefusedError as cre:
sys.exit(f'{cre} This system is not allowed to access the Naptan Site.')
except HTTPError as httperror:
sys.exit(f'{httperror} the Naptan download server is unavailable.')
except ValueError as ve:
raise ve
sys.exit('Site is not valid.')
finally:
# assumes success of the above.
        dest_dir = Path(dl_home, "Naptan_Data")
        response = requests.get(url)
        # we overwrite to avoid appending the same data multiple times to the
        # same file
        with open(os.path.join(dest_dir, file), 'wb') as f:
            f.write(response.content)
        response.close()
# %%
def naptan_data_source(naptan_data,
format='csv',
local_authority_codes='None'):
"""[summary] As the Naptan and nptg can refer to different formats and
naptan can either be the whole dataset or a specific subset relating to a
given local area authority.
Args:
naptan_data (str): [description]. Defaults to [].
format (str, optional): [description]. Defaults to 'csv'.
local_authority_code ([type], optional): [description].
Defaults to None.
Raises:
NotImplementedError: [XML download is not currently supported.]
"""
# base naptan path for nptg and naptan node data.
base = 'https://naptan.app.dft.gov.uk/'
try:
file = Path(f'{dl_home}/{timestr}_{naptan_data}.zip')
# let's check the file exists and is readable.
if Path.exists(file) and file.stat().st_size != 0:
print(f'Naptan Data for {timestr} has already been downloaded.')
elif naptan_data == 'naptan_nodes':
# base for just node data.
naptan_base = f'{base}DataRequest/Naptan.ashx'
# the below filters url creation to create a locality list codes.
if local_authority_codes.isdigit():
url = (f'{naptan_base}{format}&LA={local_authority_codes}')
elif format == 'xml':
# will just be the base url.
url = naptan_base
else:
url = (f'{naptan_base}?format={format}')
print(f'Downloading all Naptan Node {format} data.')
return downloader(file, url)
elif naptan_data == 'nptg':
# downloads the nptg data.
print(f'Downloading the {naptan_data} data.')
file = Path(f'{dl_home}/{timestr}_{naptan_data}.zip')
url = f'{base}datarequest/nptg.ashx?format={format}'
return downloader(file, url)
except OSError:
raise OSError
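# Hedged usage sketch: fetch the full Naptan node CSV export and the NPTG
# data using the helpers above (these calls hit the live DfT download site).
# naptan_data_source('naptan_nodes', format='csv')
# naptan_data_source('nptg', format='csv')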
# %%
def extract_naptan_files(zip_file):
"""[summary] Extracts the downloaded zip file.
Arguments:
zip_file {[path]} -- [description]
"""
dest = Path(f'{dl_home}/Naptan_Data/{zip_file.stem}/')
try:
# Check that the directory exists, that it is a directory and it's not
# empty.
if dest.exists() and dest.is_dir() and any(Path(dest).iterdir()):
print(f'Already Extracted {timestr} Naptan Data to {dest}.')
pass
if zip_file.is_file() and zip_file.suffix == '.zip':
print(f'Extracting all {zip_file} files in archive.')
except FileNotFoundError:
folder_creator(Path(f'{dest}'))
print(f'{dest.stem} folder is being created.')
except FileExistsError:
sys.exit('File already exists')
except Exception as e:
sys.exit(e)
finally:
with ZipFile(zip_file, "r") as zipobj:
# Extract all the contents of zip file in the working directory
zipobj.extractall(dest)
print(f'Extracted all files to {dest}')
# %%
def deactivated_nodes(df):
"""[summary] - Returns a dataframe of only active, pending, or new nodes
or deleted stops from the last 3 years, for representative sampling.
deleted nodes are removed for the sake of this test. This test is also not,
concerned with reporting errors, as this is a data cleaning function
Arguments:
df {[geopanda dataframe]} -- [The Naptan master dataframe.]
Returns:
[type] -- [description]
"""
# TODO filter this to stops with a modification date time within the last 3
# years so that there is a represenative sample of deactivated stops.
try:
exp_date = (datetime.now() - timedelta(days=365*3))
# we filter all the missing deleted stops that are older than 3 yrs.
mask = ~((df['Status'] == 'del') &
(df['ModificationDateTime'] <= exp_date))
active_nodes = df[mask]
# TODO needs to be integrated with reporting function.
# inactive_nodes = df[~mask]
| |
<reponame>smegurus/smegurus-django
import json
from django.core import mail
from django.db import transaction
from django.core.urlresolvers import resolve, reverse
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.utils import translation
from django.contrib.auth.models import User, Group
from rest_framework.authtoken.models import Token
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework.test import APITestCase
from django_tenants.test.cases import TenantTestCase
from django_tenants.test.client import TenantClient
from foundation_tenant.models.base.me import Me
from foundation_tenant.models.base.postaladdress import PostalAddress
from foundation_tenant.models.base.contactpoint import ContactPoint
from foundation_tenant.models.base.intake import Intake
from foundation_tenant.models.base.note import Note
from smegurus import constants
TEST_USER_EMAIL = "<EMAIL>"
TEST_USER_USERNAME = "ledo"
TEST_USER_PASSWORD = "<PASSWORD>"
class APIIntakeWithTenantSchemaTestCase(APITestCase, TenantTestCase):
fixtures = []
def setup_tenant(self, tenant):
"""Tenant Schema"""
tenant.schema_name = 'galacticalliance'
tenant.name = "Galactic Alliance of Humankind"
tenant.has_perks=True
tenant.has_mentors=True
tenant.how_discovered = "Command HQ"
tenant.how_many_served = 1
@classmethod
def setUpTestData(cls):
Group.objects.bulk_create([
Group(id=constants.ENTREPRENEUR_GROUP_ID, name="Entreprenuer",),
Group(id=constants.MENTOR_GROUP_ID, name="Mentor",),
Group(id=constants.ADVISOR_GROUP_ID, name="Advisor",),
Group(id=constants.ORGANIZATION_MANAGER_GROUP_ID, name="Org Manager",),
Group(id=constants.ORGANIZATION_ADMIN_GROUP_ID, name="Org Admin",),
Group(id=constants.CLIENT_MANAGER_GROUP_ID, name="Client Manager",),
Group(id=constants.SYSTEM_ADMIN_GROUP_ID, name="System Admin",),
])
org_admin_group = Group.objects.get(id=constants.ORGANIZATION_ADMIN_GROUP_ID)
user = User.objects.create_user( # Create our user.
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=<PASSWORD>
)
user.is_superuser = True
user.is_active = True
user.groups.add(org_admin_group)
user.save()
@transaction.atomic
def setUp(self):
translation.activate('en') # Set English.
super(APIIntakeWithTenantSchemaTestCase, self).setUp()
# Initialize our test data.
self.user = User.objects.get()
token = Token.objects.get(user__username=TEST_USER_USERNAME)
# Setup.
self.unauthorized_client = TenantClient(self.tenant)
self.authorized_client = TenantClient(self.tenant, HTTP_AUTHORIZATION='Token ' + token.key)
self.authorized_client.login(
username=TEST_USER_USERNAME,
password=<PASSWORD>
)
self.tenant.owner = self.user
self.tenant.save()
self.me = Me.objects.create(
owner=self.user,
)
# Above taken from:
# http://www.django-rest-framework.org/api-guide/testing/#authenticating
@transaction.atomic
def tearDown(self):
Intake.objects.delete_all()
Note.objects.delete_all()
PostalAddress.objects.delete_all()
ContactPoint.objects.delete_all()
Me.objects.delete_all()
items = User.objects.all()
for item in items.all():
item.delete()
items = Group.objects.all()
for item in items.all():
item.delete()
# super(APIIntakeWithTenantSchemaTestCase, self).tearDown()
@transaction.atomic
def test_list_with_anonymous_user(self):
response = self.unauthorized_client.get('/api/tenantintake/?format=json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@transaction.atomic
def test_list_with_authenticated__user(self):
# Change Group that the User belongs in.
entrepreneur_group = Group.objects.get(id=constants.ENTREPRENEUR_GROUP_ID)
self.user.groups.add(entrepreneur_group)
self.user.save()
# Test and verify.
response = self.authorized_client.get('/api/tenantintake/?format=json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
@transaction.atomic
def test_list_with_authenticated_management_group_user(self):
# Change Group that the User belongs in.
org_admin_group = Group.objects.get(id=constants.ORGANIZATION_ADMIN_GROUP_ID)
self.user.groups.remove(org_admin_group)
self.user.save()
# Test and verify.
response = self.authorized_client.get('/api/tenantintake/?format=json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
@transaction.atomic
def test_list_with_authenticated_advisor_group_user(self):
# Change Group that the User belongs in.
advisor_group = Group.objects.get(id=constants.ADVISOR_GROUP_ID)
self.user.groups.add(advisor_group)
self.user.save()
# Test and verify.
response = self.authorized_client.get('/api/tenantintake/?format=json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
@transaction.atomic
def test_post_with_anonymous_user(self):
data = {
'me': self.me.id,
}
response = self.unauthorized_client.post('/api/tenantintake/?format=json', json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@transaction.atomic
def test_post_with_authenticated_management_group_user(self):
# Run the test and verify.
data = {
'me': self.me.id,
}
response = self.authorized_client.post('/api/tenantintake/?format=json', json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
@transaction.atomic
def test_post_with_authenticated_advisor_group_user(self):
# Change Group that the User belongs in.
org_admin_group = Group.objects.get(id=constants.ORGANIZATION_ADMIN_GROUP_ID)
advisor_group = Group.objects.get(id=constants.ADVISOR_GROUP_ID)
self.user.groups.remove(org_admin_group)
self.user.groups.add(advisor_group)
self.user.save()
# Test and verify.
data = {
'me': self.me.id,
}
response = self.authorized_client.post('/api/tenantintake/?format=json', json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
@transaction.atomic
def test_put_with_anonymous_user(self):
# Create a new object with our specific test data.
Intake.objects.create(
id=1,
me=self.me,
)
# Run the test.
data = {
'id': 1,
'me': self.me.id,
}
response = self.unauthorized_client.put('/api/tenantintake/1/?format=json', json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@transaction.atomic
def test_put_with_authenticated_management_user(self):
# Create a new object with our specific test data.
Intake.objects.create(
id=1,
me=self.me,
)
# Run the test.
data = {
'id': 1,
'me': self.me.id,
}
response = self.authorized_client.put('/api/tenantintake/1/?format=json', json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
@transaction.atomic
def test_put_with_authenticated_advisor_user(self):
# Change Group that the User belongs in.
org_admin_group = Group.objects.get(id=constants.ORGANIZATION_ADMIN_GROUP_ID)
advisor_group = Group.objects.get(id=constants.ADVISOR_GROUP_ID)
self.user.groups.remove(org_admin_group)
self.user.groups.add(advisor_group)
self.user.save()
# Create a new object with our specific test data.
Intake.objects.create(
id=1,
me=self.me,
)
# Run the test.
data = {
'id': 1,
'me': self.me.id,
}
response = self.authorized_client.put('/api/tenantintake/1/?format=json', json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
@transaction.atomic
def test_delete_with_anonymous_user(self):
Intake.objects.create(
id=1,
me=self.me,
)
response = self.unauthorized_client.delete('/api/tenantintake/1/?format=json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@transaction.atomic
def test_delete_with_authenticated_management_user(self):
Intake.objects.create(
id=1,
me=self.me,
judgement_note=Note.objects.create(
id=1,
me=self.me,
),
privacy_note=Note.objects.create(
id=2,
me=self.me,
),
terms_note=Note.objects.create(
id=3,
me=self.me,
),
confidentiality_note=Note.objects.create(
id=4,
me=self.me,
),
collection_note=Note.objects.create(
id=5,
me=self.me,
),
)
response = self.authorized_client.delete('/api/tenantintake/1/?format=json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
@transaction.atomic
def test_delete_with_authenticated_advisor_user(self):
# Create our object to be deleted.
Intake.objects.create(
id=1,
me=self.me,
status=constants.CREATED_STATUS,
judgement_note=Note.objects.create(
id=1,
me=self.me,
),
)
# Change Group that the User belongs in.
org_admin_group = Group.objects.get(id=constants.ORGANIZATION_ADMIN_GROUP_ID)
advisor_group = Group.objects.get(id=constants.ADVISOR_GROUP_ID)
self.user.groups.remove(org_admin_group)
self.user.groups.add(advisor_group)
self.user.save()
# Run test and verify.
response = self.authorized_client.delete('/api/tenantintake/1/?format=json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
@transaction.atomic
def test_complete_intake_with_anonymous_user(self):
# Setup our object.
Intake.objects.create(
id=1,
me=self.me,
status=constants.PENDING_REVIEW_STATUS,
judgement_note=Note.objects.create(
id=1,
me=self.me,
),
)
# Run the test and verify.
response = self.unauthorized_client.put(
'/api/tenantintake/1/complete_intake/?format=json',
json.dumps({}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
me = Intake.objects.get(id=1)
self.assertEqual(me.status, constants.PENDING_REVIEW_STATUS)
self.assertEqual(len(mail.outbox), 0) # Test that one message has not been sent.
@transaction.atomic
def test_complete_intake_with_owner_user(self):
# Setup our object.
Intake.objects.create(
id=1,
me=self.me,
status=constants.CREATED_STATUS,
)
# Run the test and verify.
response = self.authorized_client.put(
'/api/tenantintake/1/complete_intake/?format=json',
json.dumps({}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
me = Intake.objects.get(id=1)
self.assertEqual(me.status, constants.PENDING_REVIEW_STATUS)
# Test that one email has been sent.
self.assertEqual(len(mail.outbox), 1)
# Verify that the subject of the first message is correct.
self.assertEqual(mail.outbox[0].subject, 'New Entrepreneur Application!')
@transaction.atomic
def test_complete_intake_with_different_owner_user(self):
# Setup our objects.
org_admin_group = Group.objects.get(id=constants.ORGANIZATION_ADMIN_GROUP_ID)
new_user = User.objects.create_user( # Create our user.
email='<EMAIL>',
username='Chambers',
password='<PASSWORD>',
)
new_user.is_active = True
new_user.groups.add(org_admin_group)
new_user.save()
new_me = Me.objects.create(
owner=new_user
)
Intake.objects.create(
id=1,
me=new_me,
status=constants.CREATED_STATUS,
)
# Run the test and verify.
response = self.authorized_client.put(
'/api/tenantintake/1/complete_intake/?format=json',
json.dumps({}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
me = Intake.objects.get(id=1)
self.assertEqual(me.status, constants.CREATED_STATUS)
self.assertEqual(len(mail.outbox), 0) # Test that one message has not been sent.
@transaction.atomic
def test_complete_intake_with_owner_user_with_404(self):
# Setup our object.
Intake.objects.create(
id=1,
me=self.me,
status=constants.CREATED_STATUS,
)
# Run the test and verify.
response = self.authorized_client.put(
'/api/tenantintake/6666/complete_intake/?format=json',
json.dumps({}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
me = Intake.objects.get(id=1)
self.assertEqual(me.status, constants.CREATED_STATUS)
self.assertEqual(len(mail.outbox), 0) # Test that one message has not been sent.
@transaction.atomic
def test_judge_with_anonymous_user(self):
# Create a new object with our specific test data.
# Setup our object.
Intake.objects.create(
id=1,
me=self.me,
status=constants.CREATED_STATUS,
)
# Run the test.
data = {
'id': 1,
'owner': self.user.id,
'is_employee_created': False,
}
response = self.unauthorized_client.put(
'/api/tenantintake/1/judge/?format=json',
json.dumps(data),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
intake = Intake.objects.get(id=1)
self.assertEqual(intake.status, constants.CREATED_STATUS)
self.assertFalse(intake.me.is_in_intake)
self.assertEqual(len(mail.outbox), 0) # Test that one message has not been sent.
@transaction.atomic
def test_judge_with_employee_user_for_existing_intake_with_note(self):
# Setup our object.
Intake.objects.create(
id=1,
me=self.me,
status=constants.CREATED_STATUS,
judgement_note=Note.objects.create(
me=self.me,
),
)
# Run the test and verify.
response = self.authorized_client.put(
'/api/tenantintake/1/judge/?format=json',
json.dumps({
'status': constants.APPROVED_STATUS,
'comment': 'This is a test comment.',
'is_employee_created': False,
}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
intake = Intake.objects.get(id=1)
self.assertEqual(intake.status, constants.APPROVED_STATUS)
self.assertTrue(intake.me.is_in_intake)
note = Note.objects.get(id=1)
self.assertIn('This is a test comment.', note.description)
self.assertEqual(len(mail.outbox), 1) # Test that one message has been sent.
self.assertIn('Accepted', mail.outbox[0].subject)
@transaction.atomic
def test_judge_with_employee_user_for_existing_intake_without_note(self):
# Setup our object.
Intake.objects.create(
id=1,
me=self.me,
status=constants.CREATED_STATUS,
)
# Run the test and verify.
response = self.authorized_client.put(
'/api/tenantintake/1/judge/?format=json',
json.dumps({
'status': constants.REJECTED_STATUS,
'comment': 'This is a test comment.',
'is_employee_created': False,
}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
intake = Intake.objects.get(id=1)
self.assertEqual(intake.status, constants.REJECTED_STATUS)
self.assertFalse(intake.me.is_in_intake)
note = Note.objects.get(id=1)
self.assertIn('This is a test comment.', note.description)
self.assertEqual(len(mail.outbox), 1) # Test that one message has been sent.
self.assertIn('Rejected', mail.outbox[0].subject)
@transaction.atomic
def test_judge_with_employee_user_for_manually_created_intake(self):
# Setup our object.
Intake.objects.create(
id=1,
me=self.me,
status=constants.CREATED_STATUS,
)
# Run the test and verify.
response = self.authorized_client.put(
'/api/tenantintake/1/judge/?format=json',
json.dumps({
'status': constants.APPROVED_STATUS,
'comment': 'This is a test comment.',
'is_employee_created': True,
}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
intake = Intake.objects.get(id=1)
self.assertEqual(intake.status, constants.APPROVED_STATUS)
self.assertTrue(intake.me.is_in_intake)
note = Note.objects.get(id=1)
self.assertIn('This is a test comment.', note.description)
self.assertEqual(len(mail.outbox), 1) # Test that one message has been sent.
@transaction.atomic
def test_judge_with_non_employee_user(self):
org_admin_group = Group.objects.get(id=constants.ORGANIZATION_ADMIN_GROUP_ID)
self.user.groups.remove(org_admin_group)
group = Group.objects.get(id=constants.ENTREPRENEUR_GROUP_ID)
self.user.groups.add(group)
Intake.objects.create(
id=1,
me=self.me,
status=constants.CREATED_STATUS,
)
# Run the test.
response = self.authorized_client.put(
'/api/tenantintake/1/judge/?format=json',
json.dumps({
'status': constants.APPROVED_STATUS,
'comment': 'This is a test comment.',
'is_employee_created': False,
}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
intake = Intake.objects.get(id=1)
self.assertEqual(intake.status, constants.CREATED_STATUS)
self.assertFalse(intake.me.is_in_intake)
self.assertEqual(len(mail.outbox), 0) # Test that one message has not been sent.
@transaction.atomic
def test_judge_with_owner_user_with_404(self):
org_admin_group = Group.objects.get(id=constants.ORGANIZATION_ADMIN_GROUP_ID)
self.user.groups.remove(org_admin_group)
group = Group.objects.get(id=constants.ADVISOR_GROUP_ID)
self.user.groups.add(group)
response = self.authorized_client.put(
'/api/tenantintake/666/judge/?format=json',
json.dumps({
'status': constants.APPROVED_STATUS,
'comment': 'This is a test comment.',
'is_employee_created': False,
}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn(b'No Intake matches the given query.', response.content)
self.assertEqual(len(mail.outbox), 0) # Test that one message has not been sent.
@transaction.atomic
def test_crm_update_with_anonymous_user(self):
# Setup our object.
Intake.objects.create(
id=1,
me=self.me,
status=constants.PENDING_REVIEW_STATUS,
has_signed_with_name="Ledo"
)
# Run the test and verify.
response = self.unauthorized_client.put(
'/api/tenantintake/1/crm_update/?format=json',
json.dumps({}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@transaction.atomic
def test_crm_update_with_owner_user(self):
# Setup our object.
Intake.objects.create(
id=1,
me=self.me,
status=constants.CREATED_STATUS,
has_signed_with_name="Ledo"
)
# Run the test and verify.
response = self.authorized_client.put(
'/api/tenantintake/1/crm_update/?format=json',
json.dumps({}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@transaction.atomic
def test_crm_update_with_different_owner_user(self):
# Setup our objects.
org_admin_group = Group.objects.get(id=constants.ORGANIZATION_ADMIN_GROUP_ID)
new_user = User.objects.create_user( # Create our user.
email='<EMAIL>',
username='Chambers',
password='<PASSWORD>',
)
new_user.is_active = True
new_user.groups.add(org_admin_group)
new_user.save()
new_me = Me.objects.create(
owner=new_user
)
Intake.objects.create(
id=1,
me=new_me,
status=constants.CREATED_STATUS,
has_signed_with_name="Ledo"
)
# Run the test and verify.
response = self.authorized_client.put(
'/api/tenantintake/1/crm_update/?format=json',
json.dumps({}),
content_type='application/json'
| |
<filename>flask_server/routes.py
from flask import g, render_template, flash, redirect, url_for, request
from flask_login import login_user, logout_user, current_user, login_required
from werkzeug.urls import url_parse
from flask_server import flask_server, db
from flask_server.forms import (
LoginForm,
RegistrationForm,
PostForm,
UpdateForm,
ResetPWForm,
EditProfileForm,
)
from flask_server.models import User, Post
from datetime import datetime
from functools import wraps
methods = ["GET", "POST"]
@flask_server.route("/")
def index():
return redirect(url_for("discover"))
@flask_server.route("/discover")
def discover():
page = request.args.get("page", 1, type=int)
posts = Post.query.order_by(Post.timestamp.desc()).paginate(
page, flask_server.config["POSTS_PER_PAGE"], False
)
next_url = url_for("discover", page=posts.next_num) if posts.has_next else None
prev_url = url_for("discover", page=posts.prev_num) if posts.has_prev else None
return render_template(
"discover.html",
title="Argus",
posts=posts.items,
next_url=next_url,
prev_url=prev_url,
)
@flask_server.route("/login", methods=methods)
def login():
""" The controller to handle incoming GET and POST requests to the `/login` URL of the Flask web server.
1. Checks to see if the user is already logged in.
a. If so, returns a response object that redirects the client to the '/index' route.
- Results from a GET request from an authenticated user.
b. If the user is not already logged in, makes the `LoginForm` (created using Flask-WTF) available to the `templates/login` view by passing it and the view as parameters to Flask's built-in `render_template()` function.
- Results from a GET request from an unauthenticated user.
2. If a correct username/pw combo is submitted, then an HTTP request is made to the remote SQL database to query it for the user database object model with the current user's `username`.
        - This operation is safe because the database enforces unique `usernames` upon registration. Also, the `login_user()` method uses the primary key `user_id` to actually log the user in- this operation simply retrieves the user object.
        - Results from a POST request to this route when the form is submitted.
3. The user database object model is stored in a Python data structure and the controller makes it available to the Flask-Login method `login_user()` by passing it as a parameter to that method.
        a. The `login_user()` method populates Flask's [request context](https://flask.palletsprojects.com/en/1.1.x/reqcontext/) with the logged in user's database object model, which can then be accessed by this and other views and controllers to get information about the user.
b. Flask automatically pushes a new request context (`ctx`) to the stack when handling each request. View functions, error handlers, and other functions that run during the request lifecycle will have access to the [request proxy](https://flask.palletsprojects.com/en/1.1.x/api/#flask.request), which points to the request object for the current request.
c. Prior to a user logging in, `ctx.user` (an attribute of the request context) is an instance of the `AnonymousUserMixin` class. After a user logs in, `ctx.user` is an instance of the User database object model defined using the Flask-SQLAlchemy extension in `flask_server/models`.
d. Sucessfully calling `login_user()` creates a session, as all subsequent requests will now have access to user's database object model. Without using sessions, the user would have to send authenticated requests each time they wanted to access a protected view instead of just once at log in.
4. The user is redirected to the `index` view after sucessfully logging in.
Parameters
----------
param1 : string
The first parameter is the URL being requested by the client.
Returns
-------
str
The login/signup page of the app, as generated by the `templates/login` Jinja2 template.
"""
if current_user.is_authenticated:
return redirect(url_for("feed"))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash("Invalid username or password")
return redirect(url_for("login"))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get("next")
if not next_page or url_parse(next_page).netloc != "":
next_page = url_for("feed")
return redirect(next_page)
return render_template("login.html", title="Log In", form=form)
@flask_server.route("/feed")
@login_required
def feed():
""" The controller to handle incoming GET requests to the root and `/index` URLs of the Flask web server.
1. Uses the [`@login_required decorator`](https://flask-login.readthedocs.io/en/latest/#flask_login.login_required) imported from the [Flask-Login](https://flask-login.readthedocs.io/en/latest/) extension to ensure that the current user is logged-in before responding with the actual view.
a. If the user is not logged in, the [LoginManager.unauthorized() callback function](https://flask-login.readthedocs.io/en/latest/#flask_login.LoginManager.unauthorized) is fired, which redirects the user to the `/login` controller.
- Results from a GET request from an unauthenticated user.
- This is equivalent to making the following the first commands run by the controller:
```
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
```
b. If the user is logged in, then the controller retrieves their `user_id` from the current [request context](https://flask.palletsprojects.com/en/1.1.x/reqcontext/).
- [`Sessions`](https://flask.palletsprojects.com/en/1.1.x/api/?highlight=session#sessions) make it possible to persist data between requests (like the `user_id` of the user making requests),even though HTTP is a stateless protocol.
- Results from a GET request from an authenticated user.
2. Fetches the user's posts by making an HTTP request to the remote SQL database for all posts associated with their `user_id` (which is also the primary key of the Users table).
3. Stores the user's posts in a Python data structure, and makes them available to the `templates/index` view by passing it and the view as parameters to Flask's built-in [`render_template()`](https://flask.palletsprojects.com/en/1.1.x/api/?highlight=render_template#flask.render_template) function.
4. The `templates/index` view uses [Jinja2 HTML templating](https://jinja.palletsprojects.com/en/2.10.x/) to display:
- A list of posts created by the logged-in user with links to create/update/delete.
- Links to view the pitcher dashboard and logout of the app.
Parameters
----------
param1 : string
The URL being requested by the client.
Returns
-------
str
The feed page of the app, as generated by the `templates/feed` Jinja2 template.
"""
page = request.args.get("page", 1, type=int)
posts = current_user.followed_posts().paginate(
page, flask_server.config["POSTS_PER_PAGE"], False
)
next_url = url_for("feed", page=posts.next_num) if posts.has_next else None
prev_url = url_for("feed", page=posts.prev_num) if posts.has_prev else None
return render_template(
"feed.html",
title="My Feed",
posts=posts.items,
next_url=next_url,
prev_url=prev_url,
)
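# Hedged note on the pagination above: POSTS_PER_PAGE is read from the Flask
# config, so it must be defined somewhere in the app's configuration; the
# value below is an assumption used only for illustration, e.g. in config.py:
#
#     POSTS_PER_PAGE = 10
#
# With Flask-SQLAlchemy 2.x, query.paginate(page, per_page, error_out) returns
# a Pagination object whose .items, .has_next/.next_num and .has_prev/.prev_num
# attributes drive the next_url/prev_url links passed to the template.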
@flask_server.route("/logout")
def logout():
""" The controller to handle incoming GET requests to the `/logout` URL of the Flask web server.
1. Terminates the current user's session and redirects to the `discover` view.
- From now on, the request context user will be an instance of the `AnonymousUserMixin` class instead of an instance of a User database object model.
Parameters
----------
param1 : string
The first parameter is the URL being requested by the client.
Returns
-------
str
A redirect to the `discover` page of the app.
"""
logout_user()
flash("Logged out!")
return redirect(url_for("discover"))
@flask_server.route("/create", methods=methods)
@login_required
def create_post():
""" The controller to handle incoming GET and POST requests to the `/create` URL of the Flask web server.
1. Makes the `PostForm()` created using Flask-WTF available to the `templates/create` view by passing the view and the form as parameters to Flask's built-in `render_template()` function.
- Results from a GET request from an authenticated user.
2. If the submitted form passes validation, then an HTTP request is made to the remote SQL database requesting that a new row be inserted into the Posts table.
- There is a `one-to-many relationship` between `Users` and `Posts` because the foreign key of every row in the Post table is a `user_id` of a row from the Users table. Each user can have many posts but each post has only one user.
3. The user is redirected to the `index` view.
Parameters
----------
param1 : string
The first parameter is the URL being requested by the client.
Returns
-------
str
The create page generated by the Jinja2 template.
"""
form = PostForm()
time = datetime.utcnow()
if form.validate_on_submit():
try:
post = Post(user_id=current_user.id, url=form.url.data, body=form.body.data)
db.session.add(post)
db.session.commit()
flash("Congratulations, you have successfully created a post!")
return redirect(url_for("index"))
except Exception:
flash("Sorry, there was an error creating your post!")
return redirect(url_for("index"))
return render_template("create.html", title="Create Post", form=form)
@flask_server.route("/delete/<int:id>")
@login_required
def delete(id):
""" The controller to handle incoming GET requests to the `/delete` URL of the web server.
1. Queries the SQL database for the post with the specified `id`.
2. If the post was created by the logged-in user, then that row is deleted from the Posts table in the SQL database.
3. The user is redirected to the `index` view.
Parameters
----------
param1 : string
The first parameter is the URL being requested by the client.
Returns
-------
str
The index page of the app, as generated by the `templates/index` Jinja2 template.
(b'\xf0\x9f\x87\xb9\xf0\x9f\x87\xb2', 'flag_tm', 'flag_tm'),
(b'\xf0\x9f\x87\xb9\xf0\x9f\x87\xb3', 'flag_tn', 'flag_tn'),
(b'\xf0\x9f\x87\xb9\xf0\x9f\x87\xb4', 'flag_to', 'flag_to'),
(b'\xf0\x9f\x87\xb9\xf0\x9f\x87\xb7', 'flag_tr', 'flag_tr'),
(b'\xf0\x9f\x87\xb9\xf0\x9f\x87\xb9', 'flag_tt', 'flag_tt'),
(b'\xf0\x9f\x87\xb9\xf0\x9f\x87\xbb', 'flag_tv', 'flag_tv'),
(b'\xf0\x9f\x87\xb9\xf0\x9f\x87\xbc', 'flag_tw', 'flag_tw'),
(b'\xf0\x9f\x87\xb9\xf0\x9f\x87\xbf', 'flag_tz', 'flag_tz'),
(b'\xf0\x9f\x87\xba\xf0\x9f\x87\xa6', 'flag_ua', 'flag_ua'),
(b'\xf0\x9f\x87\xba\xf0\x9f\x87\xac', 'flag_ug', 'flag_ug'),
(b'\xf0\x9f\x87\xba\xf0\x9f\x87\xb2', 'flag_um', 'flag_um'),
(b'\xf0\x9f\x87\xba\xf0\x9f\x87\xb8', 'flag_us', 'flag_us'),
(b'\xf0\x9f\x87\xba\xf0\x9f\x87\xbe', 'flag_uy', 'flag_uy'),
(b'\xf0\x9f\x87\xba\xf0\x9f\x87\xbf', 'flag_uz', 'flag_uz'),
(b'\xf0\x9f\x87\xbb\xf0\x9f\x87\xa6', 'flag_va', 'flag_va'),
(b'\xf0\x9f\x87\xbb\xf0\x9f\x87\xa8', 'flag_vc', 'flag_vc'),
(b'\xf0\x9f\x87\xbb\xf0\x9f\x87\xaa', 'flag_ve', 'flag_ve'),
(b'\xf0\x9f\x87\xbb\xf0\x9f\x87\xac', 'flag_vg', 'flag_vg'),
(b'\xf0\x9f\x87\xbb\xf0\x9f\x87\xae', 'flag_vi', 'flag_vi'),
(b'\xf0\x9f\x87\xbb\xf0\x9f\x87\xb3', 'flag_vn', 'flag_vn'),
(b'\xf0\x9f\x87\xbb\xf0\x9f\x87\xba', 'flag_vu', 'flag_vu'),
(b'\xf0\x9f\x87\xbc\xf0\x9f\x87\xab', 'flag_wf', 'flag_wf'),
(b'\xf0\x9f\x8f\xb3', 'flag_white', 'flag_white_vs16'),
(b'\xf0\x9f\x87\xbc\xf0\x9f\x87\xb8', 'flag_ws', 'flag_ws'),
(b'\xf0\x9f\x87\xbd\xf0\x9f\x87\xb0', 'flag_xk', 'flag_xk'),
(b'\xf0\x9f\x87\xbe\xf0\x9f\x87\xaa', 'flag_ye', 'flag_ye'),
(b'\xf0\x9f\x87\xbe\xf0\x9f\x87\xb9', 'flag_yt', 'flag_yt'),
(b'\xf0\x9f\x87\xbf\xf0\x9f\x87\xa6', 'flag_za', 'flag_za'),
(b'\xf0\x9f\x87\xbf\xf0\x9f\x87\xb2', 'flag_zm', 'flag_zm'),
(b'\xf0\x9f\x87\xbf\xf0\x9f\x87\xbc', 'flag_zw', 'flag_zw'),
(b'\xf0\x9f\x8e\x8f', 'flags', 'flags'),
(b'\xf0\x9f\x94\xa6', 'flashlight', 'flashlight'),
(b'\xe2\x9a\x9c', 'fleur_de_lis', 'fleur_de_lis_vs16'),
(b'\xf0\x9f\x92\xbe', 'floppy_disk', 'floppy_disk'),
(b'\xf0\x9f\x8e\xb4', 'flower_playing_cards', 'flower_playing_cards'),
(b'\xf0\x9f\x98\xb3', 'flushed', 'flushed'),
(b'\xf0\x9f\x8c\xab', 'fog', 'fog_vs16'),
(b'\xf0\x9f\x8c\x81', 'foggy', 'foggy'),
(b'\xf0\x9f\x8f\x88', 'football', 'football'),
(b'\xf0\x9f\x91\xa3', 'footprints', 'footprints'),
(b'\xf0\x9f\x8d\xb4', 'fork_and_knife', 'fork_and_knife'),
(b'\xf0\x9f\x8d\xbd', 'fork_and_knife_with_plate', 'fork_and_knife_with_plate_vs16'),
(b'\xe2\x9b\xb2', 'fountain', 'fountain'),
(b'4\xe2\x83\xa3', 'four', 'four_vs16'),
(b'\xf0\x9f\x8d\x80', 'four_leaf_clover', 'four_leaf_clover'),
(b'\xf0\x9f\xa6\x8a', 'fox', 'fox', 'fox_face'),
(b'\xf0\x9f\x96\xbc', 'frame_photo', 'frame_photo_vs16'),
(b'\xf0\x9f\x86\x93', 'free', 'free'),
(b'\xf0\x9f\x8d\xa4', 'fried_shrimp', 'fried_shrimp'),
(b'\xf0\x9f\x8d\x9f', 'fries', 'fries'),
(b'\xf0\x9f\x90\xb8', 'frog', 'frog'),
(b'\xe2\x98\xb9', 'frowning2', 'frowning2_vs16'),
(b'\xe2\x9b\xbd', 'fuelpump', 'fuelpump'),
(b'\xf0\x9f\x8c\x95', 'full_moon', 'full_moon'),
(b'\xf0\x9f\x8c\x9d', 'full_moon_with_face', 'full_moon_with_face'),
(b'\xe2\x9a\xb1', 'funeral_urn', 'funeral_urn_vs16'),
(b'\xf0\x9f\x8e\xb2', 'game_die', 'game_die'),
(b'\xf0\x9f\x8f\xb3\xef\xb8\x8f\xe2\x80\x8d\xf0\x9f\x8c\x88', 'gay_pride_flag', 'gay_pride_flag', 'rainbow_flag'),
(b'\xe2\x9a\x99', 'gear', 'gear_vs16'),
(b'\xf0\x9f\x92\x8e', 'gem', 'gem'),
(b'\xe2\x99\x8a', 'gemini', 'gemini'),
(b'\xf0\x9f\x91\xbb', 'ghost', 'ghost'),
(b'\xf0\x9f\x8e\x81', 'gift', 'gift'),
(b'\xf0\x9f\x92\x9d', 'gift_heart', 'gift_heart'),
(b'\xf0\x9f\x91\xa7', 'girl', 'girl'),
(b'\xf0\x9f\x91\xa7\xf0\x9f\x8f\xbb', 'girl_tone1', 'girl_tone1'),
(b'\xf0\x9f\x91\xa7\xf0\x9f\x8f\xbc', 'girl_tone2', 'girl_tone2'),
(b'\xf0\x9f\x91\xa7\xf0\x9f\x8f\xbd', 'girl_tone3', 'girl_tone3'),
(b'\xf0\x9f\x91\xa7\xf0\x9f\x8f\xbe', 'girl_tone4', 'girl_tone4'),
(b'\xf0\x9f\x91\xa7\xf0\x9f\x8f\xbf', 'girl_tone5', 'girl_tone5'),
(b'\xf0\x9f\xa5\x9b', 'glass_of_milk', 'glass_of_milk', 'milk'),
(b'\xf0\x9f\x8c\x90', 'globe_with_meridians', 'globe_with_meridians'),
(b'\xf0\x9f\xa5\x85', 'goal', 'goal', 'goal_net'),
(b'\xf0\x9f\x90\x90', 'goat', 'goat'),
(b'\xe2\x9b\xb3', 'golf', 'golf'),
(b'\xf0\x9f\x8f\x8c', 'golfer', 'golfer_vs16'),
(b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbb', 'golfer_tone1', 'golfer_tone1', 'person_golfing_tone1'),
(b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbc', 'golfer_tone2', 'golfer_tone2', 'person_golfing_tone2'),
(b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbd', 'golfer_tone3', 'golfer_tone3', 'person_golfing_tone3'),
(b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbe', 'golfer_tone4', 'golfer_tone4', 'person_golfing_tone4'),
(b'\xf0\x9f\x8f\x8c\xf0\x9f\x8f\xbf', 'golfer_tone5', 'golfer_tone5', 'person_golfing_tone5'),
(b'\xf0\x9f\xa6\x8d', 'gorilla', 'gorilla'),
(b'\xf0\x9f\x91\xb5', 'grandma', 'grandma', 'older_woman'),
(b'\xf0\x9f\x91\xb5\xf0\x9f\x8f\xbb', 'grandma_tone1', 'grandma_tone1', 'older_woman_tone1'),
(b'\xf0\x9f\x91\xb5\xf0\x9f\x8f\xbc', 'grandma_tone2', 'grandma_tone2', 'older_woman_tone2'),
(b'\xf0\x9f\x91\xb5\xf0\x9f\x8f\xbd', 'grandma_tone3', 'grandma_tone3', 'older_woman_tone3'),
(b'\xf0\x9f\x91\xb5\xf0\x9f\x8f\xbe', 'grandma_tone4', 'grandma_tone4', 'older_woman_tone4'),
(b'\xf0\x9f\x91\xb5\xf0\x9f\x8f\xbf', 'grandma_tone5', 'grandma_tone5', 'older_woman_tone5'),
(b'\xf0\x9f\x8d\x87', 'grapes', 'grapes'),
(b'\xf0\x9f\x8d\x8f', 'green_apple', 'green_apple'),
(b'\xf0\x9f\x93\x97', 'green_book', 'green_book'),
(b'\xf0\x9f\x92\x9a', 'green_heart', 'green_heart'),
(b'\xf0\x9f\xa5\x97', 'green_salad', 'green_salad', 'salad'),
(b'\xe2\x9d\x95', 'grey_exclamation', 'grey_exclamation'),
(b'\xe2\x9d\x94', 'grey_question', 'grey_question'),
(b'\xf0\x9f\x98\xac', 'grimacing', 'grimacing'),
(b'\xf0\x9f\x98\x81', 'grin', 'grin'),
(b'\xf0\x9f\x98\x80', 'grinning', 'grinning'),
(b'\xf0\x9f\x92\x82', 'guardsman', 'guardsman', 'guard'),
(b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbb', 'guardsman_tone1', 'guardsman_tone1', 'guard_tone1'),
(b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbc', 'guardsman_tone2', 'guardsman_tone2', 'guard_tone2'),
(b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbd', 'guardsman_tone3', 'guardsman_tone3', 'guard_tone3'),
(b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbe', 'guardsman_tone4', 'guardsman_tone4', 'guard_tone4'),
(b'\xf0\x9f\x92\x82\xf0\x9f\x8f\xbf', 'guardsman_tone5', 'guardsman_tone5', 'guard_tone5'),
(b'\xf0\x9f\x8e\xb8', 'guitar', 'guitar'),
(b'\xf0\x9f\x94\xab', 'gun', 'gun'),
(b'\xf0\x9f\x92\x87', 'haircut', 'haircut', 'person_getting_haircut'),
(b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbb', 'haircut_tone1', 'haircut_tone1', 'person_getting_haircut_tone1'),
(b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbc', 'haircut_tone2', 'haircut_tone2', 'person_getting_haircut_tone2'),
(b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbd', 'haircut_tone3', 'haircut_tone3', 'person_getting_haircut_tone3'),
(b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbe', 'haircut_tone4', 'haircut_tone4', 'person_getting_haircut_tone4'),
(b'\xf0\x9f\x92\x87\xf0\x9f\x8f\xbf', 'haircut_tone5', 'haircut_tone5', 'person_getting_haircut_tone5'),
(b'\xf0\x9f\x8d\x94', 'hamburger', 'hamburger'),
(b'\xf0\x9f\x94\xa8', 'hammer', 'hammer'),
(b'\xe2\x9a\x92', 'hammer_and_pick', 'hammer_and_pick_vs16'),
(b'\xf0\x9f\x9b\xa0', 'hammer_and_wrench', 'hammer_and_wrench_vs16'),
(b'\xf0\x9f\x90\xb9', 'hamster', 'hamster'),
(b'\xf0\x9f\x96\x90', 'hand_splayed', 'hand_splayed_vs16'),
(b'\xf0\x9f\x96\x90\xf0\x9f\x8f\xbb', 'hand_splayed_tone1', 'hand_splayed_tone1', 'raised_hand_with_fingers_splayed_tone1'),
(b'\xf0\x9f\x96\x90\xf0\x9f\x8f\xbc', 'hand_splayed_tone2', 'hand_splayed_tone2', 'raised_hand_with_fingers_splayed_tone2'),
(b'\xf0\x9f\x96\x90\xf0\x9f\x8f\xbd', 'hand_splayed_tone3', 'hand_splayed_tone3', 'raised_hand_with_fingers_splayed_tone3'),
(b'\xf0\x9f\x96\x90\xf0\x9f\x8f\xbe', 'hand_splayed_tone4', 'hand_splayed_tone4', 'raised_hand_with_fingers_splayed_tone4'),
(b'\xf0\x9f\x96\x90\xf0\x9f\x8f\xbf', 'hand_splayed_tone5', 'hand_splayed_tone5', 'raised_hand_with_fingers_splayed_tone5'),
(b'\xf0\x9f\x91\x9c', 'handbag', 'handbag'),
(b'\xf0\x9f\xa4\xbe', 'handball', 'handball', 'person_playing_handball'),
(b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbb', 'handball_tone1', 'handball_tone1', 'person_playing_handball_tone1'),
(b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbc', 'handball_tone2', 'handball_tone2', 'person_playing_handball_tone2'),
(b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbd', 'handball_tone3', 'handball_tone3', 'person_playing_handball_tone3'),
(b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbe', 'handball_tone4', 'handball_tone4', 'person_playing_handball_tone4'),
(b'\xf0\x9f\xa4\xbe\xf0\x9f\x8f\xbf', 'handball_tone5', 'handball_tone5', 'person_playing_handball_tone5'),
(b'\xf0\x9f\xa4\x9d', 'handshake', 'handshake', 'shaking_hands'),
(b'\xf0\x9f\x92\xa9', 'hankey', 'hankey', 'poo', 'poop', 'shit'),
(b'#\xe2\x83\xa3', 'hash', 'hash_vs16'),
(b'\xf0\x9f\x90\xa5', 'hatched_chick', 'hatched_chick'),
(b'\xf0\x9f\x90\xa3', 'hatching_chick', 'hatching_chick'),
(b'\xf0\x9f\x8e\xa7', 'headphones', 'headphones'),
(b'\xf0\x9f\x99\x89', 'hear_no_evil', 'hear_no_evil'),
(b'\xe2\x9d\xa4', 'heart', 'heart_vs16'),
(b'\xf0\x9f\x92\x9f', 'heart_decoration', 'heart_decoration'),
(b'\xe2\x9d\xa3', 'heart_exclamation', 'heart_exclamation_vs16'),
(b'\xf0\x9f\x98\x8d', 'heart_eyes', 'heart_eyes'),
(b'\xf0\x9f\x98\xbb', 'heart_eyes_cat', 'heart_eyes_cat'),
(b'\xf0\x9f\x92\x93', 'heartbeat', 'heartbeat'),
(b'\xf0\x9f\x92\x97', 'heartpulse', 'heartpulse'),
(b'\xe2\x99\xa5', 'hearts', 'hearts_vs16'),
(b'\xe2\x9c\x94', 'heavy_check_mark', 'heavy_check_mark_vs16'),
(b'\xe2\x9e\x97', 'heavy_division_sign', 'heavy_division_sign'),
(b'\xf0\x9f\x92\xb2', 'heavy_dollar_sign', 'heavy_dollar_sign'),
(b'\xe2\x9e\x96', 'heavy_minus_sign', 'heavy_minus_sign'),
(b'\xe2\x9c\x96', 'heavy_multiplication_x', 'heavy_multiplication_x_vs16'),
(b'\xe2\x9e\x95', 'heavy_plus_sign', 'heavy_plus_sign'),
(b'\xf0\x9f\x9a\x81', 'helicopter', 'helicopter'),
(b'\xe2\x9b\x91', 'helmet_with_cross', 'helmet_with_cross_vs16'),
(b'\xf0\x9f\x8c\xbf', 'herb', 'herb'),
(b'\xf0\x9f\x8c\xba', 'hibiscus', 'hibiscus'),
(b'\xf0\x9f\x94\x86', 'high_brightness', 'high_brightness'),
(b'\xf0\x9f\x91\xa0', 'high_heel', 'high_heel'),
(b'\xf0\x9f\x8f\x92', 'hockey', 'hockey'),
(b'\xf0\x9f\x95\xb3', 'hole', 'hole_vs16'),
(b'\xf0\x9f\x8f\x98', 'homes', 'homes_vs16'),
(b'\xf0\x9f\x8d\xaf', 'honey_pot', 'honey_pot'),
(b'\xf0\x9f\x90\xb4', 'horse', 'horse'),
(b'\xf0\x9f\x8f\x87', 'horse_racing', 'horse_racing'),
(b'\xf0\x9f\x8f\x87\xf0\x9f\x8f\xbb', 'horse_racing_tone1', 'horse_racing_tone1'),
(b'\xf0\x9f\x8f\x87\xf0\x9f\x8f\xbc', 'horse_racing_tone2', 'horse_racing_tone2'),
(b'\xf0\x9f\x8f\x87\xf0\x9f\x8f\xbd', 'horse_racing_tone3', 'horse_racing_tone3'),
(b'\xf0\x9f\x8f\x87\xf0\x9f\x8f\xbe', 'horse_racing_tone4', 'horse_racing_tone4'),
(b'\xf0\x9f\x8f\x87\xf0\x9f\x8f\xbf', 'horse_racing_tone5', 'horse_racing_tone5'),
(b'\xf0\x9f\x8f\xa5', 'hospital', 'hospital'),
(b'\xf0\x9f\x8c\xad', 'hot_dog', 'hot_dog', 'hotdog'),
(b'\xf0\x9f\x8c\xb6', 'hot_pepper', 'hot_pepper_vs16'),
(b'\xf0\x9f\x8f\xa8', 'hotel', 'hotel'),
(b'\xe2\x99\xa8', 'hotsprings', 'hotsprings_vs16'),
(b'\xe2\x8c\x9b', 'hourglass', 'hourglass'),
(b'\xe2\x8f\xb3', 'hourglass_flowing_sand', 'hourglass_flowing_sand'),
(b'\xf0\x9f\x8f\xa0', 'house', 'house'),
(b'\xf0\x9f\x8f\xa1', 'house_with_garden', 'house_with_garden'),
(b'\xf0\x9f\xa4\x97', 'hugging', 'hugging', 'hugging_face'),
(b'\xf0\x9f\x98\xaf', 'hushed', 'hushed'),
(b'\xf0\x9f\x8d\xa8', 'ice_cream', 'ice_cream'),
(b'\xe2\x9b\xb8', 'ice_skate', 'ice_skate_vs16'),
(b'\xf0\x9f\x8d\xa6', 'icecream', 'icecream'),
(b'\xf0\x9f\x86\x94', 'id', 'id'),
(b'\xf0\x9f\x89\x90', 'ideograph_advantage', 'ideograph_advantage'),
(b'\xf0\x9f\x91\xbf', 'imp', 'imp', ']:(', ']:-(', ']=(', ']=-('),
(b'\xf0\x9f\x93\xa5', 'inbox_tray', 'inbox_tray'),
(b'\xf0\x9f\x93\xa8', 'incoming_envelope', 'incoming_envelope'),
(b'\xf0\x9f\x92\x81', 'information_desk_person', 'information_desk_person', 'person_tipping_hand'),
(b'\xf0\x9f\x92\x81\xf0\x9f\x8f\xbb', 'information_desk_person_tone1', 'information_desk_person_tone1', 'person_tipping_hand_tone1'),
(b'\xf0\x9f\x92\x81\xf0\x9f\x8f\xbc', 'information_desk_person_tone2', 'information_desk_person_tone2', 'person_tipping_hand_tone2'),
(b'\xf0\x9f\x92\x81\xf0\x9f\x8f\xbd', 'information_desk_person_tone3', 'information_desk_person_tone3', 'person_tipping_hand_tone3'),
(b'\xf0\x9f\x92\x81\xf0\x9f\x8f\xbe', 'information_desk_person_tone4', 'information_desk_person_tone4', 'person_tipping_hand_tone4'),
(b'\xf0\x9f\x92\x81\xf0\x9f\x8f\xbf', 'information_desk_person_tone5', 'information_desk_person_tone5', 'person_tipping_hand_tone5'),
(b'\xe2\x84\xb9', 'information_source', 'information_source_vs16'),
(b'\xe2\x81\x89', 'interrobang', 'interrobang_vs16'),
(b'\xf0\x9f\x93\xb1', 'iphone', 'iphone', 'mobile_phone'),
(b'\xf0\x9f\x8f\xae', 'izakaya_lantern', 'izakaya_lantern'),
(b'\xf0\x9f\x8e\x83', 'jack_o_lantern', 'jack_o_lantern'),
(b'\xf0\x9f\x97\xbe', 'japan', 'japan'),
(b'\xf0\x9f\x8f\xaf', 'japanese_castle', 'japanese_castle'),
(b'\xf0\x9f\x91\xba', 'japanese_goblin', 'japanese_goblin'),
(b'\xf0\x9f\x91\xb9', 'japanese_ogre', 'japanese_ogre'),
(b'\xf0\x9f\x91\x96', 'jeans', 'jeans'),
(b'\xf0\x9f\x98\xb9', 'joy_cat', 'joy_cat'),
(b'\xf0\x9f\x95\xb9', 'joystick', 'joystick_vs16'),
(b'\xf0\x9f\xa4\xb9', 'juggler', 'juggler', 'juggling', 'person_juggling'),
(b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbb', 'juggler_tone1', 'juggler_tone1', 'juggling_tone1', 'person_juggling_tone1'),
(b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbc', 'juggler_tone2', 'juggler_tone2', 'juggling_tone2', 'person_juggling_tone2'),
(b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbd', 'juggler_tone3', 'juggler_tone3', 'juggling_tone3', 'person_juggling_tone3'),
(b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbe', 'juggler_tone4', 'juggler_tone4', 'juggling_tone4', 'person_juggling_tone4'),
(b'\xf0\x9f\xa4\xb9\xf0\x9f\x8f\xbf', 'juggler_tone5', 'juggler_tone5', 'juggling_tone5', 'person_juggling_tone5'),
(b'\xf0\x9f\x95\x8b', 'kaaba', 'kaaba'),
(b'\xf0\x9f\xa5\x8b', 'karate_uniform', 'karate_uniform', 'martial_arts_uniform'),
(b'\xf0\x9f\x97\x9d', 'key2', 'key2_vs16'),
(b'\xf0\x9f\x94\x91', 'key', 'key'),
(b'\xe2\x8c\xa8', 'keyboard', 'keyboard_vs16'),
(b'\xf0\x9f\x94\x9f', 'keycap_ten', 'keycap_ten'),
(b'\xf0\x9f\x91\x98', 'kimono', 'kimono'),
(b'\xf0\x9f\x92\x8b', 'kiss', 'kiss'),
(b'\xf0\x9f\x98\xbd', 'kissing_cat', 'kissing_cat'),
(b'\xf0\x9f\x98\x9a', 'kissing_closed_eyes', 'kissing_closed_eyes'),
(b'\xf0\x9f\x98\x98', 'kissing_heart', 'kissing_heart'),
(b'\xf0\x9f\x98\x99', 'kissing_smiling_eyes', 'kissing_smiling_eyes'),
(b'\xf0\x9f\xa5\x9d', 'kiwi', 'kiwi', 'kiwifruit'),
(b'\xf0\x9f\x94\xaa', 'knife', 'knife'),
(b'\xf0\x9f\x90\xa8', 'koala', 'koala'),
(b'\xf0\x9f\x88\x81', 'koko', 'koko'),
(b'\xf0\x9f\x8f\xb7', 'label', 'label_vs16'),
(b'\xf0\x9f\x94\xb5', 'large_blue_circle', 'large_blue_circle', 'blue_circle'),
(b'\xf0\x9f\x94\xb7', 'large_blue_diamond', 'large_blue_diamond'),
(b'\xf0\x9f\x94\xb6', 'large_orange_diamond', 'large_orange_diamond'),
(b'\xf0\x9f\x8c\x97', 'last_quarter_moon', 'last_quarter_moon'),
(b'\xf0\x9f\x8c\x9c', 'last_quarter_moon_with_face', 'last_quarter_moon_with_face'),
(b'\xf0\x9f\x98\x86', 'laughing', 'laughing', 'satisfied', 'x-)', 'X-)'),
(b'\xf0\x9f\x8d\x83', 'leaves', 'leaves'),
(b'\xf0\x9f\x93\x92', 'ledger', 'ledger'),
(b'\xf0\x9f\xa4\x9b', 'left_facing_fist', 'left_facing_fist', 'left_fist'),
(b'\xf0\x9f\xa4\x9b\xf0\x9f\x8f\xbb', 'left_facing_fist_tone1', 'left_facing_fist_tone1', 'left_fist_tone1'),
(b'\xf0\x9f\xa4\x9b\xf0\x9f\x8f\xbc', 'left_facing_fist_tone2', 'left_facing_fist_tone2', 'left_fist_tone2'),
(b'\xf0\x9f\xa4\x9b\xf0\x9f\x8f\xbd', 'left_facing_fist_tone3', 'left_facing_fist_tone3', 'left_fist_tone3'),
(b'\xf0\x9f\xa4\x9b\xf0\x9f\x8f\xbe', 'left_facing_fist_tone4', 'left_facing_fist_tone4', 'left_fist_tone4'),
(b'\xf0\x9f\xa4\x9b\xf0\x9f\x8f\xbf', 'left_facing_fist_tone5', 'left_facing_fist_tone5', 'left_fist_tone5'),
(b'\xf0\x9f\x9b\x85', 'left_luggage', 'left_luggage'),
(b'\xe2\x86\x94', 'left_right_arrow', 'left_right_arrow_vs16'),
(b'\xf0\x9f\x97\xa8', 'left_speech_bubble', 'left_speech_bubble_vs16'),
(b'\xe2\x86\xa9', 'leftwards_arrow_with_hook', 'leftwards_arrow_with_hook_vs16'),
(b'\xf0\x9f\x8d\x8b', 'lemon', 'lemon'),
(b'\xe2\x99\x8c', 'leo', 'leo'),
(b'\xf0\x9f\x90\x86', 'leopard', 'leopard'),
(b'\xf0\x9f\x8e\x9a', 'level_slider', 'level_slider_vs16'),
(b'\xf0\x9f\x95\xb4', 'levitate', 'levitate_vs16'),
(b'\xf0\x9f\x95\xb4\xf0\x9f\x8f\xbb', 'levitate_tone1', 'levitate_tone1', 'man_in_business_suit_levitating_tone1'),
(b'\xf0\x9f\x95\xb4\xf0\x9f\x8f\xbc', 'levitate_tone2', 'levitate_tone2', 'man_in_business_suit_levitating_tone2'),
(b'\xf0\x9f\x95\xb4\xf0\x9f\x8f\xbd', 'levitate_tone3', 'levitate_tone3', 'man_in_business_suit_levitating_tone3'),
(b'\xf0\x9f\x95\xb4\xf0\x9f\x8f\xbe', 'levitate_tone4', 'levitate_tone4', 'man_in_business_suit_levitating_tone4'),
(b'\xf0\x9f\x95\xb4\xf0\x9f\x8f\xbf', 'levitate_tone5', 'levitate_tone5', 'man_in_business_suit_levitating_tone5'),
(b'\xf0\x9f\xa4\xa5', 'liar', 'liar', 'lying_face'),
(b'\xe2\x99\x8e', 'libra', 'libra'),
(b'\xf0\x9f\x8f\x8b', 'lifter', 'lifter_vs16'),
(b'\xf0\x9f\x8f\x8b\xf0\x9f\x8f\xbb', 'lifter_tone1', 'lifter_tone1', 'weight_lifter_tone1', 'person_lifting_weights_tone1'),
(b'\xf0\x9f\x8f\x8b\xf0\x9f\x8f\xbc', 'lifter_tone2', 'lifter_tone2', 'weight_lifter_tone2', 'person_lifting_weights_tone2'),
(b'\xf0\x9f\x8f\x8b\xf0\x9f\x8f\xbd', 'lifter_tone3', 'lifter_tone3', 'weight_lifter_tone3', 'person_lifting_weights_tone3'),
(b'\xf0\x9f\x8f\x8b\xf0\x9f\x8f\xbe', 'lifter_tone4', 'lifter_tone4', 'weight_lifter_tone4', 'person_lifting_weights_tone4'),
(b'\xf0\x9f\x8f\x8b\xf0\x9f\x8f\xbf', 'lifter_tone5', 'lifter_tone5', 'weight_lifter_tone5', 'person_lifting_weights_tone5'),
(b'\xf0\x9f\x9a\x88', 'light_rail', 'light_rail'),
(b'\xf0\x9f\x94\x97', 'link', 'link'),
(b'\xf0\x9f\x96\x87', 'linked_paperclips', 'linked_paperclips_vs16'),
(b'\xf0\x9f\xa6\x81', 'lion', 'lion', 'lion_face'),
(b'\xf0\x9f\x91\x84', 'lips', 'lips'),
(b'\xf0\x9f\x92\x84', 'lipstick', 'lipstick'),
(b'\xf0\x9f\xa6\x8e', 'lizard', 'lizard'),
(b'\xf0\x9f\x94\x92', 'lock', 'lock'),
(b'\xf0\x9f\x94\x8f', 'lock_with_ink_pen', 'lock_with_ink_pen'),
(b'\xf0\x9f\x8d\xad', 'lollipop', 'lollipop'),
(b'\xe2\x9e\xbf', 'loop', 'loop'),
(b'\xf0\x9f\x94\x8a', 'loud_sound', 'loud_sound'),
(b'\xf0\x9f\x93\xa2', 'loudspeaker', 'loudspeaker'),
(b'\xf0\x9f\x8f\xa9', 'love_hotel', 'love_hotel'),
(b'\xf0\x9f\x92\x8c', 'love_letter', 'love_letter'),
(b'\xf0\x9f\x94\x85', 'low_brightness', 'low_brightness'),
(b'\xf0\x9f\x96\x8a', 'lower_left_ballpoint_pen', 'lower_left_ballpoint_pen_vs16'),
(b'\xf0\x9f\x96\x8b', 'lower_left_fountain_pen', 'lower_left_fountain_pen_vs16'),
(b'\xf0\x9f\x96\x8c', 'lower_left_paintbrush', 'lower_left_paintbrush_vs16'),
(b'\xe2\x93\x82', 'm', 'm_vs16'),
(b'\xf0\x9f\x94\x8d', 'mag', 'mag'),
(b'\xf0\x9f\x94\x8e', 'mag_right', 'mag_right'),
(b'\xf0\x9f\x80\x84', 'mahjong', 'mahjong'),
(b'\xf0\x9f\x93\xab', 'mailbox', 'mailbox'),
(b'\xf0\x9f\x93\xaa', 'mailbox_closed', 'mailbox_closed'),
(b'\xf0\x9f\x93\xac', 'mailbox_with_mail', 'mailbox_with_mail'),
(b'\xf0\x9f\x93\xad', 'mailbox_with_no_mail', 'mailbox_with_no_mail'),
(b'\xf0\x9f\x95\xba', 'male_dancer', 'male_dancer', 'man_dancing'),
(b'\xf0\x9f\x95\xba\xf0\x9f\x8f\xbb', 'male_dancer_tone1', 'male_dancer_tone1', 'man_dancing_tone1'),
(b'\xf0\x9f\x95\xba\xf0\x9f\x8f\xbc', 'male_dancer_tone2', 'male_dancer_tone2', 'man_dancing_tone2'),
(b'\xf0\x9f\x95\xba\xf0\x9f\x8f\xbd', 'male_dancer_tone3', 'male_dancer_tone3', 'man_dancing_tone3'),
(b'\xf0\x9f\x95\xba\xf0\x9f\x8f\xbe', 'male_dancer_tone4', 'male_dancer_tone4', 'man_dancing_tone4'),
(b'\xf0\x9f\x95\xba\xf0\x9f\x8f\xbf', 'male_dancer_tone5', 'male_dancer_tone5', 'man_dancing_tone5'),
(b'\xf0\x9f\x91\xa8', 'man', 'man'),
(b'\xf0\x9f\x91\xa8\xf0\x9f\x8f\xbb', 'man_tone1', 'man_tone1'),
(b'\xf0\x9f\x91\xa8\xf0\x9f\x8f\xbc', 'man_tone2', 'man_tone2'),
(b'\xf0\x9f\x91\xa8\xf0\x9f\x8f\xbd', 'man_tone3', 'man_tone3'),
(b'\xf0\x9f\x91\xa8\xf0\x9f\x8f\xbe', 'man_tone4', 'man_tone4'),
(b'\xf0\x9f\x91\xa8\xf0\x9f\x8f\xbf', 'man_tone5', 'man_tone5'),
(b'\xf0\x9f\xa4\xb5', 'man_in_tuxedo', 'man_in_tuxedo', 'person_in_tuxedo'),
(b'\xf0\x9f\xa4\xb5\xf0\x9f\x8f\xbb', 'man_in_tuxedo_tone1', 'man_in_tuxedo_tone1', 'tuxedo_tone1', 'person_in_tuxedo_tone1'),
(b'\xf0\x9f\xa4\xb5\xf0\x9f\x8f\xbc', 'man_in_tuxedo_tone2', 'man_in_tuxedo_tone2', 'tuxedo_tone2', 'person_in_tuxedo_tone2'),
(b'\xf0\x9f\xa4\xb5\xf0\x9f\x8f\xbd', 'man_in_tuxedo_tone3', 'man_in_tuxedo_tone3', 'tuxedo_tone3', 'person_in_tuxedo_tone3'),
(b'\xf0\x9f\xa4\xb5\xf0\x9f\x8f\xbe', 'man_in_tuxedo_tone4', 'man_in_tuxedo_tone4', 'tuxedo_tone4', 'person_in_tuxedo_tone4'),
(b'\xf0\x9f\xa4\xb5\xf0\x9f\x8f\xbf', 'man_in_tuxedo_tone5', 'man_in_tuxedo_tone5', 'tuxedo_tone5', 'person_in_tuxedo_tone5'),
(b'\xf0\x9f\x91\xb2', 'man_with_gua_pi_mao', 'man_with_gua_pi_mao', 'man_with_chinese_cap'),
(b'\xf0\x9f\x91\xb2\xf0\x9f\x8f\xbb', 'man_with_gua_pi_mao_tone1', 'man_with_gua_pi_mao_tone1', 'man_with_chinese_cap_tone1'),
(b'\xf0\x9f\x91\xb2\xf0\x9f\x8f\xbc', 'man_with_gua_pi_mao_tone2', 'man_with_gua_pi_mao_tone2', 'man_with_chinese_cap_tone2'),
(b'\xf0\x9f\x91\xb2\xf0\x9f\x8f\xbd', 'man_with_gua_pi_mao_tone3', 'man_with_gua_pi_mao_tone3', 'man_with_chinese_cap_tone3'),
(b'\xf0\x9f\x91\xb2\xf0\x9f\x8f\xbe', 'man_with_gua_pi_mao_tone4', 'man_with_gua_pi_mao_tone4', 'man_with_chinese_cap_tone4'),
(b'\xf0\x9f\x91\xb2\xf0\x9f\x8f\xbf', 'man_with_gua_pi_mao_tone5', 'man_with_gua_pi_mao_tone5', 'man_with_chinese_cap_tone5'),
(b'\xf0\x9f\x91\xb3', 'man_with_turban', 'man_with_turban', 'person_wearing_turban'),
(b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbb', 'man_with_turban_tone1', 'man_with_turban_tone1', 'person_wearing_turban_tone1'),
(b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbc', 'man_with_turban_tone2', 'man_with_turban_tone2', 'person_wearing_turban_tone2'),
(b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbd', 'man_with_turban_tone3', 'man_with_turban_tone3', 'person_wearing_turban_tone3'),
(b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbe', 'man_with_turban_tone4', 'man_with_turban_tone4', 'person_wearing_turban_tone4'),
(b'\xf0\x9f\x91\xb3\xf0\x9f\x8f\xbf', 'man_with_turban_tone5', 'man_with_turban_tone5', 'person_wearing_turban_tone5'),
(b'\xf0\x9f\x91\x9e', 'mans_shoe', 'mans_shoe'),
(b'\xf0\x9f\x97\xba', 'map', 'map_vs16'),
(b'\xf0\x9f\x8d\x81', 'maple_leaf', 'maple_leaf'),
(b'\xf0\x9f\x98\xb7', 'mask', 'mask'),
(b'\xf0\x9f\x92\x86', 'massage', 'massage', 'person_getting_massage'),
(b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbb', 'massage_tone1', 'massage_tone1', 'person_getting_massage_tone1'),
(b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbc', 'massage_tone2', 'massage_tone2', 'person_getting_massage_tone2'),
(b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbd', 'massage_tone3', 'massage_tone3', 'person_getting_massage_tone3'),
(b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbe', 'massage_tone4', 'massage_tone4', 'person_getting_massage_tone4'),
(b'\xf0\x9f\x92\x86\xf0\x9f\x8f\xbf', 'massage_tone5', 'massage_tone5', 'person_getting_massage_tone5'),
(b'\xf0\x9f\x8d\x96', 'meat_on_bone', 'meat_on_bone'),
(b'\xf0\x9f\x8f\x85', 'medal', 'medal', 'sports_medal'),
(b'\xf0\x9f\x93\xa3', 'mega', 'mega'),
(b'\xf0\x9f\x8d\x88', 'melon', 'melon'),
(b'\xf0\x9f\x95\x8e', 'menorah', 'menorah'),
(b'\xf0\x9f\x9a\xb9', 'mens', 'mens'),
(b'\xf0\x9f\xa4\x98', 'metal', 'metal', 'sign_of_the_horns'),
(b'\xf0\x9f\xa4\x98\xf0\x9f\x8f\xbb', 'metal_tone1', 'metal_tone1', 'sign_of_the_horns_tone1'),
(b'\xf0\x9f\xa4\x98\xf0\x9f\x8f\xbc', 'metal_tone2', 'metal_tone2', 'sign_of_the_horns_tone2'),
(b'\xf0\x9f\xa4\x98\xf0\x9f\x8f\xbd', 'metal_tone3', 'metal_tone3', 'sign_of_the_horns_tone3'),
(b'\xf0\x9f\xa4\x98\xf0\x9f\x8f\xbe', 'metal_tone4', 'metal_tone4', 'sign_of_the_horns_tone4'),
(b'\xf0\x9f\xa4\x98\xf0\x9f\x8f\xbf', 'metal_tone5', 'metal_tone5', 'sign_of_the_horns_tone5'),
(b'\xf0\x9f\x9a\x87', 'metro', 'metro'),
(b'\xf0\x9f\x8e\x99', 'microphone2', 'microphone2_vs16'),
(b'\xf0\x9f\x8e\xa4', 'microphone', 'microphone'),
(b'\xf0\x9f\x94\xac', 'microscope', 'microscope'),
(b'\xf0\x9f\x96\x95', 'middle_finger', 'middle_finger', 'reversed_hand_with_middle_finger_extended'),
(b'\xf0\x9f\x96\x95\xf0\x9f\x8f\xbb', 'middle_finger_tone1', 'middle_finger_tone1', 'reversed_hand_with_middle_finger_extended_tone1'),
(b'\xf0\x9f\x96\x95\xf0\x9f\x8f\xbc', 'middle_finger_tone2', 'middle_finger_tone2', 'reversed_hand_with_middle_finger_extended_tone2'),
(b'\xf0\x9f\x96\x95\xf0\x9f\x8f\xbd', 'middle_finger_tone3', 'middle_finger_tone3', 'reversed_hand_with_middle_finger_extended_tone3'),
(b'\xf0\x9f\x96\x95\xf0\x9f\x8f\xbe', 'middle_finger_tone4', 'middle_finger_tone4', 'reversed_hand_with_middle_finger_extended_tone4'),
(b'\xf0\x9f\x96\x95\xf0\x9f\x8f\xbf', 'middle_finger_tone5', 'middle_finger_tone5', 'reversed_hand_with_middle_finger_extended_tone5'),
(b'\xf0\x9f\x8e\x96', 'military_medal', 'military_medal_vs16'),
(b'\xf0\x9f\x8c\x8c', 'milky_way', 'milky_way'),
(b'\xf0\x9f\x9a\x90', 'minibus', 'minibus'),
(b'\xf0\x9f\x92\xbd', 'minidisc', 'minidisc'),
(b'\xf0\x9f\x93\xb4', 'mobile_phone_off', 'mobile_phone_off'),
(b'\xf0\x9f\xa4\x91', 'money_mouth', 'money_mouth', 'money_mouth_face'),
(b'\xf0\x9f\x92\xb8', 'money_with_wings', 'money_with_wings'),
(b'\xf0\x9f\x92\xb0', 'moneybag', 'moneybag'),
(b'\xf0\x9f\x90\x92', 'monkey', 'monkey'),
(b'\xf0\x9f\x90\xb5', 'monkey_face', 'monkey_face'),
(b'\xf0\x9f\x9a\x9d', 'monorail', 'monorail'),
(b'\xf0\x9f\x8e\x93', 'mortar_board', 'mortar_board'),
(b'\xf0\x9f\x95\x8c', 'mosque', 'mosque'),
(b'\xf0\x9f\xa4\xb6', 'mother_christmas', 'mother_christmas', 'mrs_claus'),
(b'\xf0\x9f\xa4\xb6\xf0\x9f\x8f\xbb', 'mother_christmas_tone1', 'mother_christmas_tone1', 'mrs_claus_tone1'),
(b'\xf0\x9f\xa4\xb6\xf0\x9f\x8f\xbc', 'mother_christmas_tone2', 'mother_christmas_tone2', 'mrs_claus_tone2'),
(b'\xf0\x9f\xa4\xb6\xf0\x9f\x8f\xbd', 'mother_christmas_tone3', 'mother_christmas_tone3', 'mrs_claus_tone3'),
(b'\xf0\x9f\xa4\xb6\xf0\x9f\x8f\xbe', 'mother_christmas_tone4', 'mother_christmas_tone4', 'mrs_claus_tone4'),
(b'\xf0\x9f\xa4\xb6\xf0\x9f\x8f\xbf', 'mother_christmas_tone5', 'mother_christmas_tone5', 'mrs_claus_tone5'),
(b'\xf0\x9f\x9b\xb5', 'motor_scooter', 'motor_scooter', 'motorbike'),
(b'\xf0\x9f\x9b\xa5', 'motorboat', 'motorboat_vs16'),
(b'\xf0\x9f\x8f\x8d', 'motorcycle', 'motorcycle_vs16'),
(b'\xf0\x9f\x9b\xa3', 'motorway', 'motorway_vs16'),
(b'\xf0\x9f\x97\xbb', 'mount_fuji', 'mount_fuji'),
await simple_embed(ctx, "Please enter the full url.")
return
params = {}
for i in range(1, 9):
params["card" + str(i)] = card_keys[i - 1]
await ctx.invoke(self.bot.get_command("deck add"), **params)
try:
await ctx.message.delete()
except Exception:
pass
@deck.command(name="list")
async def deck_list(self, ctx: commands.Context, member: discord.Member = None):
"""List the decks of a user."""
member_is_author = False
if member is None:
member = ctx.author
member_is_author = True
decks = await self.settings.member(member).decks()
deck_id = 1
for time_stamp, deck in decks.items():
url = await self.decklink_url(deck["Deck"], war=False)
await self.upload_deck_image(
ctx,
deck["Deck"],
deck["DeckName"],
member,
description="**{}**. {}".format(deck_id, deck["DeckName"]),
title="Copy deck",
url=url,
)
# await self.decklink(ctx, deck["Deck"])
deck_id += 1
if not len(decks):
if member_is_author:
await simple_embed(
ctx,
"You don’t have any decks stored.\n"
"Type `!deck add` to add some.",
)
else:
await simple_embed(
ctx, "{} hasn’t added any decks yet.".format(member.display_name)
)
@deck.command(name="longlist")
async def deck_longlist(self, ctx: commands.Context, member: discord.Member = None):
"""List the decks of a user."""
member_is_author = False
if not member:
member = ctx.author
member_is_author = True
decks = await self.settings.member(member).decks()
if not len(decks):
if member_is_author:
await simple_embed(
ctx,
"You don’t have any decks stored.\n"
"Type `!deck add` to add some.",
)
else:
await simple_embed(
ctx, "{} hasn’t added any decks yet.".format(member.display_name)
)
return
deck_id = 1
results_max = 3
for k, deck in decks.items():
await self.upload_deck_image(
ctx,
deck["Deck"],
deck["DeckName"],
member,
description="**{}**. {}".format(deck_id, deck["DeckName"]),
)
deck_id += 1
if (deck_id - 1) % results_max == 0:
if deck_id < len(decks):
await ctx.send("Would you like to see the next results?")
pred = MessagePredicate.yes_or_no(ctx)
await self.bot.wait_for("message", check=pred)
answer = pred.result
if not answer:
await ctx.send("Results aborted.")
return
@deck.command(name="show")
async def deck_show(
self,
ctx: commands.Context,
deck_id: int,
member: Optional[discord.Member] = None,
):
"""Show the deck of a user by id. With link to copy."""
if not member:
member = ctx.author
deck_id -= 1
decks = await self.settings.member(member).decks()
if not decks:
await simple_embed(ctx, "You have not added any decks.")
return
if deck_id >= len(decks):
await simple_embed(ctx, "This deck does not exist.")
return
for i, deck in enumerate(decks.values()):
if i == deck_id:
await self.deck_upload(ctx, deck["Deck"], deck["DeckName"], member)
# generate link
await self.decklink(ctx, deck["Deck"])
async def decklink(self, ctx: commands.Context, deck_cards):
"""Show deck link depending on settings."""
decklink_setting = await self.decklink_settings(ctx.guild)
if decklink_setting == "embed":
em = await self.decklink_embed(deck_cards)
await ctx.send(embed=em)
elif decklink_setting == "link":
url = await self.decklink_url(deck_cards)
await ctx.send("<{}>".format(url))
async def decklink_embed(self, deck_cards, war=False):
"""Decklink embed."""
url = await self.decklink_url(deck_cards, war=war)
if war:
em = discord.Embed(
title="Copy deck to war deck", url=url, timestamp=dt.datetime.utcnow(),
)
else:
em = discord.Embed(
title="Copy deck", url=url, timestamp=dt.datetime.utcnow(),
)
em.set_footer(text=credits, icon_url=credits_icon)
return em
async def decklink_url(self, deck_cards, war=False):
"""Decklink URL."""
deck_cards = self.normalize_deck_data(deck_cards)
ids = []
for card in deck_cards:
card_id = await self.card_key_to_id(card)
if card_id is not None:
ids.append(card_id)
url = "https://link.clashroyale.com/deck/en?deck=" + ";".join(ids)
if war:
url += "&war=1"
return url
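# Hedged example of the URL built above (the card ids shown are placeholders,
# not verified Clash Royale ids): eight numeric ids joined with ";" on the
# official deep-link endpoint, with "&war=1" appended when war=True.
#
#     https://link.clashroyale.com/deck/en?deck=26000000;26000001;26000002;...
#     https://link.clashroyale.com/deck/en?deck=...&war=1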
@deck.command(name="cards")
async def deck_cards(self, ctx: commands.Context):
"""Display all available cards and acceptable abbreviations."""
out = []
for card in sorted(self.cards, key=lambda x: x["name"].lower()):
key = card["key"]
names = [key]
name = card["name"]
for abbrev_k, abbrev_v in self.cards_abbrev.items():
if abbrev_v == key:
names.append(abbrev_k)
rarity = card["rarity"]
elixir = card["elixir"]
out.append(
"**{}** ({}, {} elixir): {}".format(
name, rarity, elixir, ", ".join(names)
)
)
pages = []
for page in pagify("\n".join(out), shorten_by=24):
embed = discord.Embed(description=page, timestamp=dt.datetime.utcnow(),)
embed.set_footer(text=credits, icon_url=credits_icon)
pages.append(embed)
await menu(ctx, pages, DEFAULT_CONTROLS, timeout=PAGINATION_TIMEOUT)
@deck.command(name="search")
async def deck_search(self, ctx: commands.Context, *params):
"""Search all decks by cards."""
if not len(params):
await simple_embed(ctx, "You must enter at least one card to search.")
return
all_members_data = list((await self.settings.all_members()).values())
# normalize params
params = self.normalize_deck_data(params)
found_decks = []
for member_data in all_members_data:
for member_id, server_member in member_data.items():
member_decks = server_member["decks"]
member = self.bot.get_user(member_id)
if member:
member_display_name = getattr(member, "display_name", None)
else:
member = member_id
member_display_name = member_id
for k, member_deck in member_decks.items():
cards = member_deck["Deck"]
# await self.bot.say(set(params))
if set(params) <= set(cards):
found_decks.append(
{
"UTC": k,
"Deck": member_deck["Deck"],
"DeckName": member_deck["DeckName"],
"Member": member,
"MemberDisplayName": member_display_name,
}
)
found_decks = sorted(found_decks, key=lambda x: x["UTC"], reverse=True)
await ctx.send("Found {} decks".format(len(found_decks)))
if len(found_decks):
results_max = 3
deck_id = 1
for deck in found_decks:
timestamp = deck["UTC"][:19]
description = "**{}. {}** by {} — {}".format(
deck_id, deck["DeckName"], deck["MemberDisplayName"], timestamp
)
await self.upload_deck_image(
ctx,
deck["Deck"],
deck["DeckName"],
deck["Member"],
description=description,
)
deck_id += 1
if (deck_id - 1) % results_max == 0:
if deck_id < len(found_decks):
await ctx.send("Would you like to see the next results?")
pred = MessagePredicate.yes_or_no(ctx)
await self.bot.wait_for("message", check=pred)
answer = pred.result
if not answer:
await ctx.send("Results aborted.")
return
@deck.command(name="rename")
async def deck_rename(self, ctx: commands.Context, deck_id: int, *, new_name: str):
"""Rename a deck based on deck id.
Syntax: !deck rename [deck_id] [new_name]
where deck_id is the number associated with the deck when you run !deck list
"""
author = ctx.message.author
deck_id = int(deck_id) - 1
async with self.settings.member(author).decks() as member_decks:
# check member has data
if not len(member_decks):
await simple_embed(ctx, "You have not added any decks.")
return
if deck_id >= len(member_decks):
await simple_embed(ctx, "The deck id you have entered is invalid.")
return
for i, deck in enumerate(member_decks.values()):
if deck_id == i:
deck["DeckName"] = new_name
await simple_embed(ctx, "Deck renamed to {}.".format(new_name))
await self.deck_upload(ctx, deck["Deck"], new_name, author)
@deck.command(name="remove")
async def deck_remove(self, ctx: commands.Context, deck_id: int):
"""Remove a deck by deck id."""
author = ctx.message.author
async with self.settings.member(author).decks() as member_decks:
if not len(member_decks):
await simple_embed(ctx, "You have not added any decks.")
else:
deck_id = int(deck_id) - 1
if deck_id >= len(member_decks):
await simple_embed(ctx, "The deck id you have entered is invalid.")
else:
remove_key = ""
for i, key in enumerate(member_decks.keys()):
if deck_id == i:
remove_key = key
member_decks.pop(remove_key)
await simple_embed(ctx, "Deck {} removed.".format(deck_id + 1))
@deck.command(name="help")
async def deck_help(self, ctx: commands.Context):
"""Complete help and tutorial."""
await simple_embed(
ctx,
"Please visit [this link]({}) for an illustrated guide.".format(HELP_URL),
)
async def deck_upload(
self, ctx: commands.Context, member_deck, deck_name: str, member=None
):
"""Upload deck to Discord."""
author = ctx.message.author
if member is None:
member = author
member_deck = self.normalize_deck_data(member_deck)
deck_is_valid = True
# Ensure: exactly 8 cards are entered
if len(member_deck) != 8:
await ctx.send(
"You have entered {} card{}. "
"Please enter exactly 8 cards.".format(
len(member_deck), "s" if len(member_deck) > 1 else ""
)
)
await ctx.send_help()
deck_is_valid = False
# Ensure: card names are valid
if not set(member_deck) <= set(self.valid_card_keys):
for card in member_deck:
if card not in self.valid_card_keys:
await ctx.send("**{}** is not a valid card name.".format(card))
await ctx.send("\nType `{}deck cards` for the full list".format(ctx.prefix))
deck_is_valid = False
if deck_is_valid:
await self.post_deck(
channel=ctx.message.channel,
card_keys=member_deck,
deck_name=deck_name,
deck_author=member.display_name,
)
self.deck_is_valid = deck_is_valid
async def upload_deck_image(
self, ctx: commands.Context, deck, deck_name, author, **embed_params
):
"""Upload deck image to the server."""
deck_image = await self.bot.loop.run_in_executor(
None, self.get_deck_image, deck, deck_name, author
)
# construct a filename using first three letters of each card
filename = "deck-{}.png".format("-".join([card[:3] for card in deck]))
message = None
with io.BytesIO() as f:
deck_image.save(f, "PNG")
f.seek(0)
timestamp = embed_params.pop("timestamp", dt.datetime.utcnow())
embed = discord.Embed(timestamp=timestamp, **embed_params,)
embed.set_image(url="attachment://{}".format(filename))
embed.set_footer(text=credits, icon_url=credits_icon)
message = await ctx.message.channel.send(
file=discord.File(f, filename=filename), embed=embed
)
return message
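# Note on the attachment:// pattern used above: an embed can display a file
# uploaded in the same message by referencing it as "attachment://<filename>",
# so embed.set_image(...) and discord.File(..., filename=...) must use exactly
# the same filename for the image to render inside the embed.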
async def upload_deck_image_to(
self, channel, deck, deck_name, author, **embed_params
):
"""Upload deck image to destination."""
deck_image = await self.bot.loop.run_in_executor(
None, self.get_deck_image, deck, deck_name, author
)
# construct a filename using first three letters of each card
filename = "deck-{}.png".format("-".join([card[:3] for card in deck]))
message = None
with io.BytesIO() as f:
deck_image.save(f, "PNG")
f.seek(0)
timestamp = embed_params.pop("timestamp", dt.datetime.utcnow())
embed = discord.Embed(timestamp=timestamp, **embed_params)
embed.set_image(url="attachment://{}".format(filename))
embed.set_footer(text=credits, icon_url=credits_icon)
message = await channel.send(
file=discord.File(f, filename=filename), embed=embed
)
# message = await self.bot.send_file(
# , f,
# filename=filename, content=description)
return message
def get_deck_elxir(self, card_keys):
# elixir
total_elixir = 0
# total cards, excluding 0-elixir cards such as mirror
card_count = 0
for card in self.cards:
if card["key"] in card_keys:
total_elixir += card["elixir"]
if card["elixir"]:
card_count += 1
average_elixir = "{:.3f}".format(total_elixir / card_count)
return average_elixir
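# Hedged worked example for the average above: for a deck whose elixir costs
# are [3, 3, 4, 5, 2, 1, 6, 0] (the 0 being mirror), total_elixir is 24 and
# card_count is 7, so the method returns "3.429" (24 / 7 to three decimals).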
def get_deck_image(self, deck, deck_name=None, deck_author=None):
"""Construct the deck with Pillow and return image."""
card_w = 302
card_h = 363
card_x = 30
card_y = 30
font_size = 50
txt_y_line1 | |
kommunenavn[int(1845.0)] = createStringReference("S\u00f8rfold")
kommunenavn[int(1848.0)] = createStringReference("Steigen")
kommunenavn[int(1849.0)] = createStringReference("Hamar\u00f8y \u2013 H\u00e1bmer")
kommunenavn[int(1850.0)] = createStringReference("Divtasvuodna \u2013 Tysfjord")
kommunenavn[int(1851.0)] = createStringReference("L\u00f8dingen")
kommunenavn[int(1852.0)] = createStringReference("Tjeldsund")
kommunenavn[int(1853.0)] = createStringReference("Evenes")
kommunenavn[int(1854.0)] = createStringReference("Ballangen")
kommunenavn[int(1856.0)] = createStringReference("R\u00f8st")
kommunenavn[int(1857.0)] = createStringReference("V\u00e6r\u00f8y")
kommunenavn[int(1859.0)] = createStringReference("Flakstad")
kommunenavn[int(1860.0)] = createStringReference("Vestv\u00e5g\u00f8y")
kommunenavn[int(1865.0)] = createStringReference("V\u00e5gan")
kommunenavn[int(1866.0)] = createStringReference("Hadsel")
kommunenavn[int(1867.0)] = createStringReference("B\u00f8")
kommunenavn[int(1868.0)] = createStringReference("\u00d8ksnes")
kommunenavn[int(1870.0)] = createStringReference("Sortland \u2013 Suort\u00e1")
kommunenavn[int(1871.0)] = createStringReference("And\u00f8y")
kommunenavn[int(1874.0)] = createStringReference("Moskenes")
kommunenavn[int(1903.0)] = createStringReference("Harstad \u2013 H\u00e1rstt\u00e1k")
kommunenavn[int(1902.0)] = createStringReference("Troms\u00f8")
kommunenavn[int(1911.0)] = createStringReference("Kv\u00e6fjord")
kommunenavn[int(1913.0)] = createStringReference("Sk\u00e5nland")
kommunenavn[int(1917.0)] = createStringReference("Ibestad")
kommunenavn[int(1919.0)] = createStringReference("Gratangen")
kommunenavn[int(1920.0)] = createStringReference("Loab\u00e1k \u2013 Lavangen")
kommunenavn[int(1922.0)] = createStringReference("Bardu")
kommunenavn[int(1923.0)] = createStringReference("Salangen")
kommunenavn[int(1924.0)] = createStringReference("M\u00e5lselv")
kommunenavn[int(1925.0)] = createStringReference("S\u00f8rreisa")
kommunenavn[int(1926.0)] = createStringReference("Dyr\u00f8y")
kommunenavn[int(1927.0)] = createStringReference("Tran\u00f8y")
kommunenavn[int(1928.0)] = createStringReference("Torsken")
kommunenavn[int(1929.0)] = createStringReference("Berg")
kommunenavn[int(1931.0)] = createStringReference("Lenvik")
kommunenavn[int(1933.0)] = createStringReference("Balsfjord")
kommunenavn[int(1936.0)] = createStringReference("Karls\u00f8y")
kommunenavn[int(1938.0)] = createStringReference("Lyngen")
kommunenavn[int(1939.0)] = createStringReference("Storfjord \u2013 Omasvuotna \u2013 Omasvuono")
kommunenavn[int(1940.0)] = createStringReference("G\u00e1ivuotna \u2013 K\u00e5fjord \u2013 Kaivuono")
kommunenavn[int(1941.0)] = createStringReference("Skjerv\u00f8y")
kommunenavn[int(1942.0)] = createStringReference("Nordreisa")
kommunenavn[int(1943.0)] = createStringReference("Kv\u00e6nangen")
kommunenavn[int(2002.0)] = createStringReference("Vard\u00f8")
kommunenavn[int(2003.0)] = createStringReference("Vads\u00f8")
kommunenavn[int(2004.0)] = createStringReference("Hammerfest")
kommunenavn[int(2011.0)] = createStringReference("Guovdageaidnu \u2013 Kautokeino")
kommunenavn[int(2012.0)] = createStringReference("Alta")
kommunenavn[int(2014.0)] = createStringReference("Loppa")
kommunenavn[int(2015.0)] = createStringReference("Hasvik")
kommunenavn[int(2017.0)] = createStringReference("Kvalsund")
kommunenavn[int(2018.0)] = createStringReference("M\u00e5s\u00f8y")
kommunenavn[int(2019.0)] = createStringReference("Nordkapp")
kommunenavn[int(2020.0)] = createStringReference("Porsanger \u2013 Pors\u00e1\u014bgu \u2013 Porsanki")
kommunenavn[int(2021.0)] = createStringReference("K\u00e1r\u00e1\u0161johka \u2013 Karasjok")
kommunenavn[int(2022.0)] = createStringReference("Lebesby")
kommunenavn[int(2023.0)] = createStringReference("Gamvik")
kommunenavn[int(2024.0)] = createStringReference("Berlev\u00e5g")
kommunenavn[int(2025.0)] = createStringReference("Deatnu \u2013 Tana")
kommunenavn[int(2027.0)] = createStringReference("Unj\u00e1rga \u2013 Nesseby")
kommunenavn[int(2028.0)] = createStringReference("B\u00e5tsfjord")
kommunenavn[int(2030.0)] = createStringReference("S\u00f8r-Varanger")
return kommunenavn
def hentGyldigeKommunenummer():
gyldigeKommunenummer = [None]*int(425.0 + 1.0)
gyldigeKommunenummer[int(0.0)] = 101.0
gyldigeKommunenummer[int(1.0)] = 104.0
gyldigeKommunenummer[int(2.0)] = 105.0
gyldigeKommunenummer[int(3.0)] = 106.0
gyldigeKommunenummer[int(4.0)] = 111.0
gyldigeKommunenummer[int(5.0)] = 118.0
gyldigeKommunenummer[int(6.0)] = 119.0
gyldigeKommunenummer[int(7.0)] = 121.0
gyldigeKommunenummer[int(8.0)] = 122.0
gyldigeKommunenummer[int(9.0)] = 123.0
gyldigeKommunenummer[int(10.0)] = 124.0
gyldigeKommunenummer[int(11.0)] = 125.0
gyldigeKommunenummer[int(12.0)] = 127.0
gyldigeKommunenummer[int(13.0)] = 128.0
gyldigeKommunenummer[int(14.0)] = 135.0
gyldigeKommunenummer[int(15.0)] = 136.0
gyldigeKommunenummer[int(16.0)] = 137.0
gyldigeKommunenummer[int(17.0)] = 138.0
gyldigeKommunenummer[int(18.0)] = 211.0
gyldigeKommunenummer[int(19.0)] = 213.0
gyldigeKommunenummer[int(20.0)] = 214.0
gyldigeKommunenummer[int(21.0)] = 215.0
gyldigeKommunenummer[int(22.0)] = 216.0
gyldigeKommunenummer[int(23.0)] = 217.0
gyldigeKommunenummer[int(24.0)] = 219.0
gyldigeKommunenummer[int(25.0)] = 220.0
gyldigeKommunenummer[int(26.0)] = 221.0
gyldigeKommunenummer[int(27.0)] = 226.0
gyldigeKommunenummer[int(28.0)] = 227.0
gyldigeKommunenummer[int(29.0)] = 228.0
gyldigeKommunenummer[int(30.0)] = 229.0
gyldigeKommunenummer[int(31.0)] = 230.0
gyldigeKommunenummer[int(32.0)] = 231.0
gyldigeKommunenummer[int(33.0)] = 233.0
gyldigeKommunenummer[int(34.0)] = 234.0
gyldigeKommunenummer[int(35.0)] = 235.0
gyldigeKommunenummer[int(36.0)] = 236.0
gyldigeKommunenummer[int(37.0)] = 237.0
gyldigeKommunenummer[int(38.0)] = 238.0
gyldigeKommunenummer[int(39.0)] = 239.0
gyldigeKommunenummer[int(40.0)] = 301.0
gyldigeKommunenummer[int(41.0)] = 402.0
gyldigeKommunenummer[int(42.0)] = 403.0
gyldigeKommunenummer[int(43.0)] = 412.0
gyldigeKommunenummer[int(44.0)] = 415.0
gyldigeKommunenummer[int(45.0)] = 417.0
gyldigeKommunenummer[int(46.0)] = 418.0
gyldigeKommunenummer[int(47.0)] = 419.0
gyldigeKommunenummer[int(48.0)] = 420.0
gyldigeKommunenummer[int(49.0)] = 423.0
gyldigeKommunenummer[int(50.0)] = 425.0
gyldigeKommunenummer[int(51.0)] = 426.0
gyldigeKommunenummer[int(52.0)] = 427.0
gyldigeKommunenummer[int(53.0)] = 428.0
gyldigeKommunenummer[int(54.0)] = 429.0
gyldigeKommunenummer[int(55.0)] = 430.0
gyldigeKommunenummer[int(56.0)] = 432.0
gyldigeKommunenummer[int(57.0)] = 434.0
gyldigeKommunenummer[int(58.0)] = 436.0
gyldigeKommunenummer[int(59.0)] = 437.0
gyldigeKommunenummer[int(60.0)] = 438.0
gyldigeKommunenummer[int(61.0)] = 439.0
gyldigeKommunenummer[int(62.0)] = 441.0
gyldigeKommunenummer[int(63.0)] = 501.0
gyldigeKommunenummer[int(64.0)] = 502.0
gyldigeKommunenummer[int(65.0)] = 511.0
gyldigeKommunenummer[int(66.0)] = 512.0
gyldigeKommunenummer[int(67.0)] = 513.0
gyldigeKommunenummer[int(68.0)] = 514.0
gyldigeKommunenummer[int(69.0)] = 515.0
gyldigeKommunenummer[int(70.0)] = 516.0
gyldigeKommunenummer[int(71.0)] = 517.0
gyldigeKommunenummer[int(72.0)] = 519.0
gyldigeKommunenummer[int(73.0)] = 520.0
gyldigeKommunenummer[int(74.0)] = 521.0
gyldigeKommunenummer[int(75.0)] = 522.0
gyldigeKommunenummer[int(76.0)] = 528.0
gyldigeKommunenummer[int(77.0)] = 529.0
gyldigeKommunenummer[int(78.0)] = 532.0
gyldigeKommunenummer[int(79.0)] = 533.0
gyldigeKommunenummer[int(80.0)] = 534.0
gyldigeKommunenummer[int(81.0)] = 536.0
gyldigeKommunenummer[int(82.0)] = 538.0
gyldigeKommunenummer[int(83.0)] = 540.0
gyldigeKommunenummer[int(84.0)] = 541.0
gyldigeKommunenummer[int(85.0)] = 542.0
gyldigeKommunenummer[int(86.0)] = 543.0
gyldigeKommunenummer[int(87.0)] = 544.0
gyldigeKommunenummer[int(88.0)] = 545.0
gyldigeKommunenummer[int(89.0)] = 602.0
gyldigeKommunenummer[int(90.0)] = 604.0
gyldigeKommunenummer[int(91.0)] = 605.0
gyldigeKommunenummer[int(92.0)] = 612.0
gyldigeKommunenummer[int(93.0)] = 615.0
gyldigeKommunenummer[int(94.0)] = 616.0
gyldigeKommunenummer[int(95.0)] = 617.0
gyldigeKommunenummer[int(96.0)] = 618.0
gyldigeKommunenummer[int(97.0)] = 619.0
gyldigeKommunenummer[int(98.0)] = 620.0
gyldigeKommunenummer[int(99.0)] = 621.0
gyldigeKommunenummer[int(100.0)] = 622.0
gyldigeKommunenummer[int(101.0)] = 623.0
gyldigeKommunenummer[int(102.0)] = 624.0
gyldigeKommunenummer[int(103.0)] = 625.0
gyldigeKommunenummer[int(104.0)] = 626.0
gyldigeKommunenummer[int(105.0)] = 627.0
gyldigeKommunenummer[int(106.0)] = 628.0
gyldigeKommunenummer[int(107.0)] = 631.0
gyldigeKommunenummer[int(108.0)] = 632.0
gyldigeKommunenummer[int(109.0)] = 633.0
gyldigeKommunenummer[int(110.0)] = 701.0
gyldigeKommunenummer[int(111.0)] = 702.0
gyldigeKommunenummer[int(112.0)] = 704.0
gyldigeKommunenummer[int(113.0)] = 709.0
gyldigeKommunenummer[int(114.0)] = 710.0
gyldigeKommunenummer[int(115.0)] = 711.0
gyldigeKommunenummer[int(116.0)] = 713.0
gyldigeKommunenummer[int(117.0)] = 714.0
gyldigeKommunenummer[int(118.0)] = 716.0
gyldigeKommunenummer[int(119.0)] = 722.0
gyldigeKommunenummer[int(120.0)] = 723.0
gyldigeKommunenummer[int(121.0)] = 728.0
gyldigeKommunenummer[int(122.0)] = 805.0
gyldigeKommunenummer[int(123.0)] = 806.0
gyldigeKommunenummer[int(124.0)] = 807.0
gyldigeKommunenummer[int(125.0)] = 811.0
gyldigeKommunenummer[int(126.0)] = 814.0
gyldigeKommunenummer[int(127.0)] = 815.0
gyldigeKommunenummer[int(128.0)] = 817.0
gyldigeKommunenummer[int(129.0)] = 819.0
gyldigeKommunenummer[int(130.0)] = 821.0
gyldigeKommunenummer[int(131.0)] = 822.0
gyldigeKommunenummer[int(132.0)] = 826.0
gyldigeKommunenummer[int(133.0)] = 827.0
gyldigeKommunenummer[int(134.0)] = 828.0
gyldigeKommunenummer[int(135.0)] = 829.0
gyldigeKommunenummer[int(136.0)] = 830.0
gyldigeKommunenummer[int(137.0)] = 831.0
gyldigeKommunenummer[int(138.0)] = 833.0
gyldigeKommunenummer[int(139.0)] = 834.0
gyldigeKommunenummer[int(140.0)] = 901.0
gyldigeKommunenummer[int(141.0)] = 904.0
gyldigeKommunenummer[int(142.0)] = 906.0
gyldigeKommunenummer[int(143.0)] = 911.0
gyldigeKommunenummer[int(144.0)] = 912.0
gyldigeKommunenummer[int(145.0)] = 914.0
gyldigeKommunenummer[int(146.0)] = 919.0
gyldigeKommunenummer[int(147.0)] = 926.0
gyldigeKommunenummer[int(148.0)] = 928.0
gyldigeKommunenummer[int(149.0)] = 929.0
gyldigeKommunenummer[int(150.0)] = 935.0
gyldigeKommunenummer[int(151.0)] = 937.0
gyldigeKommunenummer[int(152.0)] = 938.0
gyldigeKommunenummer[int(153.0)] = 940.0
gyldigeKommunenummer[int(154.0)] = 941.0
gyldigeKommunenummer[int(155.0)] = 1001.0
gyldigeKommunenummer[int(156.0)] = 1002.0
gyldigeKommunenummer[int(157.0)] = 1003.0
gyldigeKommunenummer[int(158.0)] = 1004.0
gyldigeKommunenummer[int(159.0)] = 1014.0
gyldigeKommunenummer[int(160.0)] = 1017.0
gyldigeKommunenummer[int(161.0)] = 1018.0
gyldigeKommunenummer[int(162.0)] = 1021.0
gyldigeKommunenummer[int(163.0)] = 1026.0
gyldigeKommunenummer[int(164.0)] = 1027.0
gyldigeKommunenummer[int(165.0)] = 1029.0
gyldigeKommunenummer[int(166.0)] = 1032.0
gyldigeKommunenummer[int(167.0)] = 1034.0
gyldigeKommunenummer[int(168.0)] = 1037.0
gyldigeKommunenummer[int(169.0)] = 1046.0
gyldigeKommunenummer[int(170.0)] = 1101.0
gyldigeKommunenummer[int(171.0)] = 1102.0
gyldigeKommunenummer[int(172.0)] = 1103.0
gyldigeKommunenummer[int(173.0)] = 1106.0
gyldigeKommunenummer[int(174.0)] = 1111.0
gyldigeKommunenummer[int(175.0)] = 1112.0
gyldigeKommunenummer[int(176.0)] = 1114.0
gyldigeKommunenummer[int(177.0)] = 1119.0
gyldigeKommunenummer[int(178.0)] = 1120.0
gyldigeKommunenummer[int(179.0)] = 1121.0
gyldigeKommunenummer[int(180.0)] = 1122.0
gyldigeKommunenummer[int(181.0)] = 1124.0
gyldigeKommunenummer[int(182.0)] = 1127.0
gyldigeKommunenummer[int(183.0)] = 1129.0
gyldigeKommunenummer[int(184.0)] = 1130.0
gyldigeKommunenummer[int(185.0)] = 1133.0
gyldigeKommunenummer[int(186.0)] = 1134.0
gyldigeKommunenummer[int(187.0)] = 1135.0
gyldigeKommunenummer[int(188.0)] = 1141.0
gyldigeKommunenummer[int(189.0)] = 1142.0
gyldigeKommunenummer[int(190.0)] = 1144.0
gyldigeKommunenummer[int(191.0)] = 1145.0
gyldigeKommunenummer[int(192.0)] = 1146.0
gyldigeKommunenummer[int(193.0)] = 1149.0
gyldigeKommunenummer[int(194.0)] = 1151.0
gyldigeKommunenummer[int(195.0)] = 1160.0
gyldigeKommunenummer[int(196.0)] = 1201.0
gyldigeKommunenummer[int(197.0)] = 1211.0
gyldigeKommunenummer[int(198.0)] = 1216.0
gyldigeKommunenummer[int(199.0)] = 1219.0
gyldigeKommunenummer[int(200.0)] = 1221.0
gyldigeKommunenummer[int(201.0)] = 1222.0
gyldigeKommunenummer[int(202.0)] = 1223.0
gyldigeKommunenummer[int(203.0)] = 1224.0
gyldigeKommunenummer[int(204.0)] = 1227.0
gyldigeKommunenummer[int(205.0)] = 1228.0
gyldigeKommunenummer[int(206.0)] = 1231.0
gyldigeKommunenummer[int(207.0)] = 1232.0
gyldigeKommunenummer[int(208.0)] = 1233.0
gyldigeKommunenummer[int(209.0)] = 1234.0
gyldigeKommunenummer[int(210.0)] = 1235.0
gyldigeKommunenummer[int(211.0)] = 1238.0
gyldigeKommunenummer[int(212.0)] = 1241.0
gyldigeKommunenummer[int(213.0)] = 1242.0
gyldigeKommunenummer[int(214.0)] = 1243.0
gyldigeKommunenummer[int(215.0)] = 1244.0
gyldigeKommunenummer[int(216.0)] = 1245.0
gyldigeKommunenummer[int(217.0)] = 1246.0
gyldigeKommunenummer[int(218.0)] = 1247.0
gyldigeKommunenummer[int(219.0)] = 1251.0
gyldigeKommunenummer[int(220.0)] = 1252.0
gyldigeKommunenummer[int(221.0)] = 1253.0
gyldigeKommunenummer[int(222.0)] = 1256.0
gyldigeKommunenummer[int(223.0)] = 1259.0
gyldigeKommunenummer[int(224.0)] = 1260.0
gyldigeKommunenummer[int(225.0)] = 1263.0
gyldigeKommunenummer[int(226.0)] = 1264.0
gyldigeKommunenummer[int(227.0)] = 1265.0
gyldigeKommunenummer[int(228.0)] = 1266.0
gyldigeKommunenummer[int(229.0)] = 1401.0
gyldigeKommunenummer[int(230.0)] = 1411.0
gyldigeKommunenummer[int(231.0)] = 1412.0
gyldigeKommunenummer[int(232.0)] = 1413.0
gyldigeKommunenummer[int(233.0)] = 1416.0
gyldigeKommunenummer[int(234.0)] = 1417.0
gyldigeKommunenummer[int(235.0)] = 1418.0
gyldigeKommunenummer[int(236.0)] = 1419.0
gyldigeKommunenummer[int(237.0)] = 1420.0
gyldigeKommunenummer[int(238.0)] = 1421.0
gyldigeKommunenummer[int(239.0)] = 1422.0
gyldigeKommunenummer[int(240.0)] = 1424.0
gyldigeKommunenummer[int(241.0)] = 1426.0
gyldigeKommunenummer[int(242.0)] = 1428.0
gyldigeKommunenummer[int(243.0)] = 1429.0
gyldigeKommunenummer[int(244.0)] = 1430.0
gyldigeKommunenummer[int(245.0)] = 1431.0
gyldigeKommunenummer[int(246.0)] = 1432.0
gyldigeKommunenummer[int(247.0)] = 1433.0
gyldigeKommunenummer[int(248.0)] = 1438.0
gyldigeKommunenummer[int(249.0)] = 1439.0
gyldigeKommunenummer[int(250.0)] = 1441.0
gyldigeKommunenummer[int(251.0)] = 1443.0
gyldigeKommunenummer[int(252.0)] = 1444.0
gyldigeKommunenummer[int(253.0)] = 1445.0
gyldigeKommunenummer[int(254.0)] = 1449.0
gyldigeKommunenummer[int(255.0)] = 1502.0
gyldigeKommunenummer[int(256.0)] = 1504.0
gyldigeKommunenummer[int(257.0)] = 1505.0
gyldigeKommunenummer[int(258.0)] = 1511.0
gyldigeKommunenummer[int(259.0)] = 1514.0
gyldigeKommunenummer[int(260.0)] = 1515.0
gyldigeKommunenummer[int(261.0)] = 1516.0
gyldigeKommunenummer[int(262.0)] = 1517.0
gyldigeKommunenummer[int(263.0)] = 1519.0
gyldigeKommunenummer[int(264.0)] = 1520.0
gyldigeKommunenummer[int(265.0)] = 1523.0
gyldigeKommunenummer[int(266.0)] = 1524.0
gyldigeKommunenummer[int(267.0)] = 1525.0
gyldigeKommunenummer[int(268.0)] = 1526.0
gyldigeKommunenummer[int(269.0)] = 1528.0
gyldigeKommunenummer[int(270.0)] = 1529.0
gyldigeKommunenummer[int(271.0)] = 1531.0
gyldigeKommunenummer[int(272.0)] = 1532.0
gyldigeKommunenummer[int(273.0)] = 1534.0
gyldigeKommunenummer[int(274.0)] = 1535.0
gyldigeKommunenummer[int(275.0)] = 1539.0
gyldigeKommunenummer[int(276.0)] = 1543.0
gyldigeKommunenummer[int(277.0)] = 1545.0
gyldigeKommunenummer[int(278.0)] = 1546.0
gyldigeKommunenummer[int(279.0)] = 1547.0
gyldigeKommunenummer[int(280.0)] = 1548.0
gyldigeKommunenummer[int(281.0)] = 1551.0
gyldigeKommunenummer[int(282.0)] = 1554.0
gyldigeKommunenummer[int(283.0)] = 1557.0
gyldigeKommunenummer[int(284.0)] = 1560.0
gyldigeKommunenummer[int(285.0)] = 1563.0
gyldigeKommunenummer[int(286.0)] = 1566.0
gyldigeKommunenummer[int(287.0)] = 1567.0
gyldigeKommunenummer[int(288.0)] = 1571.0
gyldigeKommunenummer[int(289.0)] = 1573.0
gyldigeKommunenummer[int(290.0)] = 1576.0
gyldigeKommunenummer[int(291.0)] = 1601.0
gyldigeKommunenummer[int(292.0)] = 1612.0
gyldigeKommunenummer[int(293.0)] = 1613.0
gyldigeKommunenummer[int(294.0)] = 1617.0
gyldigeKommunenummer[int(295.0)] = 1620.0
gyldigeKommunenummer[int(296.0)] = 1621.0
gyldigeKommunenummer[int(297.0)] = 1622.0
gyldigeKommunenummer[int(298.0)] = 1624.0
gyldigeKommunenummer[int(299.0)] = 1627.0
gyldigeKommunenummer[int(300.0)] = 1630.0
gyldigeKommunenummer[int(301.0)] = 1632.0
gyldigeKommunenummer[int(302.0)] = 1633.0
gyldigeKommunenummer[int(303.0)] = 1634.0
gyldigeKommunenummer[int(304.0)] = 1635.0
gyldigeKommunenummer[int(305.0)] = 1636.0
gyldigeKommunenummer[int(306.0)] = 1638.0
gyldigeKommunenummer[int(307.0)] = 1640.0
gyldigeKommunenummer[int(308.0)] = 1644.0
gyldigeKommunenummer[int(309.0)] = 1648.0
gyldigeKommunenummer[int(310.0)] = 1653.0
gyldigeKommunenummer[int(311.0)] = 1657.0
gyldigeKommunenummer[int(312.0)] = 1662.0
gyldigeKommunenummer[int(313.0)] = 1663.0
gyldigeKommunenummer[int(314.0)] = 1664.0
gyldigeKommunenummer[int(315.0)] = 1665.0
gyldigeKommunenummer[int(316.0)] = 1702.0
gyldigeKommunenummer[int(317.0)] = 1703.0
gyldigeKommunenummer[int(318.0)] = 1711.0
gyldigeKommunenummer[int(319.0)] = 1714.0
gyldigeKommunenummer[int(320.0)] = 1717.0
gyldigeKommunenummer[int(321.0)] = 1718.0
gyldigeKommunenummer[int(322.0)] = 1719.0
gyldigeKommunenummer[int(323.0)] = 1721.0
gyldigeKommunenummer[int(324.0)] = 1724.0
gyldigeKommunenummer[int(325.0)] = 1725.0
gyldigeKommunenummer[int(326.0)] = 1736.0
gyldigeKommunenummer[int(327.0)] = 1738.0
gyldigeKommunenummer[int(328.0)] = 1739.0
gyldigeKommunenummer[int(329.0)] = 1740.0
gyldigeKommunenummer[int(330.0)] = 1742.0
gyldigeKommunenummer[int(331.0)] = 1743.0
gyldigeKommunenummer[int(332.0)] = 1744.0
gyldigeKommunenummer[int(333.0)] = 1748.0
gyldigeKommunenummer[int(334.0)] = 1749.0
gyldigeKommunenummer[int(335.0)] = 1750.0
gyldigeKommunenummer[int(336.0)] = 1751.0
gyldigeKommunenummer[int(337.0)] = 1755.0
gyldigeKommunenummer[int(338.0)] = 1756.0
gyldigeKommunenummer[int(339.0)] = 1804.0
gyldigeKommunenummer[int(340.0)] = 1805.0
gyldigeKommunenummer[int(341.0)] = 1811.0
gyldigeKommunenummer[int(342.0)] = 1812.0
gyldigeKommunenummer[int(343.0)] = 1813.0
gyldigeKommunenummer[int(344.0)] = 1815.0
gyldigeKommunenummer[int(345.0)] = 1816.0
gyldigeKommunenummer[int(346.0)] = 1818.0
gyldigeKommunenummer[int(347.0)] = 1820.0
gyldigeKommunenummer[int(348.0)] = 1822.0
gyldigeKommunenummer[int(349.0)] = 1824.0
gyldigeKommunenummer[int(350.0)] = 1825.0
gyldigeKommunenummer[int(351.0)] = 1826.0
#
#
# Copyright (C) University of Melbourne 2012
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
"""Test of TxMultiGenerator
Using the Python unittest library:
http://docs.python.org/2/library/unittest.html#
To run it, at a command line:
python test_txmultigenerator.py
"""
import sys
sys.path.append('..')
import os
import unittest
import numpy
from tools import mureilexception, testutilities
from tools import mureilbuilder
from generator import txmultigeneratormultisite
sea = testutilities.make_sane_equality_array
class TestUpdateStateNewPeriod(unittest.TestCase):
def setUp(self):
testutilities.unittest_path_setup(self, __file__)
def tearDown(self):
os.chdir(self.cwd)
def test_1(self):
import pprint
pp = pprint.PrettyPrinter(indent=4)
tmg = txmultigeneratormultisite.TxMultiGeneratorMultiSite()
tmg.config['startup_data_name'] = ''
tmg.config['params_to_site_data_name'] = ''
data_types = tmg.get_data_types()
exp_data_types = []
self.assertTrue((data_types == exp_data_types))
tmg.config['startup_data_name'] = 'gen_startup'
data_types = tmg.get_data_types()
exp_data_types = ['gen_startup']
self.assertTrue((data_types == exp_data_types))
tmg.config['params_to_site_data_name'] = 'gen_site_map'
data_types = tmg.get_data_types()
exp_data_types = ['gen_startup', 'gen_site_map']
self.assertTrue((data_types == exp_data_types))
tmg.run_periods = [2010, 2020, 2030]
# Now try out the startup & params to site
params_to_site = numpy.array([33, 22, 44, 55, 11], dtype=int)
startup_data = numpy.array([[22, 100, 1990, 2020],
[22, 200, 2000, 2030],
[11, 300, 2000, 2010],
[44, 400, 1990, 2010],
[44, 1000, 1990, 2020]], dtype=float)
data = {}
data['gen_site_map'] = params_to_site
data['gen_startup'] = startup_data
tmg.set_data(data)
self.assertTrue((tmg.params_to_site == params_to_site).all())
self.assertEqual(tmg.get_param_count(), len(params_to_site))
self.assertTrue((tmg.extra_periods == [1990, 2000]))
startup_state = tmg.get_startup_state_handle()
exp_state = {}
exp_state['curr_period'] = None
exp_state['capacity'] = {}
exp_state['history'] = {}
cap_list = exp_state['capacity']
cap_list[22] = [(100, 1990, 2020), (200, 2000, 2030)]
cap_list[11] = [(300, 2000, 2010)]
cap_list[44] = [(400, 1990, 2010), (1000, 1990, 2020)]
self.assertTrue((exp_state == startup_state))
# Now set some more configs, and check that the expand_config works.
tmg.config['size'] = {2000: 10, 2010: 20, 2030: 30}
tmg.config['capital_cost'] = {2000: 5, 2010: 6, 2020: 7, 2030: 8}
tmg.config['install_cost'] = 0
tmg.config['decommissioning_cost'] = {2000: 0.1, 2010: 0.2}
tmg.config['time_period_yrs'] = 10
tmg.config['lifetime_yrs'] = {2000: 10, 2020: 30}
tmg.config['timestep_hrs'] = 1.0
tmg.expand_config([2000, 2010, 2020, 2030, 2040, 2050])
exp_pc = {}
exp_pc[1990] = {'size': 10,
'startup_data_name': 'gen_startup',
'time_period_yrs': 10,
'lifetime_yrs': 10,
'install_cost': 0,
'timestep_hrs': 1.0,
'capital_cost': 5,
'decommissioning_cost': 0.1,
'params_to_site_data_name': 'gen_site_map'}
exp_pc[2000] = {'size': 10,
'startup_data_name': 'gen_startup',
'time_period_yrs': 10,
'timestep_hrs': 1.0,
'install_cost': 0,
'lifetime_yrs': 10,
'capital_cost': 5,
'decommissioning_cost': 0.1,
'params_to_site_data_name': 'gen_site_map'}
exp_pc[2010] = {'size': 20,
'startup_data_name': 'gen_startup',
'time_period_yrs': 10,
'timestep_hrs': 1.0,
'install_cost': 0,
'lifetime_yrs': 10,
'capital_cost': 6,
'decommissioning_cost': 0.2,
'params_to_site_data_name': 'gen_site_map'}
exp_pc[2020] = {'size': 20,
'startup_data_name': 'gen_startup',
'time_period_yrs': 10,
'lifetime_yrs': 30,
'install_cost': 0,
'timestep_hrs': 1.0,
'capital_cost': 7,
'decommissioning_cost': 0.2,
'params_to_site_data_name': 'gen_site_map'}
exp_pc[2030] = {'size': 30,
'startup_data_name': 'gen_startup',
'time_period_yrs': 10,
'lifetime_yrs': 30,
'install_cost': 0,
'capital_cost': 8,
'timestep_hrs': 1.0,
'decommissioning_cost': 0.2,
'params_to_site_data_name': 'gen_site_map'}
exp_pc[2040] = {'size': 30,
'startup_data_name': 'gen_startup',
'time_period_yrs': 10,
'lifetime_yrs': 30,
'timestep_hrs': 1.0,
'capital_cost': 8,
'install_cost': 0,
'decommissioning_cost': 0.2,
'params_to_site_data_name': 'gen_site_map'}
exp_pc[2050] = {'size': 30,
'startup_data_name': 'gen_startup',
'time_period_yrs': 10,
'lifetime_yrs': 30,
'capital_cost': 8,
'install_cost': 0,
'timestep_hrs': 1.0,
'decommissioning_cost': 0.2,
'params_to_site_data_name': 'gen_site_map'}
self.assertTrue((tmg.period_configs == exp_pc))
# Now update the state from a list
state_handle = tmg.get_startup_state_handle()
new_cap = [(33, 1000, 2040), (44, 2000, 2010), (22, 1000, 2020)]
tmg.update_state_new_period_list(state_handle, 2010, new_cap)
exp_state_handle = {}
exp_state_handle['curr_period'] = 2010
exp_state_handle['capacity'] = cap_list = {}
exp_state_handle['history'] = hist_list = {}
cap_list[22] = [(100, 1990, 2020), (200, 2000, 2030), (1000, 2010, 2020)]
cap_list[11] = [(300, 2000, 2010)]
cap_list[44] = [(400, 1990, 2010), (1000, 1990, 2020), (2000, 2010, 2010)]
cap_list[33] = [(1000, 2010, 2040)]
self.assertTrue((exp_state_handle == state_handle))
# and get the list of sites
sites = tmg.get_site_indices(state_handle)
exp_sites = [11, 22, 33, 44]
self.assertTrue((exp_sites == sites))
# and the capacity and new_capacity costs
capacity = tmg.get_capacity(state_handle)
exp_capacity = [300.0, 1300.0, 1000.0, 3400.0]
self.assertTrue((exp_capacity == capacity))
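        # Worked numbers for the checks below (derived from the data set up above): in 2010
        # site 11 holds 300, site 22 holds 100 + 200 + 1000 = 1300, site 33 holds 1000 and
        # site 44 holds 400 + 1000 + 2000 = 3400. New capacity is costed at the 2010
        # capital_cost of 6 (1000*6 = 6000 for sites 22 and 33, 2000*6 = 12000 for site 44,
        # total 24000) and decommissioning at 0.2 (300*0.2 = 60 for site 11,
        # 2400*0.2 = 480 for site 44, total 540).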
total_new_cap_cost, new_capacity_costs = (
tmg.calculate_new_capacity_cost(state_handle))
exp_total_cost = 24000
exp_new_capacity_cost = [(22, 1000, 6000),
(33, 1000, 6000), (44, 2000, 12000)]
self.assertEqual(exp_total_cost, total_new_cap_cost)
        self.assertTrue((exp_new_capacity_cost == new_capacity_costs))
# and decommission
total_decomm_cost, decomm = (
tmg.calculate_update_decommission(state_handle))
exp_total_cost = 540
exp_decomm = [(11, 300, 60), (44, 2400, 480)]
self.assertEqual(exp_total_cost, total_decomm_cost)
        self.assertTrue((exp_decomm == decomm))
exp_state_handle = {}
exp_state_handle['curr_period'] = 2010
exp_state_handle['capacity'] = cap_list = {}
exp_state_handle['history'] = hist_list = {}
cap_list[22] = [(100, 1990, 2020), (200, 2000, 2030), (1000, 2010, 2020)]
cap_list[44] = [(1000, 1990, 2020)]
cap_list[33] = [(1000, 2010, 2040)]
hist_list[11] = [(300, 2000, 2010)]
hist_list[44] = [(400, 1990, 2010), (2000, 2010, 2010)]
self.assertTrue((exp_state_handle == state_handle))
# Add some more capacity in 2020, using the params this time
tmg.update_state_new_period_params(state_handle, 2020, numpy.array(
[0, 220, 0, 550, 110]))
exp_state_handle = {}
exp_state_handle['curr_period'] = 2020
exp_state_handle['capacity'] = cap_list = {}
exp_state_handle['history'] = hist_list
cap_list[11] = [(2200, 2020, 2040)]
cap_list[22] = [(100, 1990, 2020), (200, 2000, 2030), (1000, 2010, 2020),
(4400, 2020, 2040)]
cap_list[44] = [(1000, 1990, 2020)]
cap_list[33] = [(1000, 2010, 2040)]
cap_list[55] = [(11000, 2020, 2040)]
#pp.pprint(exp_state_handle)
#pp.pprint(state_handle)
self.assertTrue((exp_state_handle == state_handle))
# and check that get_startup_state still returns a clean one
startup_state = tmg.get_startup_state_handle()
exp_state = {}
exp_state['curr_period'] = None
exp_state['capacity'] = {}
exp_state['history'] = {}
cap_list = exp_state['capacity']
hist_list = exp_state['history']
cap_list[22] = [(100, 1990, 2020), (200, 2000, 2030)]
cap_list[11] = [(300, 2000, 2010)]
cap_list[44] = [(400, 1990, 2010), (1000, 1990, 2020)]
self.assertTrue((exp_state == startup_state))
def test_set_config(self):
config = {}
config['size'] = {2000: 10, 2010: 20, 2030: 30}
config['capital_cost'] = {2000: 5, 2010: 6, 2020: 7, 2030: 8}
config['decommissioning_cost'] = {2000: 0.1, 2010: 0.2}
config['time_period_yrs'] = 10
config['lifetime_yrs'] = {2000: 10, 2020: 30}
config['model'] = 'txmultigenerator'
config['section'] = 'Generator'
config['timestep_hrs'] = 1.0
tmg = txmultigeneratormultisite.TxMultiGeneratorMultiSite()
tmg.set_config(config, run_periods=[2000, 2010, 2020])
exp_pc = {}
exp_pc[2000] = {'size': 10.0,
'time_period_yrs': 10,
'params_to_site_data_name': '',
'startup_data_name': '',
'lifetime_yrs': 10,
'variable_cost_mult': 1.0,
'time_scale_up_mult': 1.0,
'carbon_price_m': 0.0,
'vom': 0.0,
'model': 'txmultigenerator',
'timestep_hrs': 1.0,
'section': 'Generator',
'startup_data_string': None,
'install_cost': 0,
'params_to_site_data_string': [],
'start_min_param': 1e20,
'start_max_param': 1e20,
'capital_cost': 5.0,
'decommissioning_cost': 0.1}
exp_pc[2010] = {'size': 20.0,
'time_period_yrs': 10,
'variable_cost_mult': 1.0,
'timestep_hrs': 1.0,
'params_to_site_data_name': '',
'startup_data_name': '',
'time_scale_up_mult': 1.0,
'carbon_price_m': 0.0,
'vom': 0.0,
'install_cost': 0,
'startup_data_string': None,
'params_to_site_data_string': [],
'start_min_param': 1e20,
'start_max_param': 1e20,
'lifetime_yrs': 10,
'model': 'txmultigenerator',
'section': 'Generator',
'capital_cost': 6.0,
'decommissioning_cost': 0.2}
exp_pc[2020] = {'size': 20.0,
'time_period_yrs': 10,
'variable_cost_mult': 1.0,
'vom': 0.0,
'install_cost': 0,
'time_scale_up_mult': 1.0,
'timestep_hrs': 1.0,
'params_to_site_data_name': '',
'startup_data_name': '',
'startup_data_string': None,
'params_to_site_data_string': [],
'start_min_param': 1e20,
'start_max_param': 1e20,
'carbon_price_m': 0.0,
'model': 'txmultigenerator',
'section': 'Generator',
'lifetime_yrs': 30,
'capital_cost': 7.0,
'decommissioning_cost': 0.2}
# import pprint
# pp = pprint.PrettyPrinter(indent=4)
# pp.pprint(tmg.period_configs)
# pp.pprint(exp_pc)
# for (period, conf) in exp_pc.iteritems():
# for (key, value) in conf.iteritems():
# print key
# print (exp_pc[period][key] == tmg.period_configs[period][key])
self.assertTrue((tmg.period_configs == exp_pc))
def test_set_config_check_lifetime_dict(self):
config = {}
config['size'] = {2000: 10, 2010: 20, 2030: 30}
config['capital_cost'] = {2000: 5, 2010: 6, 2020: 7, 2030: 8}
config['decommissioning_cost'] = {2000: 0.1, 2010: 0.2}
config['time_period_yrs'] = 10
config['lifetime_yrs'] = {2000: 15, 2020: 30}
config['model'] = 'txmultigenerator'
config['section'] = 'Generator'
config['variable_cost_mult'] = 1
config['time_scale_up_mult'] = 1
config['carbon_price_m'] = 100
config['timestep_hrs'] = 1.0
tmg = txmultigeneratormultisite.TxMultiGeneratorMultiSite()
with self.assertRaises(mureilexception.ConfigException) as cm:
tmg.set_config(config, run_periods=[2000, 2010, 2020, 2030, 2040, 2050])
self.assertEqual(cm.exception.msg,
'In section Generator, lifetime_yrs = 15.0 which is required to be a multiple of time_period_yrs of 10.0')
def test_set_config_check_lifetime_scalar(self):
config = {}
config['size'] = {2000: 10, 2010: 20, 2030: 30}
config['capital_cost'] = {2000: 5, 2010: 6, 2020: 7, 2030: 8}
config['decommissioning_cost'] = {2000: 0.1, 2010: 0.2}
config['time_period_yrs'] = 10
config['lifetime_yrs'] = 8
config['model'] = 'txmultigenerator'
config['section'] = 'Generator'
config['variable_cost_mult'] = 1
config['time_scale_up_mult'] = 1
config['carbon_price_m'] = 100
config['timestep_hrs'] = 1.0
tmg = txmultigeneratormultisite.TxMultiGeneratorMultiSite()
        with self.assertRaises(mureilexception.ConfigException) as cm:
"""
Propagator for CSP constraints.
"""
from collections import OrderedDict
import clingo
from .parsing import AbstractConstraintBuilder, simplify, parse_theory
from .util import measure_time_decorator, IntervalSet
from .base import Config, Statistics, InitClauseCreator, ControlClauseCreator
from .solver import State
from .constraints import SumConstraint, DistinctConstraint, MinimizeConstraint
class ConstraintBuilder(AbstractConstraintBuilder):
"""
CSP builder to use with the parse_theory function.
"""
def __init__(self, cc, propagator, minimize):
self._cc = cc
self._propagator = propagator
self._minimize = minimize
@property
def cc(self):
"""
Return a ClauseCreator.
"""
return self._cc
def add_show(self):
"""
Inform the builder that there is a show statement.
"""
self._propagator.show()
def show_signature(self, name, arity):
"""
Show variables with the given signature.
"""
self._propagator.show_signature(name, arity)
def show_variable(self, var):
"""
Show the given variable.
"""
self._propagator.show_variable(var)
def add_variable(self, var):
"""
Get the integer representing a variable.
"""
assert isinstance(var, clingo.Symbol)
return self._propagator.add_variable(var)
def add_constraint(self, lit, elems, rhs, strict):
"""
Add a constraint.
"""
if not strict and self.cc.assignment.is_false(lit):
return
if len(elems) == 1:
co, var = elems[0]
self._propagator.add_simple(self.cc, lit, co, var, rhs, strict)
else:
assert not strict
if self._propagator.config.sort_constraints:
elems.sort(key=lambda cv: -abs(cv[0]))
self._propagator.add_constraint(self.cc, SumConstraint(lit, elems, rhs))
def add_minimize(self, co, var):
"""
Add a term to the minimize constraint.
"""
if self._minimize is None:
self._minimize = MinimizeConstraint()
if co == 0:
return
self._minimize.elements.append((co, var))
def add_distinct(self, literal, elems):
"""
Add a distinct constraint.
Binary distinct constraints will be represented with a sum constraint.
"""
if self.cc.assignment.is_false(literal):
return
if len(elems) > 2:
self._propagator.add_constraint(self.cc, DistinctConstraint(literal, elems))
return
for i, (rhs_i, elems_i) in enumerate(elems):
for rhs_j, elems_j in elems[i+1:]:
rhs = rhs_i - rhs_j
celems = []
celems.extend(elems_i)
celems.extend((-co_j, var_j) for co_j, var_j in elems_j)
if not celems:
if rhs == 0:
self.cc.add_clause([-literal])
return
continue
a = self.cc.add_literal()
b = self.cc.add_literal()
self.cc.add_clause([a, b, -literal])
self.cc.add_clause([-a, -b])
self.add_constraint(a, celems, rhs-1, False)
self.add_constraint(b, [(-co, var) for co, var in celems], -rhs-1, False)
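# Sketch of the binary case handled above: for exactly two terms the fresh literals a and b
# each guard one of the two strict sum inequalities over the combined coefficients (celems and
# its negation). Clause (a | b | -literal) requires at least one strict inequality whenever the
# distinct literal holds, which rules out equality of the two terms, and clause (-a | -b)
# ensures the two guards are never both asserted.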
def add_dom(self, literal, var, elements):
"""
Add a domain for the given variable.
The domain is represented as a set of left-closed intervals.
"""
if self.cc.assignment.is_false(literal):
return
intervals = IntervalSet(elements)
self._propagator.add_dom(self.cc, literal, var, list(intervals))
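# Example (sketch): a domain declaration like "x in 1..3 or 7..9" arrives here as half-open
# interval elements such as [(1, 4), (7, 10)]; IntervalSet merges and normalises overlapping
# pieces before they are handed to the master state.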
def prepare_minimize(self):
"""
Prepare the minimize constraint.
"""
# simplify minimize
if self._minimize is not None:
adjust, self._minimize.elements = simplify(self._minimize.elements, True)
self._minimize.adjust += adjust
if self._propagator.config.sort_constraints:
self._minimize.elements.sort(key=lambda cv: -abs(cv[0]))
return self._minimize
class Propagator(object):
"""
A propagator for CSP constraints.
"""
def __init__(self):
self._l2c = {} # map literals to constraints
self._states = [] # map thread id to states
self._var_map = OrderedDict() # map from variable names to indices
self._minimize = None # minimize constraint
self._minimize_bound = None # bound of the minimize constraint
self._stats_step = Statistics() # statistics of the current call
self._stats_accu = Statistics() # accumulated statistics
self._translated_minimize = False # whether a minimize constraint has been translated
self.config = Config() # configuration
self._show = False # whether there is a show statement
self._show_variable = set() # variables to show
self._show_signature = set() # signatures to show
self.constraints = set() # constraints given by the translator
def _state(self, thread_id):
"""
Get the state associated with the given `thread_id`.
"""
while len(self._states) <= thread_id:
self._states.append(State(self._l2c, self.config.state_config(thread_id)))
return self._states[thread_id]
def on_model(self, model):
"""
Extend the model with the assignment and take care of minimization.
"""
shown = (var for var in self._var_map.items() if self.shown(var))
assignment = self._state(model.thread_id).get_assignment(shown)
model.extend(
clingo.Function("__csp", [var, value])
for var, value in assignment if self.shown(var))
if self.has_minimize:
bound = self.get_minimize_value(model.thread_id)
model.extend([clingo.Function("__csp_cost", [bound])])
if self._minimize_bound is None or bound-1 < self._minimize_bound:
self.statistics.cost = bound
self.update_minimize(bound-1)
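# Example (sketch): for an integer assignment x=2, y=5 and objective value 7, the stable model
# is extended with the symbols __csp(x,2), __csp(y,5) and __csp_cost(7), and the next bound
# handed to update_minimize is 6 (bound-1).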
@property
def statistics(self):
"""
Return statistics object.
"""
return self._stats_step
def on_statistics(self, step, akku):
"""
Callback to update `step` and `akku`mulated statistics.
"""
for s in self._states:
self._stats_step.tstats.append(s.statistics)
self._stats_accu.accu(self._stats_step)
self.add_statistics(step, self._stats_step)
self.add_statistics(akku, self._stats_accu)
self._stats_step.reset()
def add_statistics(self, stats_map, stats):
"""
Add collected statistics in `stats` to the clingo.StatisticsMap `stats_map`.
"""
def thread_stats(tstat): # pylint: disable=missing-docstring
p, c, u = tstat.time_propagate, tstat.time_check, tstat.time_undo
return OrderedDict([
("Time in seconds", OrderedDict([
("Total", p+c+u),
("Propagation", p),
("Check", c),
("Undo", u)])),
("Refined reason", tstat.refined_reason),
("Introduced reason", tstat.introduced_reason),
("Literals introduced ", tstat.literals)])
cost = []
if stats.cost is not None:
cost.append(("Cost", stats.cost))
stats_map["Clingcon"] = OrderedDict(cost + [
("Init time in seconds", OrderedDict([
("Total", stats.time_init),
("Simplify", stats.time_simplify),
("Translate", stats.time_translate)])),
("Problem", OrderedDict([
("Constraints", stats.num_constraints),
("Variables", stats.num_variables),
("Clauses", stats.num_clauses),
("Literals", stats.num_literals)])),
("Translate", OrderedDict([
("Constraints removed", stats.translate_removed),
("Constraints added", stats.translate_added),
("Clauses", stats.translate_clauses),
("Weight constraints", stats.translate_wcs),
("Literals", stats.translate_literals)])),
("Thread", map(thread_stats, stats.tstats[:len(self._states)]))])
def add_variable(self, var):
"""
Add a variable to the program.
"""
assert isinstance(var, clingo.Symbol)
if var not in self._var_map:
idx = self._state(0).add_variable(self.config.min_int, self.config.max_int)
self._var_map[var] = idx
self._stats_step.num_variables += 1
return self._var_map[var]
def show(self):
"""
Enable show statement.
If the show statement has not been enabled, then all variables are
shown.
"""
self._show = True
def show_variable(self, var):
"""
Show the given variable.
"""
self._show_variable.add(var)
def show_signature(self, name, arity):
"""
Show variables with the given signature.
"""
self._show_signature.add((name, arity))
def add_dom(self, cc, literal, var, domain):
"""
Add a domain for the given variable.
"""
return self._state(0).add_dom(cc, literal, var, domain)
def add_simple(self, cc, clit, co, var, rhs, strict):
"""
Add a constraint that can be represented by an order literal.
"""
return self._state(0).add_simple(cc, clit, co, var, rhs, strict)
def _add_constraint(self, cc, constraint):
"""
Add a constraint to the program that has already been added to the
master state.
"""
lit = constraint.literal
cc.add_watch(lit)
self._l2c.setdefault(lit, []).append(constraint)
def add_constraint(self, cc, constraint):
"""
Add a constraint to the program.
"""
self._state(0).add_constraint(constraint)
self._stats_step.num_constraints += 1
self._add_constraint(cc, constraint)
@measure_time_decorator("statistics.time_init")
def init(self, init):
"""
Initializes the propagator extracting constraints from the theory data.
The function handles reinitialization for multi-shot solving and
multi-threaded solving.
"""
init.check_mode = clingo.PropagatorCheckMode.Fixpoint
cc = InitClauseCreator(init, self.statistics)
# remove minimize constraint
minimize = self.remove_minimize()
# remove solve step local and fixed literals
for state in self._states:
state.update(cc)
# add constraints
builder = ConstraintBuilder(cc, self, minimize)
if len(self.constraints) == 0:
parse_theory(builder, init.theory_atoms)
else:
parse_theory(builder, self.constraints)
# gather bounds of states in master
master = self._state(0)
for state in self._states[1:]:
if not master.update_bounds(cc, state):
return
# propagate the newly added constraints
if not self._simplify(cc, master):
return
# remove unnecessary literals after simplification
if not master.cleanup_literals(cc):
return
# translate (simple enough) constraints
if not self._translate(cc, master, builder.prepare_minimize()):
return
# copy order literals from master to other states
del self._states[init.number_of_threads:]
for i in range(1, init.number_of_threads):
self._state(i).copy_state(master)
@measure_time_decorator("statistics.time_simplify")
def _simplify(self, cc, master):
"""
Propagate constraints refining bounds.
"""
try:
return master.simplify(cc, self.config.check_state)
finally:
# Note: During simplify propagate and check are called, which can
# produce large timings for the master thread.
master.statistics.time_propagate = 0
master.statistics.time_check = 0
@measure_time_decorator("statistics.time_translate")
def _translate(self, cc, master, minimize):
"""
Translates constraints and takes care of handling the minimize
constraint.
"""
# add minimize constraint
# Note: the minimize constraint is added after simplification to avoid
# propagating tagged clauses, which is not supported at the moment.
if minimize is not None:
# Note: fail if translation was requested earlier
if self._translated_minimize and not self.config.translate_minimize:
raise RuntimeError("translation of minimize constraints is disabled but was enabled before")
self.add_minimize(cc, minimize)
# translate (simple enough) constraints
cc.set_state(InitClauseCreator.StateTranslate)
ret, added = master.translate(cc, self._l2c, self.statistics, self.config)
if not ret:
return False
for constraint in added:
self._add_constraint(cc, constraint)
cc.set_state(InitClauseCreator.StateInit)
# mark minimize constraint as translated if necessary
if self.config.translate_minimize and self._minimize is not None:
self._translated_minimize = True
self._minimize = None
return True
def propagate(self, control, changes):
"""
Delegates propagation to the respective state.
"""
state = self._state(control.thread_id)
state.propagate(ControlClauseCreator(control, state.statistics), changes)
def check(self, control):
"""
Delegates checking to the respective state and makes sure that all
order variables are assigned if the assignment is total.
"""
size = len(control.assignment)
state = self._state(control.thread_id)
dl = control.assignment.decision_level
if self.has_minimize and self._minimize_bound is not None:
bound = self._minimize_bound + self._minimize.adjust
state.update_minimize(self._minimize, dl, bound)
if not state.check(ControlClauseCreator(control, state.statistics), self.config.check_state):
return
# Note: Makes sure that all variables are assigned in the end. But even
# if the assignment is total, we do not have to introduce fresh
# variables if variables have been introduced during check. In this
# case, there is a guaranteed follow-up propagate call because all
# newly introduced | |
#!/usr/bin/env python3
desc="""Requiggle basecalled FastQ files and features in BAM file.
For all reference bases we store (as BAM comments):
- normalised signal intensity mean [tag si:B,f]
- reference base probability [tag tr:B:C] retrieved from guppy (trace scaled 0-255)
- dwell time [tag dt:B:C] in signal step capped at 255
All features are matched versus padded reference sequence blocks
ie excluding introns and large (padded) deletions from reference.
Those blocks (2-D array of start & ends) are stored as flattened 1-D array [tag bs:B:i]
ie. exons [(8114, 8244), (8645, 8797)] will be stored as array('I', [8114, 8244, 8645, 8797]).
--rna will automatically enable spliced alignments.
"""
epilog="""Author: <EMAIL>
Cologne/Barcelona/Mizerów, 17/06/2020
"""
import itertools, json, os, resource, scipy, subprocess, sys, numpy as np, pysam, tempfile
from tombo import tombo_stats, resquiggle, tombo_helper
from tombo._default_parameters import OUTLIER_THRESH, SHIFT_CHANGE_THRESH, SCALE_CHANGE_THRESH, RNA_SAMP_TYPE, DNA_SAMP_TYPE, COLLAPSE_RNA_STALLS, COLLAPSE_DNA_STALLS, STALL_PARAMS#, FM_OFFSET_DEFAULT
from ont_fast5_api.fast5_interface import get_fast5_file
from datetime import datetime
from multiprocessing import Pool
from array import array
from copy import deepcopy
# add PATH - needed by fast5_to_fastq.py
os.environ["PATH"] = "%s:%s"%(':'.join(sys.path), os.environ["PATH"])
VERSION = '0.11b'
DEFAULT_STALL_PARAMS = tombo_helper.stallParams(**STALL_PARAMS)
USE_START_CLIP_BASES = resquiggle.USE_START_CLIP_BASES
# only DNA bases as in SAM U is always referred as T
bases = "ACGT"
base2idx = {b: i for i, b in enumerate(bases)}
base2complement = {"A": "T", "T": "A", "C": "G", "G": "C", "N": "N"}
# add lower-case for get_aligned_pairs as it reports substitutions as lower-case
for b, i in list(base2idx.items()): base2idx[b.lower()] = i
for b, c in list(base2complement.items()): base2complement[b.lower()] = c
def minimap2_proc(ref, fast5, threads=1, spliced=0, sensitive=1):
"""Run minimap2 and return its stdout"""
mode = ["-axmap-ont", ]
if spliced:
mode = ["-axsplice", "-uf"]
args1 = ["minimap2", "--MD", "-Y", "-t%s"%threads] + mode
if sensitive:
args1 += ["-k7", "-w5", "-m20", "-A6", "-B4"]
args1 += [ref, "-"]
# fast5_to_fastq
args0 = ["fast5_to_fastq.py", "-i%s"%fast5]
proc0 = subprocess.Popen(args0, stdout=subprocess.PIPE)
# minimap2
proc1 = subprocess.Popen(args1, stdin=proc0.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return proc1
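# Usage sketch (minimap2 and fast5_to_fastq.py are assumed to be on PATH; file names are
# illustrative):
#   proc = minimap2_proc("reference.fa", "reads.fast5", threads=4, spliced=1)
#   # proc.stdout is a SAM stream that can then be parsed, e.g. with pysam.AlignmentFile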
def adjust_map_res(map_res, seq_samp_type, rsqgl_params, TRIM_RNA_ADAPTER=False):
if seq_samp_type.name == RNA_SAMP_TYPE:
if TRIM_RNA_ADAPTER:
# trim DNA adapter off of RNA signal
adapter_end = tombo_stats.trim_rna(map_res.raw_signal, rsqgl_params)
# trim off adapter
map_res = map_res._replace(raw_signal=map_res.raw_signal[adapter_end:])
# flip raw signal for re-squiggling
map_res = map_res._replace(raw_signal=map_res.raw_signal[::-1])
elif seq_samp_type.name == DNA_SAMP_TYPE and USE_START_CLIP_BASES:
# flip raw signal, genome and start clip seqs for re-squiggling
map_res = map_res._replace(
raw_signal=map_res.raw_signal[::-1],
genome_seq=map_res.genome_seq[::-1])
if ((COLLAPSE_RNA_STALLS and seq_samp_type.name == RNA_SAMP_TYPE) or
(COLLAPSE_DNA_STALLS and seq_samp_type.name == DNA_SAMP_TYPE)):
map_res = map_res._replace(stall_ints=tombo_stats.identify_stalls(map_res.raw_signal, DEFAULT_STALL_PARAMS))
return map_res
def adjust_rsqgl_res(rsqgl_res, all_raw_signal, seq_samp_type, USE_START_CLIP_BASES=False):
if seq_samp_type.name == DNA_SAMP_TYPE and USE_START_CLIP_BASES:
# flip raw signal and events back for storage in genome direction
rev_rsrtr = (all_raw_signal.shape[0] -
rsqgl_res.read_start_rel_to_raw -
rsqgl_res.segs[-1])
rev_segs = -1 * (rsqgl_res.segs[::-1] - rsqgl_res.segs[-1])
rsqgl_res = rsqgl_res._replace(
read_start_rel_to_raw=rev_rsrtr, segs=rev_segs,
genome_seq=rsqgl_res.genome_seq[::-1],
raw_signal=rsqgl_res.raw_signal[::-1])
return rsqgl_res
def map_read(a, faidx, seq_samp_type, std_ref, ref2len):
"""Get resquiggle result with read alignement info"""
seq_data = tombo_helper.sequenceData(seq=a.seq, id=a.qname, mean_q_score=np.mean(a.query_qualities))
# get chrom, start and end
chrm, ref_start, ref_end = a.reference_name, a.reference_start, a.reference_end
# store strand & number of clipped bases relative to read sequence
if a.is_reverse:
strand = "-"
num_start_clipped_bases = len(seq_data.seq) - a.qend
num_end_clipped_bases = a.qstart
else:
strand = "+"
num_start_clipped_bases = a.qstart
num_end_clipped_bases = len(seq_data.seq) - a.qend
# 'ID', 'Subgroup', 'ClipStart', 'ClipEnd', 'Insertions', 'Deletions', 'Matches', 'Mismatches'
align_info = tombo_helper.alignInfo(seq_data.id, "", num_start_clipped_bases, num_end_clipped_bases,
0, 0, a.alen, 0) # this isn't used anywhere, so just don't bother computing it!
# extract genome sequence from mappy aligner
# expand sequence to get model levels for all sites (need to handle new
# sequence coordinates downstream)
start_skip = 0
# get exonic blocks
blocks = get_exonic_blocks(a)
align_info.blocks = deepcopy(blocks)
dnstrm_bases = std_ref.kmer_width - std_ref.central_pos - 1
if ((seq_samp_type.name == RNA_SAMP_TYPE and strand == '+') or
(seq_samp_type.name == DNA_SAMP_TYPE and strand == '-' and USE_START_CLIP_BASES) or
(seq_samp_type.name == DNA_SAMP_TYPE and strand == '+' and not USE_START_CLIP_BASES)):
if ref_start < std_ref.central_pos:
start_skip = std_ref.central_pos-ref_start
ref_start = std_ref.central_pos
ref_seq_start = ref_start - std_ref.central_pos
ref_seq_end = ref_end + dnstrm_bases
else:
if ref_start < dnstrm_bases:
start_skip = dnstrm_bases-ref_start
ref_start = dnstrm_bases
ref_seq_start = ref_start - dnstrm_bases
ref_seq_end = ref_end + std_ref.central_pos
# update blocks start & end with kmer specific shifts - this sequence won't be saved!
blocks[0][0] = ref_seq_start
blocks[-1][1] = ref_seq_end
# get exonic sequence
genome_seq = "".join([faidx.fetch(chrm, s, e) for s, e in blocks])
# get missing bases in the end
end_skip = 0 if blocks[-1][1]<=ref2len[chrm] else blocks[-1][1]-ref2len[chrm]
    # pad the genome seq at the ends with placeholder 'A' bases for the missing bits (for now)
if start_skip or end_skip:
genome_seq = "A"*start_skip + genome_seq + "A"*end_skip
if strand == '-':
genome_seq = tombo_helper.rev_comp(genome_seq)
# store enlarged genome for P-value calculation, so no trimming needed later :)
genome_seq = genome_seq.upper() #.upper() is important to correctly process soft-masked sequences
align_info.refseq = genome_seq.upper() # res.genome_seq is altered during find_adaptive_assignment
genome_loc = tombo_helper.genomeLocation(ref_start, strand, chrm)
return tombo_helper.resquiggleResults(align_info, genome_loc, genome_seq, seq_data.mean_q_score)
def get_exonic_blocks(a):
"""Return exonic blocks this is start-end reference-based
for consecutive exons covered by given read.
Note, those are not necesarily exact exons, just exons infered from read alignment.
"""
blocks = []
s = e = a.pos
# iter read blocks
for code, bases in a.cigar:
# count blocks that alter reference positions (ignore ie insertions [1])
if code in (0, 2, 7, 8): e += bases
# exclude introns - those are reported as reference-padded alignment part
elif code == 3:
blocks.append([s, e])
s = e + bases
e = s
# store exon after last intron (or entire transcript if no introns)
blocks.append([s, e])
return blocks
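# Worked example: a read aligned at pos 1000 with CIGAR 100M 50N 80M gives
# blocks [[1000, 1100], [1150, 1230]] - two exonic segments separated by the 50 bp intron
# (the N operation advances the reference but is not exonic).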
def resquiggle_reads(multifast5_fn, aligner, ref, seq_samp_type, std_ref, rsqgl_params,
outlier_thresh=OUTLIER_THRESH, max_scaling_iters=3, max_per_ref=0,
valid_bases=set(list('ACGT'))):
ref2c = {}
# process reads from multi fast5
faidx = pysam.FastaFile(ref)
ref2len = {r: l for r, l in zip(faidx.references, faidx.lengths)}#; ref2len
f5file = get_fast5_file(multifast5_fn, mode="r")
for a in aligner:
# process only given number of reads per reference
if max_per_ref:
contig = a.reference_name #map_results.genome_loc.Chrom
if contig in ref2c:
if ref2c[contig]>=max_per_ref: continue
else: ref2c[contig] = 0
# skip reads without alignment or secondary/qcfails
if a.is_unmapped or a.is_secondary or a.is_qcfail:
yield None, "No alignment" if a.is_unmapped else "Secondary alignment"
continue
# get alignment data
map_results = map_read(a, faidx, seq_samp_type, std_ref, ref2len)
# make sure only ACGT chars in reference!
if set(map_results.genome_seq).difference(valid_bases):
yield None, "Non-ACGT sequence" # instead maybe just replace by random char?
continue
# extract data from FAST5
read = f5file.get_read(a.qname) #read_id)
all_raw_signal = read.get_raw_data(scale=False)
map_results = map_results._replace(raw_signal=all_raw_signal)
try:
# this causes sometimes TomboError: Read event to sequence alignment extends beyond bandwidth
map_results = adjust_map_res(map_results, seq_samp_type, rsqgl_params)
rsqgl_res = resquiggle.resquiggle_read(map_results, std_ref, rsqgl_params, outlier_thresh)
n_iters = 1
while n_iters < max_scaling_iters and rsqgl_res.norm_params_changed:
rsqgl_res = resquiggle.resquiggle_read(map_results._replace(scale_values=rsqgl_res.scale_values),
std_ref, rsqgl_params, outlier_thresh)
n_iters += 1
except Exception as inst:
yield None, str(inst)
continue
rsqgl_res = adjust_rsqgl_res(rsqgl_res, all_raw_signal, seq_samp_type)
# add alignment and read as those are needed later
rsqgl_res.a, rsqgl_res.read = a, read
# update ref counter
if ref2c: ref2c[contig] += 1
yield rsqgl_res, ""
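# Usage sketch (names are illustrative): iterate resquiggle results for a multi-FAST5 file,
# skipping reads that failed, with the reported reason in err:
#   for res, err in resquiggle_reads("reads.fast5", aligner, "ref.fa",
#                                    seq_samp_type, std_ref, rsqgl_params):
#       if res is None:
#           continue  # err explains why (no alignment, non-ACGT reference, tombo error, ...)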
def get_norm_mean(raw, segs):
"""Return raw signal means for given segments."""
return np.array([raw[segs[i]:segs[i+1]].mean() for i in range(len(segs)-1)])
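# Example: get_norm_mean(raw, segs=[0, 3, 5]) returns the means of raw[0:3] and raw[3:5],
# i.e. one normalised-signal mean per segmented base.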
def get_trace_for_reference_bases(a, read, rna, func=np.mean):
"""Return reference-aligned trace for tr (ref base), tA, tC, tG, tT"""
def get_bidx_fwd(b): return base2idx[b]
def get_bidx_rev(b): return base2idx[base2complement[b]]
# trace for reference bases
tr = np.zeros(a.reference_length, dtype="uint8")
# trace and move data from read
bcgrp = read.get_latest_analysis("Basecall_1D")
trace = read.get_analysis_dataset(bcgrp, "BaseCalled_template/Trace")
if trace is None:
logger("[ERROR] Trace table is missing in Fast5 file! Basecall Fast5 files again using --fast5_out option. ")
return tr
move = read.get_analysis_dataset(bcgrp, "BaseCalled_template/Move")
move_pos = np.append(np.argwhere(move==1).flatten(), len(trace)) # add end of trace
# combine flip & flop probabilities
## here we get sum of flip & flop. maybe get just one? but flop is usually lower...
trace[:, :len(bases)] += trace[:, len(bases):]
trace = trace[:, :len(bases)]
# here we need to remember that DNA 5'>3', but RNA 3'>5'
# plus the strand matters
if a.is_reverse: # for REV alg
get_bidx = get_bidx_rev # take complement base
if not rna: move_pos = move_pos[::-1] # reverse move_pos for DNA
else: # for FWD alg
get_bidx = get_bidx_fwd # take base
if rna: move_pos = move_pos[::-1] # reverse move_pos for RNA
0,
'end_modifications_member_start_slope_z': 0,
'end_modifications_member_end_extension': 0,
'end_modifications_member_end_slope_y': 0,
'end_modifications_member_end_slope_z': 0,
'is_deactivated_for_calculation' : False},
model = Model):
"""
Args:
no (int): Member Tag
start_node_no (int): Tag of Start Node
end_node_no (int): Tag of End Node
rotation_specification_type (enum): Rotation Specification Type Enumeration
rotation_parameters (list): Rotation Parameters
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE:
rotation_parameters = [rotation_angle]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_HELP_NODE:
rotation_parameters = [rotation_help_node, rotation_plane_type]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_INSIDE_NODE:
rotation_parameters = [rotation_plane_type]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_SURFACE:
rotation_parameters = [rotation_surface, rotation_surface_plane_type]
section_no (int): Section Tag
line (int, optional): Assigned Line
comment (str, optional): Comment
params (dict, optional): Any WS Parameter relevant to the object and its value in form of a dictionary
"""
# Client model | Member
clientObject = model.clientModel.factory.create('ns0:member')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Member No.
clientObject.no = no
# Member Type
clientObject.type = MemberType.TYPE_TENSION.name
# Start Node No.
clientObject.node_start = start_node_no
# End Node No.
clientObject.node_end = end_node_no
# Member Rotation
clientObject.rotation_specification_type = rotation_specification_type.name
if rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE:
clientObject.rotation_angle = rotation_parameters[0]
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_HELP_NODE:
clientObject.rotation_help_node = rotation_parameters[0]
clientObject.rotation_plane_type = rotation_parameters[1].name
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_INSIDE_NODE:
clientObject.rotation_plane_type = rotation_parameters[0].name
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_SURFACE:
clientObject.rotation_surface = rotation_parameters[0]
clientObject.rotation_surface_plane_type = rotation_parameters[1].name
# Start Section No.
clientObject.section_start = section_no
# End Section No.
clientObject.section_end = section_no
# Update parameters
params_up: dict = {'member_eccentricity_start': 0, 'member_eccentricity_end': 0,
'member_nonlinearity': 0,
'end_modifications_member_start_extension': 0,
'end_modifications_member_start_slope_y': 0,
'end_modifications_member_start_slope_z': 0,
'end_modifications_member_end_extension': 0,
'end_modifications_member_end_slope_y': 0,
'end_modifications_member_end_slope_z': 0,
'is_deactivated_for_calculation' : False }
params_up.update(params)
# Member Eccentricity
clientObject.member_eccentricity_start = params_up['member_eccentricity_start']
clientObject.member_eccentricity_end = params_up['member_eccentricity_end']
# Member Nonlinearity
clientObject.member_nonlinearity = params_up['member_nonlinearity']
# End Modifications
clientObject.end_modifications_member_start_extension = params_up['end_modifications_member_start_extension']
clientObject.end_modifications_member_start_slope_y = params_up['end_modifications_member_start_slope_y']
clientObject.end_modifications_member_start_slope_z = params_up['end_modifications_member_start_slope_z']
clientObject.end_modifications_member_end_extension = params_up['end_modifications_member_end_extension']
clientObject.end_modifications_member_end_slope_y = params_up['end_modifications_member_end_slope_y']
clientObject.end_modifications_member_end_slope_z = params_up['end_modifications_member_end_slope_z']
# Deactivation for Calculation
clientObject.is_deactivated_for_calculation = params_up['is_deactivated_for_calculation']
# Assigned Line No.
clientObject.line = line
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
if params:
for key in params:
clientObject[key] = params[key]
# Add Member to client model
model.clientModel.service.set_member(clientObject)
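    # Usage sketch (assumes an initialised Model in which nodes 1 and 2 and section 1 already exist):
    #   Member.Tension(no=1, start_node_no=1, end_node_no=2,
    #                  rotation_specification_type=MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE,
    #                  rotation_parameters=[0], section_no=1)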
@staticmethod
def Compression(
no: int = 1,
start_node_no: int = 1,
end_node_no: int = 2,
rotation_specification_type = MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE,
rotation_parameters = [0],
section_no: int = 1,
line = None,
comment: str = '',
params: dict = {'member_eccentricity_start': 0, 'member_eccentricity_end': 0,
'member_nonlinearity': 0,
'end_modifications_member_start_extension': 0,
'end_modifications_member_start_slope_y': 0,
'end_modifications_member_start_slope_z': 0,
'end_modifications_member_end_extension': 0,
'end_modifications_member_end_slope_y': 0,
'end_modifications_member_end_slope_z': 0,
'is_deactivated_for_calculation' : False},
model = Model):
"""
Args:
no (int): Member Tag
start_node_no (int): Tag of Start Node
end_node_no (int): Tag of End Node
rotation_specification_type (enum): Rotation Specification Type Enumeration
rotation_parameters (list): Rotation Parameters
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE:
rotation_parameters = [rotation_angle]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_HELP_NODE:
rotation_parameters = [rotation_help_node, rotation_plane_type]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_INSIDE_NODE:
rotation_parameters = [rotation_plane_type]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_SURFACE:
rotation_parameters = [rotation_surface, rotation_surface_plane_type]
section_no (int): Section Tag
line (int, optional): Assigned Line
comment (str, optional): Comment
params (dict, optional): Any WS Parameter relevant to the object and its value in form of a dictionary
"""
# Client model | Member
clientObject = model.clientModel.factory.create('ns0:member')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Member No.
clientObject.no = no
# Member Type
clientObject.type = MemberType.TYPE_COMPRESSION.name
# Start Node No.
clientObject.node_start = start_node_no
# End Node No.
clientObject.node_end = end_node_no
# Member Rotation
clientObject.rotation_specification_type = rotation_specification_type.name
if rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE:
clientObject.rotation_angle = rotation_parameters[0]
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_HELP_NODE:
clientObject.rotation_help_node = rotation_parameters[0]
clientObject.rotation_plane_type = rotation_parameters[1].name
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_INSIDE_NODE:
clientObject.rotation_plane_type = rotation_parameters[0].name
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_SURFACE:
clientObject.rotation_surface = rotation_parameters[0]
clientObject.rotation_surface_plane_type = rotation_parameters[1].name
# Start Section No.
clientObject.section_start = section_no
# End Section No.
clientObject.section_end = section_no
# Update parameters
params_up: dict = {'member_eccentricity_start': 0, 'member_eccentricity_end': 0,
'member_nonlinearity': 0,
'end_modifications_member_start_extension': 0,
'end_modifications_member_start_slope_y': 0,
'end_modifications_member_start_slope_z': 0,
'end_modifications_member_end_extension': 0,
'end_modifications_member_end_slope_y': 0,
'end_modifications_member_end_slope_z': 0,
'is_deactivated_for_calculation' : False }
params_up.update(params)
# Member Eccentricity
clientObject.member_eccentricity_start = params_up['member_eccentricity_start']
clientObject.member_eccentricity_end = params_up['member_eccentricity_end']
# Member Nonlinearity
clientObject.member_nonlinearity = params_up['member_nonlinearity']
# End Modifications
clientObject.end_modifications_member_start_extension = params_up['end_modifications_member_start_extension']
clientObject.end_modifications_member_start_slope_y = params_up['end_modifications_member_start_slope_y']
clientObject.end_modifications_member_start_slope_z = params_up['end_modifications_member_start_slope_z']
clientObject.end_modifications_member_end_extension = params_up['end_modifications_member_end_extension']
clientObject.end_modifications_member_end_slope_y = params_up['end_modifications_member_end_slope_y']
clientObject.end_modifications_member_end_slope_z = params_up['end_modifications_member_end_slope_z']
# Deactivation for Calculation
clientObject.is_deactivated_for_calculation = params_up['is_deactivated_for_calculation']
# Assigned Line No.
clientObject.line = line
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
if params:
for key in params:
clientObject[key] = params[key]
# Add Member to client model
model.clientModel.service.set_member(clientObject)
@staticmethod
def Buckling(
no: int = 1,
start_node_no: int = 1,
end_node_no: int = 2,
rotation_specification_type = MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE,
rotation_parameters = [0],
section_no: int = 1,
line = None,
comment: str = '',
params: dict = {'member_eccentricity_start': 0, 'member_eccentricity_end': 0,
'member_nonlinearity': 0,
'end_modifications_member_start_extension': 0,
'end_modifications_member_start_slope_y': 0,
'end_modifications_member_start_slope_z': 0,
'end_modifications_member_end_extension': 0,
'end_modifications_member_end_slope_y': 0,
'end_modifications_member_end_slope_z': 0,
'is_deactivated_for_calculation' : False},
model = Model):
"""
Args:
no (int): Member Tag
start_node_no (int): Tag of Start Node
end_node_no (int): Tag of End Node
rotation_specification_type (enum): Rotation Specification Type Enumeration
rotation_parameters (list): Rotation Parameters
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE:
rotation_parameters = [rotation_angle]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_HELP_NODE:
rotation_parameters = [rotation_help_node, rotation_plane_type]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_INSIDE_NODE:
rotation_parameters = [rotation_plane_type]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_SURFACE:
rotation_parameters = [rotation_surface, rotation_surface_plane_type]
section_no (int): Section Tag
line (int, optional): Assigned Line
comment (str, optional): Comment
params (dict, optional): Any WS Parameter relevant to the object and its value in form of a dictionary
"""
# Client model | Member
clientObject = model.clientModel.factory.create('ns0:member')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Member No.
clientObject.no = no
# Member Type
clientObject.type = MemberType.TYPE_BUCKLING.name
# Start Node No.
clientObject.node_start = start_node_no
# End Node No.
clientObject.node_end = end_node_no
# Member Rotation
clientObject.rotation_specification_type = rotation_specification_type.name
if rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE:
clientObject.rotation_angle = rotation_parameters[0]
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_HELP_NODE:
clientObject.rotation_help_node = rotation_parameters[0]
clientObject.rotation_plane_type = rotation_parameters[1].name
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_INSIDE_NODE:
clientObject.rotation_plane_type = rotation_parameters[0].name
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_SURFACE:
clientObject.rotation_surface = rotation_parameters[0]
clientObject.rotation_surface_plane_type = rotation_parameters[1].name
# Start Section No.
clientObject.section_start = section_no
# End Section No.
clientObject.section_end = section_no
# Update parameters
params_up: dict = {'member_eccentricity_start': 0, 'member_eccentricity_end': 0,
'member_nonlinearity': 0,
'end_modifications_member_start_extension': 0,
'end_modifications_member_start_slope_y': 0,
'end_modifications_member_start_slope_z': 0,
'end_modifications_member_end_extension': 0,
'end_modifications_member_end_slope_y': 0,
'end_modifications_member_end_slope_z': 0,
'is_deactivated_for_calculation' : False }
params_up.update(params)
# Member Eccentricity
clientObject.member_eccentricity_start = params_up['member_eccentricity_start']
clientObject.member_eccentricity_end = params_up['member_eccentricity_end']
# Member Nonlinearity
clientObject.member_nonlinearity = params_up['member_nonlinearity']
# End Modifications
clientObject.end_modifications_member_start_extension = params_up['end_modifications_member_start_extension']
clientObject.end_modifications_member_start_slope_y = params_up['end_modifications_member_start_slope_y']
clientObject.end_modifications_member_start_slope_z = params_up['end_modifications_member_start_slope_z']
clientObject.end_modifications_member_end_extension = params_up['end_modifications_member_end_extension']
clientObject.end_modifications_member_end_slope_y = params_up['end_modifications_member_end_slope_y']
clientObject.end_modifications_member_end_slope_z = params_up['end_modifications_member_end_slope_z']
# Deactivation for Calculation
clientObject.is_deactivated_for_calculation = params_up['is_deactivated_for_calculation']
# Assigned Line No.
clientObject.line = line
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
if params:
for key in params:
clientObject[key] = params[key]
# Add Member to client model
model.clientModel.service.set_member(clientObject)
@staticmethod
def Cable(
no: int = 1,
start_node_no: int = 1,
end_node_no: int = 2,
rotation_specification_type = MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE,
rotation_parameters = [0],
section_no: int = 1,
line = None,
comment: str = '',
params: dict = {'end_modifications_member_start_extension': 0,
'end_modifications_member_start_slope_y': 0,
'end_modifications_member_start_slope_z': 0,
'end_modifications_member_end_extension': 0,
'end_modifications_member_end_slope_y': 0,
'end_modifications_member_end_slope_z': 0,
'is_deactivated_for_calculation' : False},
model = Model):
"""
Args:
no (int): Member Tag
start_node_no (int): Tag of Start Node
end_node_no (int): Tag of End Node
rotation_specification_type (enum): Rotation Specification Type Enumeration
rotation_parameters (list): Rotation Parameters
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE:
rotation_parameters = [rotation_angle]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_HELP_NODE:
rotation_parameters = [rotation_help_node, rotation_plane_type]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_INSIDE_NODE:
rotation_parameters = [rotation_plane_type]
for rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_SURFACE:
rotation_parameters = [rotation_surface, rotation_surface_plane_type]
section_no (int): Section Tag
line (int, optional): Assigned Line
comment (str, optional): Comment
params (dict, optional): Any WS Parameter relevant to the object and its value in form of a dictionary
"""
# Client model | Member
clientObject = model.clientModel.factory.create('ns0:member')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Member No.
clientObject.no = no
# Member Type
clientObject.type = MemberType.TYPE_CABLE.name
# Start Node No.
clientObject.node_start = start_node_no
# End Node No.
clientObject.node_end = end_node_no
# Member Rotation
clientObject.rotation_specification_type = rotation_specification_type.name
if rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_ANGLE:
clientObject.rotation_angle = rotation_parameters[0]
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_HELP_NODE:
clientObject.rotation_help_node = rotation_parameters[0]
clientObject.rotation_plane_type = rotation_parameters[1].name
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_INSIDE_NODE:
clientObject.rotation_plane_type = rotation_parameters[0].name
elif rotation_specification_type == MemberRotationSpecificationType.COORDINATE_SYSTEM_ROTATION_VIA_SURFACE:
clientObject.rotation_surface = rotation_parameters[0]
clientObject.rotation_surface_plane_type = rotation_parameters[1].name
# Start Section No.
clientObject.section_start = section_no
# End Section No.
clientObject.section_end = section_no
# Update parameters
params_up: dict = {'end_modifications_member_start_extension': 0,
'end_modifications_member_start_slope_y': 0,
'end_modifications_member_start_slope_z': 0,
'end_modifications_member_end_extension': 0,
'end_modifications_member_end_slope_y': 0,
'end_modifications_member_end_slope_z': 0,
'is_deactivated_for_calculation' : False }
:class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'cursor': cursor,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
'sync': sync,
})
class VersionWhitelist(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.upgrade.version_whitelist'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _VersionWhitelistStub)
def get(self,
component_type,
):
"""
Get whitelist of versions for a component. Component can include HOST,
EDGE, CCP, MP
:type component_type: :class:`str`
:param component_type: (required)
:rtype: :class:`com.vmware.nsx.model_client.AcceptableComponentVersion`
:return: com.vmware.nsx.model.AcceptableComponentVersion
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'component_type': component_type,
})
def list(self):
"""
Get whitelist of versions for different components
:rtype: :class:`com.vmware.nsx.model_client.AcceptableComponentVersionList`
:return: com.vmware.nsx.model.AcceptableComponentVersionList
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list', None)
def update(self,
component_type,
version_list,
):
"""
Update the version whitelist for the specified component type (HOST,
EDGE, CCP, MP).
:type component_type: :class:`str`
:param component_type: (required)
:type version_list: :class:`com.vmware.nsx.model_client.VersionList`
:param version_list: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'component_type': component_type,
'version_list': version_list,
})
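# Illustrative usage sketch (not part of the generated bindings): it assumes an
# existing vmware.vapi.bindings.stub.StubConfiguration for a connected NSX
# manager; the name `stub_config` is a placeholder.
#
#   whitelist = VersionWhitelist(stub_config)
#   host_versions = whitelist.get('HOST')    # AcceptableComponentVersion
#   all_versions = whitelist.list()          # AcceptableComponentVersionList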
class _BundlesStub(ApiInterfaceStub):
def __init__(self, config):
# properties for create operation
create_input_type = type.StructType('operation-input', {
'upgrade_bundle_fetch_request': type.ReferenceType('com.vmware.nsx.model_client', 'UpgradeBundleFetchRequest'),
})
create_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
}
create_input_value_validator_list = [
]
create_output_validator_list = [
]
create_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/upgrade/bundles',
request_body_parameter='upgrade_bundle_fetch_request',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'bundle_id': type.StringType(),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/upgrade/bundles/{bundle-id}',
path_variables={
'bundle_id': 'bundle-id',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'create': {
'input_type': create_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'UpgradeBundleId'),
'errors': create_error_dict,
'input_value_validator_list': create_input_value_validator_list,
'output_validator_list': create_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'UpgradeBundleInfo'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'create': create_rest_metadata,
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.upgrade.bundles',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _HistoryStub(ApiInterfaceStub):
def __init__(self, config):
# properties for list operation
list_input_type = type.StructType('operation-input', {})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/upgrade/history',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
operations = {
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'UpgradeHistoryList'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'list': list_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.upgrade.history',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _NodesStub(ApiInterfaceStub):
def __init__(self, config):
# properties for list operation
list_input_type = type.StructType('operation-input', {
'component_type': type.OptionalType(type.StringType()),
'component_version': type.OptionalType(type.StringType()),
'cursor': type.OptionalType(type.StringType()),
'included_fields': type.OptionalType(type.StringType()),
'page_size': type.OptionalType(type.IntegerType()),
'sort_ascending': type.OptionalType(type.BooleanType()),
'sort_by': type.OptionalType(type.StringType()),
})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/upgrade/nodes',
path_variables={
},
query_parameters={
'component_type': 'component_type',
'component_version': 'component_version',
'cursor': 'cursor',
'included_fields': 'included_fields',
'page_size': 'page_size',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
},
content_type='application/json'
)
operations = {
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NodeInfoListResult'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'list': list_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.upgrade.nodes',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _NodesSummaryStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/upgrade/nodes-summary',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NodeSummaryList'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.upgrade.nodes_summary',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _PlanStub(ApiInterfaceStub):
def __init__(self, config):
# properties for continue operation
continue_input_type = type.StructType('operation-input', {
'skip': type.OptionalType(type.BooleanType()),
})
continue_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.concurrent_change':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
continue_input_value_validator_list = [
]
continue_output_validator_list = [
]
continue_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/upgrade/plan?action=continue',
path_variables={
},
query_parameters={
'skip': 'skip',
},
content_type='application/json'
)
# properties for pause operation
pause_input_type = type.StructType('operation-input', {})
pause_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.concurrent_change':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
pause_input_value_validator_list = [
]
pause_output_validator_list = [
]
pause_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/upgrade/plan?action=pause',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for reset operation
reset_input_type = type.StructType('operation-input', {
'component_type': type.StringType(),
})
reset_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
reset_input_value_validator_list = [
]
reset_output_validator_list = [
]
reset_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/upgrade/plan?action=reset',
path_variables={
},
query_parameters={
'component_type': 'component_type',
},
content_type='application/json'
)
# properties for start operation
start_input_type = type.StructType('operation-input', {})
start_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
start_input_value_validator_list = [
]
start_output_validator_list = [
]
start_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/upgrade/plan?action=start',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for upgradeselectedunits operation
upgradeselectedunits_input_type = type.StructType('operation-input', {
'upgrade_unit_list': type.ReferenceType('com.vmware.nsx.model_client', 'UpgradeUnitList'),
})
upgradeselectedunits_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.concurrent_change':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
upgradeselectedunits_input_value_validator_list = [
]
upgradeselectedunits_output_validator_list = [
]
upgradeselectedunits_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/upgrade/plan?action=upgrade_selected_units',
request_body_parameter='upgrade_unit_list',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
operations = {
'continue': {
'input_type': continue_input_type,
'output_type': type.VoidType(),
'errors': continue_error_dict,
'input_value_validator_list': continue_input_value_validator_list,
'output_validator_list': continue_output_validator_list,
'task_type': TaskType.NONE,
},
'pause': {
'input_type': pause_input_type,
'output_type': type.VoidType(),
'errors': pause_error_dict,
'input_value_validator_list': pause_input_value_validator_list,
'output_validator_list': pause_output_validator_list,
'task_type': TaskType.NONE,
},
'reset': {
'input_type': reset_input_type,
'output_type': type.VoidType(),
'errors': reset_error_dict,
'input_value_validator_list': reset_input_value_validator_list,
'output_validator_list': reset_output_validator_list,
'task_type': TaskType.NONE,
},
'start': {
'input_type': start_input_type,
'output_type': type.VoidType(),
'errors': start_error_dict,
'input_value_validator_list': start_input_value_validator_list,
'output_validator_list': start_output_validator_list,
'task_type': TaskType.NONE,
},
'upgradeselectedunits': {
'input_type': upgradeselectedunits_input_type,
'output_type': type.VoidType(),
'errors': upgradeselectedunits_error_dict,
'input_value_validator_list': upgradeselectedunits_input_value_validator_list,
'output_validator_list': upgradeselectedunits_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'continue': continue_rest_metadata,
'pause': pause_rest_metadata,
'reset': reset_rest_metadata,
'start': start_rest_metadata,
'upgradeselectedunits': upgradeselectedunits_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.upgrade.plan',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _StatusSummaryStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'component_type': type.OptionalType(type.StringType()),
'selection_status': type.OptionalType(type.StringType()),
'show_history': type.OptionalType(type.BooleanType()),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/upgrade/status-summary',
path_variables={
},
query_parameters={
'component_type': 'component_type',
'selection_status': 'selection_status',
'show_history': 'show_history',
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'UpgradeStatus'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.upgrade.status_summary',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _SummaryStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/upgrade/summary',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'UpgradeSummary'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.upgrade.summary',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _UpgradeUnitGroupsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for addupgradeunits operation
addupgradeunits_input_type = type.StructType('operation-input', {
'group_id': type.StringType(),
'upgrade_unit_list': type.ReferenceType('com.vmware.nsx.model_client', 'UpgradeUnitList'),
})
addupgradeunits_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
addupgradeunits_input_value_validator_list = [
]
addupgradeunits_output_validator_list = [
]
addupgradeunits_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/upgrade/upgrade-unit-groups/{group-id}?action=add_upgrade_units',
request_body_parameter='upgrade_unit_list',
path_variables={
'group_id': 'group-id',
},
INDIVIDUAL_FILE)
target = open(OUTPUT_DIR + "/0_0_best_ever_fitness.txt", 'w')
target.write(str(best_ever["fitness"]))
target.close()
EVO_TESTAR_ACTIONS = 1000
GENERATION_IDX = 0
INDIVIDUAL_IDX = 0
evaluate_individual(best_ever)
# end by urueda
return best_ever
def print_stats(generation, individuals):
"""
Print the statistics for the generation and population.
    :param generation: generation number
:type generation: int
:param individuals: population to get statistics for
:type individuals: list
"""
def get_ave_and_std(values):
"""
Return average and standard deviation.
:param values: Values to calculate on
:type values: list
:returns: Average and Standard deviation of the input values
:rtype: tuple
"""
_ave = float(sum(values)) / len(values)
_std = math.sqrt(float(
sum((value - _ave) ** 2 for value in values)) / len(values))
return _ave, _std
# Make sure individuals are sorted
sort_population(individuals)
# Get the fitness values
fitness_values = [i["fitness"] for i in individuals]
# Get the number of nodes
size_values = [get_number_of_nodes(i["genome"], 0) for i in individuals]
# Get the max depth
depth_values = [get_max_tree_depth(i["genome"], 0, 0) for i in individuals]
# Get average and standard deviation of fitness
ave_fit, std_fit = get_ave_and_std(fitness_values)
# Get average and standard deviation of size
ave_size, std_size = get_ave_and_std(size_values)
# Get average and standard deviation of max depth
ave_depth, std_depth = get_ave_and_std(depth_values)
# Print the statistics
print(
"Gen:%d fit_ave:%.2f+-%.3f size_ave:%.2f+-%.3f "
"depth_ave:%.2f+-%.3f max_size:%d max_depth:%d max_fit:%f "
"best_solution:%s" %
(generation,
ave_fit, std_fit,
ave_size, std_size,
ave_depth, std_depth,
max(size_values), max(depth_values), max(fitness_values),
individuals[0]))
def subtree_mutation(individual, param):
"""
Return a new individual by randomly picking a node and growing a
new subtree from it.
:param individual: Individual to mutate
:type individual: dict
:param param: parameters for pony gp
:type param: dict
:returns: Mutated individual
:rtype: dict
"""
# Copy the individual for mutation
new_individual = {
"genome": copy.deepcopy(individual["genome"]),
"fitness": DEFAULT_FITNESS
}
# Check if mutation should be applied
if random.random() < param["mutation_probability"]:
# Pick random node
end_node_idx = get_number_of_nodes(new_individual["genome"], 0) - 1
node_idx = random.randint(0, end_node_idx)
# Get node depth
node_depth, cnt = get_depth_from_index(new_individual["genome"],
0,
node_idx,
0)
assert param["max_depth"] >= node_depth
# Get a new symbol for the subtree
new_subtree = [get_random_symbol(node_depth,
param["max_depth"],
param["symbols"])
]
# Grow tree if it is a function symbol
if new_subtree[0] in param["symbols"]["functions"]:
# Grow to full depth?
full = bool(random.getrandbits(1))
# Grow subtree
grow(new_subtree,
node_depth,
param["max_depth"],
full,
param["symbols"])
assert get_max_tree_depth(new_subtree, node_depth, 0) \
<= param["max_depth"]
# Replace the original subtree with the new subtree
find_and_replace_subtree(new_individual["genome"],
new_subtree,
node_idx,
0)
assert get_max_tree_depth(new_individual["genome"], 0, 0) \
<= param["max_depth"]
# Return the individual
return new_individual
def subtree_crossover(parent1, parent2, param):
"""
Returns two individuals. The individuals are created by
selecting two random nodes from the parents and swapping the
subtrees.
:param parent1: Parent one to crossover
:type parent1: dict
:param parent2: Parent two to crossover
:type parent2: dict
:param param: parameters for pony gp
:type param: dict
:return: Children from the crossed over parents
:rtype: tuple
"""
# Copy the parents to make offsprings
offsprings = ({
"genome": copy.deepcopy(parent1["genome"]),
"fitness": DEFAULT_FITNESS
},
{
"genome": copy.deepcopy(parent2["genome"]),
"fitness": DEFAULT_FITNESS
})
# Check if offspring will be crossed over
if random.random() < param["crossover_probability"]:
xo_nodes = []
node_depths = []
for i, offspring in enumerate(offsprings):
# Pick a crossover point
end_node_idx = get_number_of_nodes(offsprings[i]["genome"], 0) - 1
node_idx = random.randint(0, end_node_idx)
# Find the subtree at the crossover point
xo_nodes.append(get_node_at_index(offsprings[i]["genome"],
node_idx))
xo_point_depth = get_max_tree_depth(xo_nodes[-1], 0, 0)
offspring_depth = get_max_tree_depth(offspring["genome"], 0, 0)
node_depths.append((xo_point_depth, offspring_depth))
# Make sure that the offspring is deep enough
if (node_depths[0][1] + node_depths[1][0]) >= param["max_depth"] or \
(node_depths[1][1] + node_depths[0][0]) >= param[
"max_depth"]:
return offsprings
# Swap the nodes
tmp_offspring_1_node = copy.deepcopy(xo_nodes[1])
# Copy the children from the subtree of the first offspring
# to the chosen node of the second offspring
replace_subtree(xo_nodes[0], xo_nodes[1])
# Copy the children from the subtree of the second offspring
# to the chosen node of the first offspring
replace_subtree(tmp_offspring_1_node, xo_nodes[0])
for offspring in offsprings:
assert get_max_tree_depth(offspring["genome"], 0, 0) \
<= param["max_depth"]
# Return the offsprings
return offsprings
def tournament_selection(population, param):
"""
Return individuals from a population by drawing
`tournament_size` competitors randomly and selecting the best
of the competitors. `population_size` number of tournaments are
held.
:param population: Population to select from
:type population: list
:param param: parameters for pony gp
:type param: dict
:returns: selected individuals
:rtype: list
"""
# Iterate until there are enough tournament winners selected
winners = []
while len(winners) < param["population_size"]:
# Randomly select tournament size individual solutions
# from the population.
competitors = random.sample(population, param["tournament_size"])
# Rank the selected solutions
competitors = sort_population(competitors)
# Append the best solution to the winners
winners.append(competitors[0])
return winners
def generational_replacement(new_population, old_population, param):
"""
    Return a new population. The `elite_size` best individuals of old_population
are appended to the new population. They are kept in the new
population if they are better than the worst.
:param new_population: the new population
:type new_population: list
:param old_population: the old population
:type old_population: list
:param param: parameters for pony gp
:type param: dict
:returns: the new population with the best from the old population
:rtype: list
"""
# Sort the population
old_population = sort_population(old_population)
# Append a copy of the best solutions of the old population to
# the new population. ELITE_SIZE are taken
for ind in old_population[:param["elite_size"]]:
new_population.append(copy.deepcopy(ind))
# Sort the new population
new_population = sort_population(new_population)
# Set the new population size
return new_population[:param["population_size"]]
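# NOTE: the steady-state variant below (added by urueda) redefines and thus
# shadows the generational_replacement() defined above.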
def generational_replacement(new_individual, old_population):
# if int(old_population[len(old_population) -1]["fitness"]) <= int(new_individual["fitness"]):
if float(old_population[len(old_population) -1]["fitness"]) <= float(new_individual["fitness"]): # by urueda
old_population [len(old_population) -1] = new_individual
print(old_population[len(old_population) -1])
return old_population
def run(param):
"""
Return the best solution. Create an initial
population. Perform an evolutionary search.
:param param: parameters for pony gp
:type param: dict
:returns: Best solution
:rtype: dict
"""
# Create population
population = initialize_population(param)
# Start evolutionary search
best_ever = search_loop(population, param)
return best_ever
#return best_ever
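# Hedged sketch of how the operators above compose into one generation step.
# This helper is illustrative only and is not called anywhere in this module;
# it assumes `param` carries the same keys used by the functions above
# (population_size, tournament_size, crossover_probability, mutation_probability, ...).
def _example_generation_step(population, param):
    # Select parents, pair them up, cross them over and mutate the offspring
    parents = tournament_selection(population, param)
    new_population = []
    for i in range(0, len(parents) - 1, 2):
        for child in subtree_crossover(parents[i], parents[i + 1], param):
            new_population.append(subtree_mutation(child, param))
    return new_population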
def parse_exemplars(file_name):
"""
Parse a CSV file. Parse the fitness case and split the data into
Test and train data. In the fitness case file each row is an exemplar
and each dimension is in a column. The last column is the target value of
the exemplar.
:param file_name: CSV file with header
:type file_name: str
:return: Fitness cases and targets
:rtype: list
"""
# Open file
with open(file_name, 'r') as in_file:
# Create a CSV file reader
reader = csv.reader(in_file, delimiter=',')
# Read the header
headers = reader.next()
# Store fitness cases and their target values
fitness_cases = []
targets = []
for row in reader:
# Parse the columns to floats and append to fitness cases
fitness_cases.append(map(float, row[:-1]))
# The last column is the target
targets.append(float(row[-1]))
print("Reading: %s headers: %s exemplars:%d" %
(file_name, headers, len(targets)))
return fitness_cases, targets
def get_symbols():
"""
    Return a symbol dictionary. Helper method to keep the code clean. The nodes
    in a GP tree consist of different symbols. The symbols are either
    functions (internal nodes with arity > 0) or terminals (leaf nodes with
    arity = 0). The symbols are represented as a dictionary with the keys:
- *arities* -- A dictionary where a key is a symbol and the value is the
arity
- *terminals* -- A list of strings(symbols) with arity 0
- *functions* -- A list of strings(symbols) with arity > 0
:return: Symbols used for GP individuals
:rtype: dict
"""
# Dictionary of symbols and their arity
arities = {"nLeftClick": 0,
"nTypeInto": 0,
"LeftClick": 0,
"TypeInto": 0,
"pickAnyUnexecuted": 1,
"pickAny": 1,
"LT": 2,
"EQ": 2
}
# List of terminal symbols
terminals = []
#List of types
types = []
# List of function symbols
functions = []
# List of logic operator
logicFunctions = []
# Append symbols to terminals or functions by looping over the
# arities items
for key, value in arities.items():
# A symbol with arity 0 is a terminal
if value == 0:
# Append the symbols to the terminals list
if key == "nLeftClick" or key == "nTypeInto":
terminals.append(key)
else:
types.append(key)
else:
# Append the symbols to the functions list
if key == "LT" or key == "EQ":
logicFunctions.append(key)
else:
functions.append(key)
return {"arities": arities, "terminals": | |
if name[:4] in ['Heli', 'Shee', 'Coil', 'Turn', 'Stra']:
SS= g.SS
name='%s%s'%(SS.name, SS.chain.id)
#print name
colors=mol.geomContainer.getGeomColor(name)
if colors is None :
#get the regular color for this SS if none is get
colors = [SecondaryStructureType[SS.structureType],]
flag=mol.geomContainer.geoms[name].vertexArrayFlag
if hasattr(g,"obj"):
self._changeColor(g,colors,perVertex=flag)
elif geom=="cpk" or geom=="balls":
#have instance materials...so if colorbyResidue have to switch to residueMaterial
parent = self.getSelectionCommand(sel,mol)
g = mol.geomContainer.geoms[geom]
colors=mol.geomContainer.getGeomColor(geom)
#or do we use the options[1] which should be the colors ?
prefix="S"
name="cpk"
if geom == "balls" :
prefix="B"
name="bs"#"balls&sticks"
if len(sel) == len(mol.allAtoms) :
p = mol.name+"_"+name
else :
p = parent
if hasattr(g,"obj"):
[self._colorSphere(x[1],x[0],sel,
prefix,p,fType,geom) for x in enumerate(sel)]
# map(lambda x,sel=sel,prefix=prefix,p=p,fType=fType,geom=geom:
# self._colorSphere(x[1],x[0],sel,prefix,p,fType,geom),
# enumerate(sel))
elif geom =="sticks" :
g = mol.geomContainer.geoms[geom]
colors = mol.geomContainer.getGeomColor(geom)
parent = self.getSelectionCommand(sel,mol)
if hasattr(g,"obj"):
atoms=sel
set = mol.geomContainer.atoms["sticks"]
if len(set) == len(mol.allAtoms) : p = mol.name+"_cpk"
else : p = parent
bonds, atnobnd = set.bonds
if len(set) != 0 :
[self._colorStick(x[1],x[0],atoms,len(bonds),fType,p,mol) for x in enumerate(bonds)]
# map(lambda x,atoms=atoms,p=p,fType=fType,mol=mol,bonds=bonds:
# self._colorStick(x[1],x[0],atoms,bonds,fType,p,mol),
# enumerate(bonds))
else :
g = mol.geomContainer.geoms[geom]
colors=mol.geomContainer.getGeomColor(geom)
flag=g.vertexArrayFlag
if hasattr(g,"obj"):
if self.soft=="c4d" :
self._changeColor(g,colors,perVertex=flag,
proxyObject=self.colorProxyObject,
pb=self.use_progressBar)
elif self.soft =="c4dr12":
self._changeColor(g,colors,perVertex=flag,
proxyObject=True,
pb=self.use_progressBar)
else :
self._changeColor(g,colors,perVertex=flag,
pb=self.use_progressBar)
def _isoSurface(self,grid,options):
"""
        Callback for computing the isosurface of grid volume data. It will create and
        update the mesh showing the isosurface at a certain isovalue.
@type grid: Volume.Grid3D
@param grid: the current grid volume data
@type options: list
@param options: the list of option used for the command; ie isovalue, size...
"""
if len(options) ==0:
name=grid.name
g = grid.srf
else :
name = options[0]
g = grid.geomContainer['IsoSurf'][name]
print name, g
root = None
if hasattr(self.mv,'cmol') and self.mv.cmol != None:
mol = self.mv.cmol
root = mol.geomContainer.masterGeom.obj
else :
if hasattr(grid.master_geom,"obj"):
root = grid.master_geom.obj
else :
root = self._newEmpty(grid.master_geom.name)
self._addObjectToScene(self._getCurrentScene(),root)
self._addObjToGeom(root,grid.master_geom)
if hasattr(g,"obj") : #already computed so need update
sys.stderr.write("UPDATE MESH")
self._updateMesh(g,parent=root)
else :
self.createMesh(name,g,proxyCol=False,parent=root)
def coarseMolSurface(self,molFrag,XYZd,isovalue=7.0,resolution=-0.3,padding=0.0,
name='CoarseMolSurface',geom=None):
"""
        Function adapted from the Vision network which computes a coarse molecular
surface in PMV
@type molFrag: MolKit.AtomSet
@param molFrag: the atoms selection
@type XYZd: array
@param XYZd: shape of the volume
@type isovalue: float
@param isovalue: isovalue for the isosurface computation
@type resolution: float
@param resolution: resolution of the final mesh
@type padding: float
@param padding: the padding
@type name: string
        @param name: the name of the resulting geometry
@type geom: DejaVu.Geom
@param geom: update geom instead of creating a new one
@rtype: DejaVu.Geom
@return: the created or updated DejaVu.Geom
"""
self.mv.assignAtomsRadii(molFrag.top, united=1, log=0, overwrite=0)
from MolKit.molecule import Atom
atoms = molFrag.findType(Atom)
coords = atoms.coords
radii = atoms.vdwRadius
#self.assignAtomsRadii("1xi4g", united=1, log=0, overwrite=0)
from UTpackages.UTblur import blur
import numpy.oldnumeric as Numeric
volarr, origin, span = blur.generateBlurmap(coords, radii, XYZd,resolution, padding = 0.0)
volarr.shape = (XYZd[0],XYZd[1],XYZd[2])
volarr = Numeric.ascontiguousarray(Numeric.transpose(volarr), 'f')
#print volarr
weights = Numeric.ones(len(radii), typecode = "f")
h = {}
from Volume.Grid3D import Grid3DF
maskGrid = Grid3DF( volarr, origin, span , h)
h['amin'], h['amax'],h['amean'],h['arms']= maskGrid.stats()
#(self, grid3D, isovalue=None, calculatesignatures=None, verbosity=None)
from UTpackages.UTisocontour import isocontour
isocontour.setVerboseLevel(0)
data = maskGrid.data
origin = Numeric.array(maskGrid.origin).astype('f')
stepsize = Numeric.array(maskGrid.stepSize).astype('f')
# add 1 dimension for time steps amd 1 for multiple variables
if data.dtype.char!=Numeric.Float32:
#print 'converting from ', data.dtype.char
data = data.astype('f')#Numeric.Float32)
newgrid3D = Numeric.ascontiguousarray(Numeric.reshape( Numeric.transpose(data),
(1, 1)+tuple(data.shape) ), data.dtype.char)
ndata = isocontour.newDatasetRegFloat3D(newgrid3D, origin, stepsize)
isoc = isocontour.getContour3d(ndata, 0, 0, isovalue,
isocontour.NO_COLOR_VARIABLE)
vert = Numeric.zeros((isoc.nvert,3)).astype('f')
norm = Numeric.zeros((isoc.nvert,3)).astype('f')
col = Numeric.zeros((isoc.nvert)).astype('f')
tri = Numeric.zeros((isoc.ntri,3)).astype('i')
isocontour.getContour3dData(isoc, vert, norm, col, tri, 0)
#print vert
if maskGrid.crystal:
vert = maskGrid.crystal.toCartesian(vert)
#from DejaVu.IndexedGeom import IndexedGeom
from DejaVu.IndexedPolygons import IndexedPolygons
if geom == None :
g=IndexedPolygons(name=name)
else :
g = geom
#print g
inheritMaterial = None
g.Set(vertices=vert, faces=tri, materials=None,
tagModified=False,
vnormals=norm, inheritMaterial=inheritMaterial )
        # shouldn't this apply only to the selection set?
g.mol = molFrag.top
for a in atoms:#g.mol.allAtoms:
a.colors[g.name] = (1.,1.,1.)
a.opacities[g.name] = 1.0
self.mv.bindGeomToMolecularFragment(g, atoms)
#print len(g.getVertices())
return g
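    # Hedged usage sketch for coarseMolSurface(); `epmv` stands for an instance
    # of this adaptor class and `mol` for a molecule already loaded in PMV;
    # both names are illustrative assumptions, not objects defined here.
    #   surf = epmv.coarseMolSurface(mol.chains[0].residues.atoms, [32, 32, 32],
    #                                isovalue=7.0, resolution=-0.3,
    #                                name=mol.name + '_coarse')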
def getCitations(self):
citation=""
for module in self.mv.showCitation.citations:
citation +=self.mv.showCitation.citations[module]
return citation
def testNumberOfAtoms(self,mol):
nAtoms = len(mol.allAtoms)
if nAtoms > 5000 :
mol.doCPK = False
else :
mol.doCPK = True
#def piecewiseLinearInterpOnIsovalue(x):
# """Piecewise linear interpretation on isovalue that is a function
# blobbyness.
# """
# import sys
# X = [-3.0, -2.5, -2.0, -1.5, -1.3, -1.1, -0.9, -0.7, -0.5, -0.3, -0.1]
# Y = [0.6565, 0.8000, 1.0018, 1.3345, 1.5703, 1.8554, 2.2705, 2.9382, 4.1485, 7.1852, 26.5335]
# if x<X[0] or x>X[-1]:
# print "WARNING: Fast approximation :blobbyness is out of range [-3.0, -0.1]"
# return None
# i = 0
# while x > X[i]:
# i +=1
# x1 = X[i-1]
# x2 = X[i]
# dx = x2-x1
# y1 = Y[i-1]
# y2 = Y[i]
# dy = y2-y1
# return y1 + ((x-x1)/dx)*dy
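    # Worked example (hedged) for the commented-out interpolation above: for
    # x = -1.2 the bracketing knots are X = [-1.3, -1.1] with Y = [1.5703, 1.8554],
    # so y = 1.5703 + ((-1.2 - (-1.3)) / 0.2) * (1.8554 - 1.5703) ~= 1.71285.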
#####EXTENSIONS FUNCTION
def showMolPose(self,mol,pose,conf):
"""
        Show a PyRosetta pose object, which is the resulting conformation of a
simulation
@type mol: MolKit.Protein
@param mol: the molecule node to apply the pose
@type pose: rosetta.Pose
@param pose: the new pose from PyRosetta
@type conf: int
@param conf: the indice for storing the pose in the molecule conformational stack
"""
from Pmv.moleculeViewer import EditAtomsEvent
pmv_state = conf
import time
if type(mol) is str:
model = self.getMolFromName(mol.name)
else :
model = mol
model.allAtoms.setConformation(conf)
coord = {}
print pose.n_residue(),len(model.chains.residues)
for resi in range(1, pose.n_residue()+1):
res = pose.residue(resi)
resn = pose.pdb_info().number(resi)
#print resi,res.natoms(),len(model.chains.residues[resi-1].atoms)
k=0
for atomi in range(1, res.natoms()+1):
name = res.atom_name(atomi).strip()
if name != 'NV' :
a=model.chains.residues[resi-1].atoms[k]
pmv_name=a.name
k = k + 1
if name != pmv_name :
if name[1:] != pmv_name[:-1]:
print name,pmv_name
else :
coord[(resn, pmv_name)] = res.atom(atomi).xyz()
cood=res.atom(atomi).xyz()
a._coords[conf]=[cood.x,cood.y,cood.z]
else :
coord[(resn, name)] = res.atom(atomi).xyz()
cood=res.atom(atomi).xyz()
a._coords[conf]=[cood.x,cood.y,cood.z] #return coord
model.allAtoms.setConformation(conf)
event = EditAtomsEvent('coords', model.allAtoms)
self.dispatchEvent(event)
#epmv.insertKeys(model.geomContainer.geoms['cpk'],1)
self.helper.update()
def updateDataGeom(self,mol):
"""
        Callback for updating special geometries that are not PMV generated and which
        do not react to the editAtom event, e.g. pointCloud or Spline
@type mol: MolKit.Protein
@param mol: the parent molecule
"""
mname = mol.name
for c in mol.chains :
self.helper.update_spline(mol.name+"_"+c.name+"spline",c.residues.atoms.get("CA").coords)
if self.doCloud :
self.helper.updatePoly(mol.name+":"+c.name+"_cloud",vertices=c.residues.atoms.coords)
#look if there is a msms:
# #find a way to update MSMS and coarse
# if self.mv.molDispl[mname][3] : self.gui.updateSurf()
# if self.mv.molDispl[mname][4] : self.gui.updateCoarseMS()
def updateData(self,traj,step):
"""
Callback for updating molecule data following the data-player.
DataType can be : MD trajectory, Model Data (NMR, DLG, ...)
@type traj: array
@param traj: the current trajectory object. ie [trajData,trajType]
@type step: int or float
@param step: the new value to apply
"""
if traj[0] is not None :
if traj[1] == 'traj':
mol = traj[0].player.mol
maxi=len(traj[0].coords)
mname = mol.name
if step < maxi :
traj[0].player.applyState(int(step))
self.updateDataGeom(mol)
elif traj[1] == "model":
mname = traj[0].split(".")[0]
type = traj[0].split(".")[1]
mol = self.mv.getMolFromName(mname)
if type == 'model':
nmodels=len(mol.allAtoms[0]._coords)
if step < nmodels:
mol.allAtoms.setConformation(step)
#self.mv.computeSecondaryStructure(mol.name,molModes={mol.name:'From Pross'})
from Pmv.moleculeViewer import EditAtomsEvent
event = EditAtomsEvent('coords', mol.allAtoms)
self.mv.dispatchEvent(event)
self.updateDataGeom(mol)
else :
nmodels=len(mol.docking.ch.conformations)
if step < nmodels:
mol.spw.applyState(step)
self.updateDataGeom(mol)
def updateTraj(self,traj):
"""
Callback for updating mini,maxi,default,step values needed by the data player
DataType can be : MD trajectory, Model Data (NMR, DLG, ...)
@type traj: array
@param traj: the current trajectory object. ie [trajData,trajType]
"""
if traj[1] == "model":
mname = traj[0].split(".")[0]
type = traj[0].split(".")[1]
mol = self.mv.getMolFromName(mname)
if type == 'model':
nmodels=len(mol.allAtoms[0]._coords)
else :
nmodels=len(mol.docking.ch.conformations)
mini=0
maxi=nmodels
default=0
step=1
elif type == "traj":
mini=0
maxi=len(traj[0].coords)
default=0
step=1
elif type == "grid":
mini=traj[0].mini
maxi=traj[0].maxi
default=traj[0].mean
step=0.01
return mini,maxi,default,step
def renderDynamic(self,traj,timeWidget=False,timeLapse=5):
"""
Callback for render a full MD trajectory.
@type traj: array
@param traj: the current trajectory object. ie [trajData,trajType]
@type timeWidget: boolean
@param timeWidget: use the timer Widget to cancel the rendering
@type timeLapse: int
@param timeLapse: the timerWidget popup every timeLapse
"""
if timeWidget:
dial= self.helper.TimerDialog()
dial.cutoff = 15.0
if traj[0] is not None :
if traj[1] == 'traj':
mol = traj[0].player.mol
maxi=len(traj[0].coords)
mname = mol.name
for i in range(maxi):
                if timeWidget and
# Repository: LYH-93/LYH-93.github.io
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
"""-------------------------------------------------------------------------------------------------------------------------------------------------------------- """
"""
Data structures : 1. sequence types (ordered)
                  2. dictionary, set (unordered)
Indexing : accessing a single element of a list through the index operator
Slicing  : selecting the desired elements of a list by specifying a range
List     : a data structure that stores several pieces of data
Why lists are needed : to keep several related pieces of data in one data structure
"""
# String indexing & slicing
text = "IT Will is power."
print(text[:-2]) # IT Will is powe
print(text[:8], text[-1])
# Indexing
flist = ["apple", "banana", "tomato", "peach", "pear" ]
print(flist[0], flist[3], flist[-1])
# Slicing
a = [0,1,4,9,16,25,36,49]
a[3:6]
# Lists (building a list by appending elements)
scores = [ ]
for i in range(10):
scores.append(int(input("성적을 입력하시오:")))
print(scores)
scores[0] = 80
scores[i] = 10;
scores[i+2] = 20;
# The list is printed once for every element it contains
for element in scores:
print(scores)
# More complex lists
list1 = [12,"dog",180.14] # mixed data types
list2 = [["Seoul", 10], ["Paris", 12], ["London", 50]] # nested lists
# Basic list operations
marvel_heroes = [ "스파이더맨", "헐크", "아이언맨" ]
dc_heroes = [ "슈퍼맨", "배트맨", "원더우먼" ]
heros = marvel_heroes + dc_heroes
heros
# Multiplying (repeating) a list
values = [1,2,3]*3
values # [1, 2, 3, 1, 2, 3, 1, 2, 3]
len(values)
# Adding elements
developteam = []
developteam.append("이유현")
developteam.append("성민승")
developteam.append("김동완")
developteam
# Checking whether an item is in a list
if "이유현" in developteam:
print("내부인원")
# Finding an element's index in a list
developteam.index("이유현") # 0
developteam.index("김동완") # 2
# Minimum and maximum of a list
values = [ 100, 20, 31, 45, 15, 6, 7, 8, 9, 10 ]
min(values)
max(values)
# Sorting a list with sort() / sorted()
values.sort()
value2=sorted(values)
print(value2)
# List comprehension
list1 = [3,4,5]
list2 = [x*2 for x in list1]
print(list2)
# 2-D lists
s = [
[ 1, 2, 3, 4, 5 ] ,
[ 6, 7, 8, 9, 10 ],
[11, 12, 13, 14, 15 ]
]
print(s)
# Creating a 2-D list dynamically
rows = 3
cols = 5
s = []
for row in range(rows):
s += [[3]*cols]
print("s=",s) # s= [[3, 3, 3, 3, 3], [3, 3, 3, 3, 3], [3, 3, 3, 3, 3]]
rows = len(s)
cols = len(s[0])
cols
for r in range(rows):
for c in range(cols):
print(s[r][c], end=",")
print()
"""
tuple!!
A tuple is an immutable list: once created, its elements cannot be changed.
tuple('listname')
 : converts a list into a tuple.
"""
# Try to modify a tuple (raises an error)
t1 = (1,2,3,4,5);
t2 = (1,2,3,4,5);
t1[0] =100; # TypeError: 'tuple' object does not support item assignment
# 튜플 대입 연산
student1 = ("철수",19,"CS")
(name,age,major) = student1
name # '철수'
"""
set!!
A set is a collection of unique (non-duplicated) items with no defined order.
"""
numbers = {1,2,2,3,3,3,4}
numbers # {1, 2, 3, 4}
# Adding an element
numbers.add(5)
"""
dictionary
A dictionary is an object that stores key-value pairs.
"""
# Basic form
dictionary = {'name':'이유현','phone':'01091597160','score':100}
dictionary['score']
# Adding an entry
dictionary['speed'] = '1000'
print(dictionary)
# Iterate over the items and print them.
for item in dictionary.items():
print(item)
"""
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
plot exercises, summarized
1. matplotlib
 1) Korean-font and minus-sign support
 2) Basic charts - default line styles and colors, x/y axis styles & colors, using color and marker
 3) Scatter plots, histograms, box plots
 4) Visualizing discrete variables - horizontal bar charts, vertical bar charts, pie charts
 5) Subplot charts
 6) Time-series charts
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
"""
# NOTE: the numpy/matplotlib imports and the `chart` axes were missing here;
# they are assumed to have been created earlier in the original script.
import numpy as np
import matplotlib.pyplot as plt
fig, chart = plt.subplots()              # assumption: `chart` is a matplotlib Axes
data3= np.random.randn(50)               # random numbers
data4= np.random.randn(50).cumsum()      # cumulative sum of random numbers
chart.plot(data3, color='r', label='step',
           drawstyle="steps-post")
chart.plot(data4, color='g', label='line')
plt.legend(loc='best')
plt.ylabel('y label')
plt.xlabel('x label')
plt.title('chart title')
plt.show()
import matplotlib.pyplot as plt
import seaborn as sn
# Q3) Using seaborn's titanic dataset, visualize it step by step as follows.
titanic = sn.load_dataset('titanic')
print(titanic.info())
# <Step 1> Make a subset with the 'survived','pclass','age','fare' columns
titanic_df = titanic[['survived','pclass', 'age','fare']]
print(titanic_df.info())
# sn.pairplot(data=DataFrame, hue='grouping variable', kind='scatter')
# <Step 2> Scatter-plot matrix with the survived column as the grouping variable
sn.pairplot(data=titanic_df, hue='survived')
plt.show()
# <Step 3> Interpret the scatter-plot matrix
'''
pclass : the death rate is much higher for 3rd class passengers
pclass vs fare : 1st class passengers paid higher fares
age : most passengers are between 25 and 50; death and survival rates are similar there
age vs fare : death rate is generally higher for older passengers who paid low fares
fare : low fares make up a relatively large share of the distribution
fare vs age : death rate is generally higher for low fares and passengers in their 50s or older
'''
'''
# survived vs age
# survival rate by age group: highest at 20~40
titanic[titanic['survived'] == 1].age.plot(kind = 'hist', color = 'blue')
# death rate by age group: highest at 20~40
titanic[titanic['survived'] == 0].age.plot(kind = 'hist', color = 'green')
# Q4) Using seaborn's tips dataset, visualize it step by step as follows.
tips = sn.load_dataset('tips')
print(tips.info())
# <Step 1> Make a subset with the 'total_bill','tip','sex','size' columns
tips_df = tips[['total_bill','tip','sex','size']]
# <Step 2> Scatter-plot matrix with the sex column as the grouping variable
sn.pairplot(data=tips_df, hue='sex')
plt.show()
# <Step 3> Interpret the scatter-plot matrix
"""
total_bill : most bills fall between 15 and 20; the larger the bill, the more often a man pays
total_bill vs tip : roughly proportional; large bills with large tips are mostly paid by men
tip : most tips fall between 1 and 5; the larger the tip, the more often a man pays
total_bill vs size : small party sizes are mostly paid by women
size : party size 2 is the most common; size 4 in particular is mostly paid by men
size vs total_bill : roughly proportional; larger parties mean larger bills
"""
import pandas as pd # object
import numpy as np # dataset
import matplotlib.pyplot as plt # plt.show()
# 1. Basic chart visualization
ser = pd.Series(np.random.randn(10)) # 1d
print(ser)
# 1-D object: default chart is a line plot
ser.plot(color='g')
plt.show()
# 2-D object
df = pd.DataFrame(np.random.randn(10, 4),
columns=('one','two','three','fore'))
print(df)
# Default chart: line plot
df.plot()
plt.show()
# Bar chart: vertical
df.plot(kind='bar', title = 'bar chart')
plt.show()
# Bar chart: horizontal
df.plot(kind='barh', title = 'bar chart')
plt.show()
# Bar chart: horizontal, stacked
df.plot(kind='barh', title = 'barh chart', stacked=True)
plt.show()
# 2. Using a dataset file
import os
os.chdir('C:/ITWILL/4_Python-II/data')
tips = pd.read_csv('tips.csv')
print(tips.info())
# Cross tabulation: using grouping variables
# day of week (day): rows vs party size (size): columns
tips['day'].unique() # ['Sun', 'Sat', 'Thur', 'Fri']
tips['size'].unique()# [2, 3, 4, 1, 6, 5]
tab = pd.crosstab(index=tips['day'], columns=tips['size'])
print(tab)
# Table information
tab.shape # (4, 6)
tab.index # row labels
tab.columns # column labels
#tab.index = new labels
type(tab) # pandas.core.frame.DataFrame
help(tab.plot)
# size: exclude 1 and 6 -> subset
# obj.loc[rows, columns]
new_tab = tab.loc[:, 2:5]
print(new_tab)
new_tab.plot(kind='barh', stacked=True,
title = 'day and size')
plt.show()
"""
---------------------------------------------------------------------------------------------------------------------------------------------------------------------
End of the plot examples
"""
"""
---------------------------------------------------------------------------------------------------------------------------------------------------------------------
1. Group by : groups the rows of selected columns according to the values of a given variable
2. apply : applies a function to each group (or along an axis of a DataFrame)
3. Pivot table ---> df.pivot(index='x',columns='y',values='z')
    - choose which column becomes the row index on the left
    - stand the y column up as the column labels
    - fill the cells with the values taken from column z
---------------------------------------------------------------------------------------------------------------------------------------------------------------------
"""
"""
NUMPY
1. Array creation: array()
 - arange()   : returns an array object over a range
 - linspace() : generates evenly spaced points between a start and an end point
 - reshape()  : changes the number of rows and columns
2. Special matrices: zeros() - array filled with 0s, ones() - array filled with 1s, eye() - identity matrix
3. Random numbers: numpy.random.normal(size = count)
4. Arithmetic operations
"""
"""
# Series: the pandas Series is an effective data structure for handling 1-D data
  the values attribute returns the underlying array of data
  the index attribute returns the index information
  each element can be accessed with [index]
  numpy functions can be applied to a Series
  a Series can be created from a dict object
"""
# code 1
from pandas import Series, DataFrame
import numpy as np
import pandas as pd
price = Series([4000, 3000, 3500, 2000])
print(price)
print(price.index)
print(price.values)
print('====================')
fruit = Series([4000, 3000, 3500, 2000],
index=['apple', 'mellon','orange', 'kiwi'])
print(fruit)
print(fruit[0]) # access by position
print(fruit['apple']) # access by index label
print(fruit[fruit>3000]) # boolean expression
print("=================")
# code 2
from pandas import Series, DataFrame
import numpy as np
import pandas as pd
good1 = Series([4000,3500,None,2000],index = ['apple','mango','orange','kiwi'])
good2 = Series([3000,3000,3500,2000],index = ['apple','mango','orange','kiwi'])
print(pd.isnull(good1)) # detect NaN values; returns True where the value is None/NaN
print (good1+good2)
# DataFrame
"""
# DataFrame is an effective data structure for handling 2-D data organized in rows and columns
  usually created from a dictionary
  accepted input data:
  1. 2-D ndarray
  2. dict of lists, tuples, dicts or Series
  3. list of dicts or Series
  4. list of lists or tuples
  5. selecting columns:
    1) data.iloc[[rows],[columns]] # rows are index positions
"""
# code 1
from pandas import Series, DataFrame
items = {'code': [1,2,3,4,5,6],
'name': ['apple','watermelon','oriental melon', 'banana', 'lemon', 'mango'],
'manufacture': ['korea', 'korea', 'korea','philippines','korea', 'taiwan'],
'price':[1500, 15000,1000,500,1500,700]}
data = DataFrame(items)
print(data)
# Select only specific columns
data1 = DataFrame(items, columns = ['code', 'price'])
print(data1)
print(data.loc[0]) # print row 0 (displayed horizontally)
print(data.loc[:0]) # print row 0 (displayed vertically)
print(data.loc[[2],['name']])
# Modifying the DataFrame
data.index = np.arange(1,7,1) # set the index to 1..6
data.columns
data = data.reindex(['1','2','3','4','5','7'],columns = ['code', 'name', 'manufacture', 'price'])
print(data.index)
print(data);
"""-------------------------------------------------------------------------------------------------------------------------------------------------------------- """
# Algorithm for generating a normal distribution
""" 정규분포란? 가우시안 정규 분포 :
자연 현상에서 나타나는 숫자를 확률 모형으로 모형화할 때 가장 많이 사용되는 모형
표준편차 1배안에 전체 데이터의 약 70% 이상이 몰려있고
1.96배 안에 95% 이상이 분포된 경우
"""
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import font_manager, rc
from scipy import stats
import scipy as sp
font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
mu = 0
std = 1
rv = sp.stats.norm(mu, std)
xx = np.linspace(-5, 5, 100)
plt.plot(xx, rv.pdf(xx))
plt.ylabel("확률")
plt.title("정규분포곡선")
plt.show()
x = rv.rvs(100) # draw samples by simulating with the rvs method
print(x)
# Test statistic and significance probability (p-value)
"""
Testing is the task of quantitatively proving or disproving a hypothesis about the distribution and the parameters of the random variable hiding behind the data.
In practice we usually have to draw a few dozen or a few hundred samples from the real population and work with their sample variance or sample mean.
When the sample size is not large, a test that uses the sample variance (sample standard deviation) relies on the t-distribution, which is why it is called a t-test.
The basic logic of proving a hypothesis, i.e. of a test, is as follows.
If the hypothesis is true, that is, if the parameter satisfies a particular condition, then the sample data generated from that random variable follow a certain rule.
Computing a certain number from the sample data according to that rule yields a value that follows a specific probability distribution. This number is called the test statistic and its distribution is called the test statistic distribution. The type of test statistic distribution and the value of its parameters are determined by the hypothesis chosen at the start.
"""
# File: pygears/hdl/sv/svcompile.py
from pygears import reg
from pygears.hls import ir, is_intf_id
from pygears.hls import Context, HDLVisitor, Scope
from pygears.typing import Bool, typeof
from pygears.core.port import HDLProducer, HDLConsumer
from pygears.core.gear import InSig, OutSig
from dataclasses import dataclass, field
from typing import List
import itertools
from pygears.hdl.sv.v.accessors import rewrite
from .util import svgen_typedef
from .v.util import vgen_signal, vgen_intf
REG_TEMPLATE = """
always @(posedge clk) begin
if(rst | _rst_cond) begin
{0} <= {1};
end else if ({0}_en && cycle_done) begin
{0} <= {0}_next;
end
end
"""
class HDLWriter:
def __init__(self, indent=0):
self.indent = indent
self.lines = []
def line(self, line=''):
if not line:
self.lines.append('')
else:
self.lines.append(f'{" "*self.indent}{line}')
def block(self, block):
for line in block.split('\n'):
self.line(line)
def __str__(self):
return '\n'.join(self.lines)
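# Hedged usage sketch for HDLWriter: the writer collects indented lines and
# renders them with str(); the signal names below are illustrative only and
# this helper is not called anywhere in the module.
def _example_hdlwriter():
    w = HDLWriter(indent=4)
    w.line('always_comb begin')
    w.indent += 4
    w.line('dout = din;')
    w.indent -= 4
    w.line('end')
    return str(w)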
res_true = ir.ResExpr(Bool(True))
@dataclass
class BlockLines:
header: List = field(default_factory=list)
content: List = field(default_factory=list)
footer: List = field(default_factory=list)
def is_reg_id(expr):
return (isinstance(expr, ir.Name) and isinstance(expr.obj, ir.Variable) and expr.obj.reg)
class SVCompiler(HDLVisitor):
def __init__(self, ctx, var, writer, selected, lang, aux_funcs=None):
self.ctx = ctx
self.writer = writer
self.var = var
self.selected = selected
self.block_lines = []
self.block_stack = []
self.defaults = Scope()
self.lang = lang
if self.lang == 'sv':
from .sv_expression import svexpr
self.separator = '.'
self.svexpr = svexpr
else:
from .v.v_expression import vexpr
self.separator = '.'
self.svexpr = vexpr
if aux_funcs is None:
aux_funcs = {}
self.aux_funcs = aux_funcs
@property
def cur_block_lines(self):
return self.block_lines[-1]
def trim_cur_block(self):
content = []
for c in self.cur_block_lines.content:
if isinstance(c, BlockLines) and not c.content:
continue
content.append(c)
self.cur_block_lines.content = content
def prepend(self, line):
self.cur_block_lines.content.insert(0, line)
def write(self, line):
self.cur_block_lines.content.append(line)
def header(self, line):
self.cur_block_lines.header.append(line)
def footer(self, line):
self.cur_block_lines.footer.append(line)
def enter_block(self, block):
self.defaults.subscope()
self.block_stack.append(block)
bl = BlockLines()
self.block_lines.append(bl)
if not isinstance(block, ir.HDLBlock):
return
def exit_block(self, block=None):
self.defaults.upscope()
bl = self.block_lines.pop()
if isinstance(block, ir.HDLBlock):
maybe_else = 'else ' if getattr(block, 'else_branch', False) else ''
# in_cond = ir.BinOpExpr((block.in_cond, block.opt_in_cond),
# ir.opc.And)
in_cond = block.in_cond
if in_cond != res_true:
in_cond_val = self.svexpr(in_cond, self.aux_funcs)
bl.header.append(f'{maybe_else}if ({in_cond_val}) begin')
elif maybe_else:
bl.header.append(f'else begin')
self.write(bl)
self.block_stack.pop()
def handle_defaults(self, target, stmt):
if target not in self.defaults:
self.defaults.items[target] = stmt
elif stmt != self.defaults[target]:
self.defaults[target] = stmt
self.write(stmt)
def attr(self, *args):
return self.separator.join(args)
def _assign_value(self, target, val):
if isinstance(target, ir.SubscriptExpr):
base_target = target.val
elif isinstance(target, ir.Name):
base_target = target
elif isinstance(target, ir.Component):
base_target = target.val
elif isinstance(target, ir.ConcatExpr):
for i, t in enumerate(target.operands):
self._assign_value(t, ir.SubscriptExpr(val, ir.ResExpr(i)))
return
else:
raise Exception(f'Unsupported assignment target: {target}')
if is_reg_id(base_target):
if not self.selected(base_target):
return
name = self.svexpr(target, self.aux_funcs)
val = self.svexpr(val, self.aux_funcs)
if val is None:
return
svstmt = f"{name} = {val}"
self.handle_defaults(name, svstmt)
# self.write(f"{base_target.name}_en = 1")
ctx = base_target.ctx
base_target.ctx = 'en'
self.write(f"{self.svexpr(base_target)} = 1")
base_target.ctx = ctx
return
if target.dtype is OutSig:
if self.selected(target):
name = target.obj.val.name
svval = self.svexpr(val, self.aux_funcs)
svstmt = f"{name} = {svval}"
self.handle_defaults(name, svstmt)
return
elif is_intf_id(target):
name = self.svexpr(target, self.aux_funcs)
if target.ctx == 'store':
if self.selected(target):
if is_intf_id(val):
val_name = self.svexpr(val, self.aux_funcs)
svstmt = f"{name}_s = {val_name}_s"
self.handle_defaults(name, svstmt)
self.write(f"{self.attr(name, 'valid')} = {self.attr(val_name, 'valid')}")
elif val == ir.ResExpr(None):
self.handle_defaults(self.attr(name, 'valid'),
f"{self.attr(name, 'valid')} = 0")
else:
self.handle_defaults(self.attr(name, 'valid'),
f"{self.attr(name, 'valid')} = 1")
val = self.svexpr(val, self.aux_funcs)
if val is not None:
svstmt = f"{name}_s = {val}"
self.handle_defaults(name, svstmt)
if is_intf_id(val) and self.selected(val):
val_name = self.svexpr(val, self.aux_funcs)
self.write(f"{self.attr(val_name, 'ready')} = {self.attr(name, 'ready')}")
elif target.ctx == 'ready':
self.handle_defaults(self.attr(name, 'ready'), f"{self.attr(name, 'ready')} = 1")
return
if not self.selected(base_target):
return
if val.dtype is None:
return
target = self.svexpr(target, self.aux_funcs)
if target is None:
return
val = self.svexpr(val, self.aux_funcs)
if val is None:
return
svstmt = f"{target} = {val}"
self.handle_defaults(target, svstmt)
def AssertValue(self, node):
self.write(f'assert ({self.svexpr(node.val.test, self.aux_funcs)})')
self.write(f'else $error("{node.val.msg}");')
def AssignValue(self, node):
self._assign_value(node.target, node.val)
def list_initials(self):
for name, obj in self.ctx.scope.items():
if not isinstance(obj, ir.Variable):
continue
if typeof(obj.dtype, ir.IntfType):
# if isinstance(obj.val.producer, HDLProducer):
if (self.selected(self.ctx.ref(name, ctx='store'))
and obj.dtype.direction == ir.IntfType.iout):
yield self.attr(name, 'valid'), '0'
# elif len(obj.val.consumers) == 1 and isinstance(obj.val.consumers[0], HDLConsumer):
elif self.selected(self.ctx.ref(name, ctx='ready')):
if not is_port_intf(name, self.ctx):
yield self.attr(name, 'ready'), f"{self.attr(name, 'valid')} ? 0 : 1'bx"
elif obj.reg:
target = self.ctx.ref(name, ctx='en')
if self.selected(target):
yield (f'{self.svexpr(self.ctx.ref(name, ctx="store"), self.aux_funcs)}',
f'{self.svexpr(self.ctx.ref(name), self.aux_funcs)}')
yield f'{self.svexpr(target, self.aux_funcs)}', '0'
else:
pass
def CombBlock(self, node):
self.block_lines.append(BlockLines())
self.header(f'// Comb block for: {self.var}')
self.header(f'always_comb begin')
for target, expr in self.list_initials():
self.handle_defaults(target, f"{target} = {expr}")
# self.prepend(f"{target} = {stmt}")
# self.defaults[target] = stmt
self.HDLBlock(node)
self.trim_cur_block()
for target, svstmt in reversed(list(self.defaults.items.items())):
self.prepend(svstmt)
self.footer('end')
def FuncReturn(self, node):
retval = self.svexpr(node.expr, self.aux_funcs)
if self.lang == 'sv':
if retval is None:
self.write('return')
else:
self.write(f"return {self.svexpr(node.expr, self.aux_funcs)}")
else:
if retval is not None:
self.write(
f"{self.svexpr(node.func.name, self.aux_funcs)} = {self.svexpr(node.expr, self.aux_funcs)}"
)
def FuncBlock(self, node):
self.block_lines.append(BlockLines())
self.header('')
self.HDLBlock(node)
self.footer(f'endfunction')
self.footer('')
def LoopBlock(self, node):
self.HDLBlock(node)
def HDLBlock(self, node):
self.enter_block(node)
for stmt in node.stmts:
self.visit(stmt)
self.trim_cur_block()
self.exit_block(node)
def IfElseBlock(self, node):
self.enter_block(node)
for i, stmt in enumerate(node.stmts):
if i > 0:
stmt.else_branch = True
else:
stmt.else_branch = False
self.visit(stmt)
content = []
for c in reversed(self.cur_block_lines.content):
if not content and not c.content:
continue
content.insert(0, c)
self.cur_block_lines.content = content
self.exit_block(node)
gear_module_template = """
{%- import 'snippet.j2' as snippet -%}
{% call snippet.gear_module(module_name, intfs, comment, sigs) %}
{{svlines|indent(4,True)}}
{%- endcall %}
"""
def write_block(block, writer):
for h in block.header:
writer.line(h)
if block.header:
writer.indent += 4
for c in block.content:
if isinstance(c, str):
writer.line(c + ';')
else:
write_block(c, writer)
if block.header:
writer.indent -= 4
if block.footer and block.footer[0] == 'endfunction':
writer.line('endfunction')
else:
writer.line('end')
def typedef_or_inline(writer, dtype, name):
res = svgen_typedef(dtype, name)
if "\n" not in res[:-1]:
return res.partition(' ')[-1].rpartition(';')[0].rpartition(' ')[0]
writer.block(res)
return f'{name}_t'
def svcompile(hdl_stmts, ctx, title, selected, lang, aux_funcs=None):
writer = HDLWriter()
v = SVCompiler(ctx, title, writer, selected=selected, lang=lang, aux_funcs=aux_funcs)
v.visit(hdl_stmts)
if not v.block_lines[0].content:
return ''
write_block(v.block_lines[0], writer)
writer.line()
return str(writer)
# TODO: Why do we need this check, can we generalize this for any variable?
def is_top_port_intf(name, ctx):
for p in ctx.gear.in_ports + ctx.gear.out_ports:
if p.basename == name:
return p
else:
return None
def is_port_intf(name, ctx):
for m in ctx.submodules:
for p in m.in_ports:
if p.name == name:
return p
for p in ctx.gear.in_ports:
if p.basename == name:
if p.consumer:
return p
else:
return None
for p in ctx.gear.out_ports:
if p.basename == name:
return p
def write_declarations(ctx, subsvmods, template_env):
writer = HDLWriter()
lang = template_env.lang
if lang == 'sv':
from .sv_expression import svexpr
sep = '.'
exprgen = svexpr
else:
from .v.v_expression import vexpr
sep = '.'
exprgen = vexpr
for name, expr in ctx.regs.items():
name = exprgen(ctx.ref(name))
if lang == 'sv':
writer.line(f'logic {name}_en;')
name_t = typedef_or_inline(writer, expr.dtype, name)
writer.line(f'{name_t} {name}, {name}_next;')
else:
writer.line(f'reg {name}_en;')
writer.block(vgen_signal(expr.dtype, 'reg', f'{name}_next', 'output', hier=False))
writer.block(vgen_signal(expr.dtype, 'reg', name, 'output', hier=False))
writer.line()
for name, expr in ctx.intfs.items():
dtype = expr.dtype.dtype
if is_top_port_intf(name, ctx) is None:
if lang == 'sv':
writer.line(f'dti#({dtype.width}) {name}();')
else:
writer.line(f'reg {name}_ready;')
writer.line(f'reg {name}_valid;')
writer.line(vgen_signal(dtype, 'reg', f'{name}_data', 'output', False))
if lang == 'sv':
name_t = typedef_or_inline(writer, dtype, name)
writer.line(f'{name_t} {name}_s;')
if expr.dtype.direction == ir.IntfType.iin:
writer.line(f'assign {name}_s = {sep.join([name, "data"])};')
else:
writer.line(f'assign {sep.join([name, "data"])} = {name}_s;')
else:
if expr.dtype.direction == ir.IntfType.iin:
writer.block(vgen_signal(dtype, 'reg', f'{name}_s', 'input', False))
writer.line(f"assign {name}_s = {name}_data;")
else:
writer.block(vgen_signal(dtype, 'reg', f'{name}_s', 'output', False))
writer.line(f"assign {name}_data = {name}_s;")
writer.line()
for name, expr in ctx.variables.items():
if expr.dtype is None or typeof(expr.dtype, ir.IntfType):
continue
if lang == 'sv':
name_t = typedef_or_inline(writer, expr.dtype, name)
writer.line(f'{name_t} {name};')
else:
writer.block(vgen_signal(expr.dtype, 'reg', name, 'input', hier=False))
writer.line()
for c, s in zip(ctx.submodules, subsvmods):
port_map = {}
for intf, p in itertools.chain(zip(c.in_ports, c.gear.in_ports),
zip(c.out_ports, c.gear.out_ports)):
if lang == 'sv':
port_map[p.basename] = intf.name
else:
port_map[p.basename] = (intf.name, None, None)
writer.block(s.get_inst(template_env, port_map))
if ctx.regs:
writer.line(f'initial begin')
for name, expr in ctx.regs.items():
if not isinstance(expr.val, ir.ResExpr):
continue
writer.line(f" {exprgen(ctx.ref(name))} = {exprgen(expr.val)};")
writer.line(f'end')
return str(writer)
def write_module(ctx: Context, hdl, writer, subsvmods, funcs, template_env, config=None):
if config is None:
config = {}
lang = template_env.lang
aux_funcs = {}
if lang == 'sv':
from .sv_expression import svexpr
sep = '.'
exprgen = svexpr
else:
from .v.v_expression import vexpr
sep = '.'
exprgen = vexpr
for f_hdl, f_ctx in funcs:
size = ''
if f_hdl.ret_dtype.width > 0:
size = f'[{f_hdl.ret_dtype.width-1}:0]'
if getattr(f_hdl.ret_dtype, 'signed', False):
size = f'signed {size}'
writer.line(f'function {size} {f_hdl.name};')
writer.indent += 4
for name, expr in f_ctx.variables.items():
if expr.dtype is None:
continue
if name in f_ctx.signature:
continue
if lang == 'sv':
name_t = typedef_or_inline(writer, expr.dtype, name)
writer.line(f'{name_t} {name};')
else:
writer.block(vgen_signal(expr.dtype, 'reg', name, 'input', hier=False))
writer.line()
if lang == 'sv':
# -*- coding: utf-8 -*-
#import pdb; pdb.set_trace()
from __future__ import unicode_literals, print_function
import sys
import os
import pyloco
# Push name collection to leaf nodes
# filter part of subnodes to stop the push
# distinguish definition from reference
#self._search_subnodes(node, ids, rtypes)
class PassSet(set):
def __init__(self, iterable=None):
self.elements = iterable
def __or__(self, other):
return other
def __ior__(self, other):
return other
def __ror__(self, other):
return other
def __and__(self, other):
return other if self.elements is None else other & set(self.elements)
def __iand__(self, other):
return other if self.elements is None else other & set(self.elements)
def __rand__(self, other):
return other if self.elements is None else other & set(self.elements)
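# Added note (not in the original source): PassSet makes intersection pass through. Because
# PassSet subclasses set and overrides the reflected operators, Python prefers its __rand__:
#   {"Use_Stmt", "Function_Stmt"} & PassSet()              -> {"Use_Stmt", "Function_Stmt"}
#   {"Use_Stmt", "Function_Stmt"} & PassSet(["Use_Stmt"])  -> {"Use_Stmt"}
# so a PASS entry in the resolution map keeps (or merely filters) the incoming rtypes instead
# of replacing them.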
name_resolvers = {
"Program_Name": ["Program_Stmt"],
"Object_Name": None,
"Part_Name": None,
"Scalar_Variable_Name": None,
"Type_Name": ["Derived_Type_Stmt", "Use_Stmt"],
"Procedure_Component_Name": None,
"Procedure_Name": None,
"Binding_Name": None,
"Type_Param_Name": None,
"Entry_Name": ["Entry_Stmt"],
"Type_Param_Name_List": None,
"Component_Name": None,
"Interface_Name": None,
"Arg_Name": None,
"Procedure_Entity_Name": None,
"Binding_Name_List": None,
"Final_Subroutine_Name_List": None,
"Final_Subroutine_Name": None,
"Function_Name": ["Function_Subprogram", "Use_Stmt"],
"Subroutine_Name": ["Subroutine_Subprogram", "Use_Stmt"],
"Procedure_Name_List": None,
"Object_Name_List": None,
"Entity_Name": None,
"Common_Block_Name": None,
"Proc_Pointer_Name": None,
"Variable_Name": None,
"Array_Name": None,
"External_Name_List": None,
"External_Name": None,
"Intrinsic_Procedure_Name_List": None,
"Intrinsic_Procedure_Name": None,
"Proc_Entity_Name": None,
"Entity_Name_List": None,
"Do_Construct_Name": ["Label_Do_Stmt", "Nonlabel_Do_Stmt"],
"Index_Name": None,
"Associate_Construct_Name": ["Associate_Stmt"],
"Associate_Name": None,
"Case_Construct_Name": None,
"Forall_Construct_Name": ["Forall_Construct_Stmt"],
"Where_Construct_Name": ["Where_Construct_Stmt"],
"If_Construct_Name": ["If_Then_Stmt"],
"Select_Construct_Name": ["Select_Type_Stmt"],
"Block_Data_Name": None,
}
# NOTE: maps resolving statement -> (scope_stmt, forstmts)
res_all_stmts = {
"Derived_Type_Stmt": (None, None),
"Type_Param_Def_Stmt": ("Derived_Type_Def", None), # within Type Construct
"Data_Component_Def_Stmt": ("Derived_Type_Def", None), # within Type Construct
"Proc_Component_Def_Stmt": ("Derived_Type_Def", None), # within Type Construct
"Specific_Binding": ("Derived_Type_Def", None), # within Type Construct
"Generic_Binding": ("Derived_Type_Def", None), # within Type Construct
"Final_Binding": ("Derived_Type_Def", None), # within Type Construct
"Enumerator_Def_Stmt": ("Enum_Def", None), # within Enum_Def
"Type_Declaration_Stmt": (None, None),
"Namelist_Stmt": (None, None),
"Equivalence_Stmt": (None, None),
"External_Stmt": (None, None),
"Common_Stmt": (None, None),
"Bind_Stmt": (None, None),
"Associate_Stmt": ("Associate_Construct", None), # within associate construct
"Select_Type_Stmt": ("Select_Type_Construct", None), # within select construct
"Do_Stmt": (None, ("Cycle_Stmt", "Exit_Stmt")),
"Label_Do_Stmt": (None, ("Cycle_Stmt", "Exit_Stmt")), # for Cycle_Stmt and Exit_Stmt
"Format_Stmt": (None, ("Write_Stmt", "Read_Stmt")),
"Module_Stmt": (None, ("Use_Stmt",)),
"Use_Stmt": (None, None),
"Interface_Stmt": (None, None),
"Import_Stmt": ("Interface_Block", None), # within interface construct
"Procedure_Declaration_Stmt": (None, None),
"Function_Stmt": (None, None),
"Subroutine_Stmt": (None, ("Call_Stmt",)),
"Entry_Stmt": (None, None),
"Stmt_Function_Stmt": (None, None)
}
class Referer(pyloco.Task):
_name_ = "referer"
_version_ = "0.1.0"
def __init__(self, parent):
self.resmap = {}
resmap = os.path.join(os.path.dirname(__file__), "resmap.csv")
self.add_data_argument("tree", help="tree to search")
self.add_data_argument("node", help="top node identifier to search")
self.add_option_argument("--mapfile", default=resmap, help="resoultion map file")
self.register_forward("ids", help="identifiers collected")
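# Added note (an assumption inferred from the parsing code in perform(), not stated in the
# original source): each line of resmap.csv is expected to look like
#   <Node_Tag>,<Resolver_Stmt>,<Resolver_Stmt>,...
# where a resolver entry of PASS maps the tag to PassSet(name_resolvers[<Node_Tag>]) and a
# line with no resolver entries maps the tag to an empty set.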
def perform(self, targs):
if os.path.isfile(targs.mapfile):
with open(targs.mapfile) as fh:
for line in fh:
items = line.strip().split(",")
if items:
_n, _r = items[0], items[1:]
res = set()
if _r:
for _rn in _r:
if not _rn:
continue
elif _rn == "PASS":
res = PassSet(name_resolvers[_n])
else:
res.add(_rn)
self.resmap[_n] = res
else:
self.resmap[_n] = res
else:
raise Exception("Resolution map file is not found: %s." % targs.mapfile)
self.resmap["tuple"] = PassSet()
self.tree = targs.tree
ids = {}
if targs.node:
self._search(self.tree[targs.node.identifier], ids, set(res_all_stmts.keys()), None)
self.add_forward(ids=ids)
def _search(self, node, ids, rtypes, followups=None):
if node.tag.startswith("End_"):
return
if node.tag == "Name":
ids[node] = (rtypes, followups)
return
if node.tag in ("str", "NoneType"):
return
rtypes = rtypes & self.resmap[node.tag]
if node.tag.endswith("_List"):
for child in self.tree.children(node.identifier):
self._search(child, ids, set(rtypes))
else:
getattr(self, "search_"+node.tag)(node, ids, rtypes)
def _search_subnodes(self, node, ids, rtypes, includes=[], excludes=[]):
subnodes = []
rtypes = rtypes & self.resmap[node.tag]
children = self.tree.children(node.identifier)
if includes:
for item in includes:
if item in children:
subnodes.append(item)
elif isinstance(item, int) and item < len(children):
subnodes.append(children[item])
else:
subnodes = children
for idx, subnode in enumerate(subnodes):
if idx in excludes or subnode in excludes:
continue
self._search(subnode, ids, rtypes)
# def _search_noname(self, node, ids, rtypes):
#
# if node.tag != "Name":
# self._search(node, ids, rtypes)
def search_Access_Stmt(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes, includes=[1])
def search_Actual_Arg(self, node, ids, rtypes):
"""
<actual-arg> = <expr>
| <variable>
| <procedure-name>
| <proc-component-ref>
| <alt-return-spec>
"""
import pdb; pdb.set_trace()
self._search_subnodes(node, ids, rtypes)
def search_Actual_Arg_Spec(self, node, ids, rtypes):
"""
<actual-arg-spec> = [ <keyword> = ] <actual-arg>
"""
self._search_subnodes(node, ids, rtypes, includes=[1])
def search_Add_Operand(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes, excludes=[1])
def search_Allocate_Shape_Spec(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Allocate_Stmt(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Allocation(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_And_Operand(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes, includes=[1])
def search_Array_Constructor(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes, includes=[1])
def search_Array_Section(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Assignment_Stmt(self, node, ids, rtypes):
"""
<assignment-stmt> = <variable> = <expr>
"""
self._search_subnodes(node, ids, rtypes, excludes=[1])
def search_Assumed_Shape_Spec(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Attr_Spec(self, node, ids, rtypes):
# NOTE: literal string attribute spec
pass
def search_Block_Nonlabel_Do_Construct(self, node, ids, rtypes):
"""
R826_2
<block-nonlabel-do-construct> = <nonlabel-do-stmt>
[ <execution-part-construct> ]...
<end-do-stmt>
"""
self._search_subnodes(node, ids, rtypes)
def search_Call_Stmt(self, node, ids, rtypes):
"""
<call-stmt> = CALL <procedure-designator>
[ ( [ <actual-arg-spec-list> ] ) ]
"""
self._search_subnodes(node, ids, rtypes)
def search_Char_Literal_Constant(self, node, ids, rtypes):
'''
char-literal-constant is [ kind-param _ ] ' rep-char '
or [ kind-param _ ] " rep-char "
'''
self._search_subnodes(node, ids, rtypes, includes=[1])
def search_Comment(self, node, ids, rtypes):
pass
def search_Component_Attr_Spec(self, node, ids, rtypes):
# NOTE: literal string component attribute spec
pass
def search_Component_Decl(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes, excludes=[0])
def search_Component_Part(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Contains_Stmt(self, node, ids, rtypes):
pass
def search_Data_Component_Def_Stmt(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Data_Ref(self, node, ids, rtypes):
_followups = []
_search_subnodes = []
subnodes = self.tree.children(node.identifier)
# Part_Ref subnodes
for pref in subnodes[1:]:
pref_node = self.tree[pref.identifier]
if pref_node.tag == "Name":
_followups.append((pref_node, self.resmap["Part_Ref"]))
else:
import pdb; pdb.set_trace()
pref_subnodes = self.tree.children(pref.identifier)
pname, sec = pref_subnodes
_search_subnodes.append(sec)
self._search(subnodes[0], ids, rtypes, followups=_followups)
for _s in _search_subnodes:
self._search(_s, ids, rtypes)
def search_Deallocate_Stmt(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Declaration_Type_Spec(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Deferred_Shape_Spec(self, node, ids, rtypes):
pass
def search_Derived_Type_Def(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Derived_Type_Stmt(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes, excludes=[1])
def search_Dimension_Attr_Spec(self, node, ids, rtypes):
"""
<dimension-attr-spec> = DIMENSION ( <array-spec> )
"""
subnodes = self.tree.children(node.identifier)
self._search(subnodes[1], ids, rtypes)
def search_Else_Stmt(self, node, ids, rtypes):
pass
def search_Entity_Decl(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes, excludes=[0])
def search_Execution_Part(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Explicit_Shape_Spec(self, node, ids, rtypes):
"""
<explicit-shape-spec> = [ <lower-bound> : ] <upper-bound>
"""
subnodes = self.tree.children(node.identifier)
self._search(subnodes[0], ids, rtypes)
self._search(subnodes[1], ids, rtypes)
def search_Format(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Function_Stmt(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes, excludes=[1])
def search_Function_Subprogram(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_If_Construct(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_If_Stmt(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_If_Then_Stmt(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Implicit_Part(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Implicit_Stmt(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Initialization(self, node, ids, rtypes):
subnodes = self.tree.children(node.identifier)
self._search(subnodes[1], ids, rtypes)
def search_Int_Literal_Constant(self, node, ids, rtypes):
"""
<int-literal-constant> = <digit-string> [ _ <kind-param> ]
"""
digit, kind = self.tree.children(node.identifier)
self._search(kind, ids, rtypes)
def search_Internal_Subprogram_Part(self, node, ids, rtypes):
subnodes = self.tree.children(node.identifier)
self._search(subnodes[1], ids, rtypes)
def search_Intrinsic_Type_Spec(self, node, ids, rtypes):
"""
<intrinsic-type-spec> = INTEGER [ <kind-selector> ]
| REAL [ <kind-selector> ]
| DOUBLE COMPLEX
| COMPLEX [ <kind-selector> ]
| CHARACTER [ <char-selector> ]
| LOGICAL [ <kind-selector> ]
Extensions:
| DOUBLE PRECISION
| BYTE
"""
i_type, selector = self.tree.children(node.identifier)
# TODO: if selector is to be collected, handle it here
self._search(selector, ids, rtypes)
def search_Kind_Selector(self, node, ids, rtypes):
self._search_subnodes(node, ids, rtypes)
def search_Length_Selector(self, node, ids, rtypes):
"""
<length -selector> = ( [ LEN = ] <type-param-value> )
| * <char-length> [ , ]
"""
self._search_subnodes(node, ids, rtypes)
def search_Level_2_Expr(self, node, ids, rtypes):
"""
<level-2-expr> = [ [ <level-2-expr> ] <add-op> ] <add-operand>
<level-2-expr> = [ <level-2-expr> <add-op> ] <add-operand>
| <level-2-unary-expr>
<add-op> = +
| -
"""
subnodes = self.tree.children(node.identifier)
self._search(subnodes[0], ids, rtypes)
self._search(subnodes[2], ids, rtypes)
def search_Level_4_Expr(self, node, ids, rtypes):
subnodes = self.tree.children(node.identifier)
self._search(subnodes[0], ids, rtypes)
self._search(subnodes[2], ids, rtypes)
def search_Logical_Literal_Constant(self, node, ids, rtypes):
pass
def search_Loop_Control(self, node, ids, rtypes):
"""
R830
<loop-control> = [ , ] <do-variable> = scalar-int-expr,
scalar-int-expr
[ , <scalar-int-expr> ]
| [ , ] WHILE ( <scalar-logical-expr> )
"""
scalar_logical_expr, counter_expr, optional_delim = self.tree.children(node.identifier)
import pdb; pdb.set_trace()
if scalar_logical_expr is not None:
self._search(scalar_logical_expr, ids, rtypes)
def test_process_continuous_obj_simple():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_),
None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5], dtype=np.float64)))
def test_process_continuous_obj_simple_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), np.array([True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 6)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(np.isnan(vals[5]))
def test_process_continuous_obj_hard():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), DerivedStringHolder("7.5"), FloatHolder(8.5), DerivedFloatHolder(9.5), FloatAndStringHolder(10.5, "88"), DerivedFloatAndStringHolder(11.5, "99")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5], dtype=np.float64)))
def test_process_continuous_obj_hard_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5")], dtype=np.object_), np.array([True, True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 7)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[6]))
def test_process_continuous_obj_hard_bad():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), "bad", StringHolder("bad2"), NothingHolder("bad3")], dtype=np.object_), np.array([True, True, True, True, True, True, True, False, True, True], dtype=np.bool_))
assert(len(bad) == 10)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] is None)
assert(bad[3] is None)
assert(bad[4] is None)
assert(bad[5] is None)
assert(bad[6] == "bad")
assert(bad[7] is None)
assert(bad[8] == "bad2")
assert(isinstance(bad[9], str))
assert(vals.dtype == np.float64)
assert(len(vals) == 10)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[7]))
def test_process_continuous_str_simple():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5], dtype=np.float64)))
def test_process_continuous_str_simple_missing():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), np.array([True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[2]))
def test_process_continuous_str_hard_bad():
vals, bad = _process_continuous(np.array(["1", "2.5", "bad"], dtype=np.unicode_), np.array([True, True, True, False], dtype=np.bool_))
assert(len(bad) == 4)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] == "bad")
assert(bad[3] is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 4)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[3]))
def test_process_column_initial_int_float():
# this test is hard since np.unique seems to think int(4) == float(4.0) so naively it returns just "4"
encoded, c = _process_column_initial(np.array([4, 4.0], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["4"] == 1)
assert(c["4.0"] == 2)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_process_column_initial_float32_float64():
# np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 conversion has the lower mantissa
# bits all set to zero, and there is another float64 value that is closer to "0.1" in float64 representation, so
# they aren't the same; but if we convert them to strings directly they are identical. Strings are the
# ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
# will tend to separate the float32 and the float64 values since they aren't the same, but then serialize
# them to the same string. Our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
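# Added illustration (not part of the original test): the difference only becomes visible
# after promoting the float32 value to float64 before serializing:
#   str(np.float32(0.1))              -> '0.1'
#   str(np.float64(0.1))              -> '0.1'
#   str(np.float64(np.float32(0.1)))  -> '0.10000000149011612'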
encoded, c = _process_column_initial(np.array([np.float32(0.1), np.float64(0.1)], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["0.1"] == 1)
assert(c["0.10000000149011612"] == 2)
assert(np.array_equal(encoded, np.array([c["0.10000000149011612"], c["0.1"]], dtype=np.int64)))
def test_process_column_initial_obj_obj():
encoded, c = _process_column_initial(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["def"] == 2)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_float64_nomissing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), None, 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], c["11.1"]], dtype=np.int64)))
def test_process_column_initial_float64_missing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], 0, c["11.1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str():
c = {"cd": 1, "ab": 2}
encoded, bad = _encode_categorical_existing(np.array(["ab", "cd"], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["ab"], c["cd"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_bool():
c = {"True": 1, "False": 2}
encoded, bad = _encode_categorical_existing(np.array([True, False], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["True"], c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_small():
c = {"-2": 1, "3": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64(3)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["3"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_big():
c = {"-2": 1, "18446744073709551615": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64("18446744073709551615")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["18446744073709551615"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_floats():
# np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 conversion has the lower mantissa
# bits all set to zero, and there is another float64 value that is closer to "0.1" in float64 representation, so
# they aren't the same; but if we convert them to strings directly they are identical. Strings are the
# ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
# will tend to separate the float32 and the float64 values since they aren't the same, but then serialize
# them to the same string. Our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
c = {"1.1": 1, "2.19921875": 2, "3.299999952316284": 3, "4.4": 4, "5.5": 5}
encoded, bad = _encode_categorical_existing(np.array([float(1.1), np.float16(2.2), np.float32(3.3), np.float64(4.4), np.longfloat(5.5)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1.1"], c["2.19921875"], c["3.299999952316284"], c["4.4"], c["5.5"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_int():
c = {"abc": 1, "1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", int(1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", float(1.1)], dtype=np.object_), None, c)
<reponame>lustered/youtube-mp3-GUI<gh_stars>1-10
from __future__ import unicode_literals
import os
import random
from time import sleep
import mutagen.mp3
import fnmatch
import pygame
from pygame import mixer
from tkinter import *
from tkinter import ttk
from subprocess import Popen
from threading import Thread
from subprocess import *
# import youtube_dl
import yt_dlp
from .utils import theme, tStyle, playVideo
class Cuteplayer(Frame):
# Path stuff
path = "" + os.path.expanduser("~/Music") + "/cuteplayer/"
print("*" * 90)
if not os.path.exists(path):
os.mkdir(path)
else:
print("Download directory exists at: ", path)
# List with songs being displayed in the table
mp3_songs = []
# Current actual playlist Eg. Shuffle/Normal orders
playlist = []
videos = []
themes = ["bliss", "pastel", "rainy", "flame"]
ctheme = None
currentSong = None
timelineid = None
queid = None
vol = 0.4
sample_rate = 48000
crtime = 0
busy = None
def __init__(self, master, _theme="pastel"):
super().__init__(master)
# update youtube-dl on start
uthread = Thread(target=lambda: os.system("pip3 install --upgrade youtube-dl"))
uthread.start()
self.palette = theme(_theme)
self.ctheme = _theme
self.master = master
self.windowSettings()
self.mainMenu()
mixer.pre_init(self.sample_rate, -16, 0)
self.music_settings()
self.songsTable()
self.updateTable()
self.updateTimeline()
self.pack()
def windowSettings(self):
"""Set the main window settings"""
self.master.geometry("330x560")
self.master.title("cuteplayer")
self.master.configure(bg=self.palette["bgcolor"])
self.master.resizable(False, False)
self.master.grid_propagate(False)
def mainMenu(self):
############################## Buttons Setup ###########################
self.entry = Entry(
self,
fg=self.palette["entrytext"],
background=self.palette["entrybg"],
font=("ARCADECLASSIC", 15),
highlightbackground=self.palette["bgcolor"],
bd=3,
highlightthickness=0,
)
self.quit = Button(
self,
text="quit",
bg=self.palette["buttonbg"],
fg=self.palette["buttontext"],
font=("ARCADECLASSIC", 20),
highlightbackground=self.palette["bgcolor"],
highlightthickness=3,
activebackground=self.palette["activebuttonbg"],
command=lambda: [mixer.quit(), self.master.destroy()],
)
self.dl = Button(
self,
text="download",
bg=self.palette["buttonbg"],
fg=self.palette["buttontext"],
font=("ARCADECLASSIC", 20),
highlightbackground=self.palette["bgcolor"],
highlightthickness=3,
activebackground=self.palette["activebuttonbg"],
command=lambda: Thread(target=self.download).start(), # Run a new thread
)
self.play = Button(
self,
text="play",
bg=self.palette["buttonbg"],
fg=self.palette["buttontext"],
font=("ARCADECLASSIC", 20),
highlightbackground=self.palette["bgcolor"],
highlightthickness=3,
activebackground=self.palette["activebuttonbg"],
command=lambda: [
mixer.music.unpause(),
self.setbusy(False),
self.updateTimeline(),
],
)
self.pause = Button(
self,
text="pause",
bg=self.palette["buttonbg"],
fg=self.palette["buttontext"],
font=("ARCADECLASSIC", 20),
highlightbackground=self.palette["bgcolor"],
highlightthickness=3,
activebackground=self.palette["activebuttonbg"],
command=lambda: [
mixer.music.pause(),
self.setbusy(True),
self.after_cancel(self.timelineid),
],
)
self.shuffleSongList = Button(
self,
text="shuffle",
bg=self.palette["buttonbg"],
fg=self.palette["buttontext"],
font=("ARCADECLASSIC", 20),
highlightthickness=3,
highlightbackground=self.palette["bgcolor"],
activebackground=self.palette["activebuttonbg"],
command=self._shuffle,
)
self.skipButton = Button(
self,
text="skip",
bg=self.palette["buttonbg"],
fg=self.palette["buttontext"],
font=("ARCADECLASSIC", 20),
highlightbackground=self.palette["bgcolor"],
highlightthickness=3,
activebackground=self.palette["activebuttonbg"],
command=self.skip,
)
self.CurSong = Label(
self,
bg=self.palette["bgcolor"],
text="Now\tPlaying",
fg=self.palette["currentsongtext"],
font=("ARCADECLASSIC", 10),
wraplength=250,
)
self.VolumeSlider = Scale(
self,
length=5,
font="ARCADECLASSIC",
orient="horizontal",
bg=self.palette["bgcolor"],
fg=self.palette["volumetext"],
showvalue=0,
command=self.VolAdjust,
highlightthickness=10,
highlightbackground=self.palette["bgcolor"],
troughcolor=self.palette["volumetroughcolor"],
activebackground=self.palette["bgcolor"],
borderwidth=0,
)
# Set the default volume
self.VolumeSlider.set(self.vol * 100)
self.VolumeSlider.configure(label="%60s" % ("volume"))
self.timeline = Scale(
self,
length=100,
font="ARCADECLASSIC",
orient="horizontal",
bg=self.palette["bgcolor"],
fg=self.palette["timelinetext"],
showvalue=0,
highlightthickness=10,
highlightbackground=self.palette["bgcolor"],
troughcolor=self.palette["timelinetroughcolor"],
label=" ",
activebackground=self.palette["bgcolor"],
borderwidth=0,
)
############################################################
############################# Packing ##############################
self.entry.grid(
row=0, column=0, columnspan=3, sticky=W + E + N + S
) # , padx=3, pady=3)
self.dl.grid(row=1, column=0, sticky=NSEW)
self.quit.grid(row=1, column=1, sticky=NSEW)
self.play.grid(row=2, column=0, sticky=NSEW)
self.pause.grid(row=2, column=1, sticky=NSEW)
self.skipButton.grid(row=5, column=0, sticky=NSEW)
self.shuffleSongList.grid(row=5, column=1, sticky=NSEW)
self.CurSong.grid(row=6, column=0, columnspan=2, sticky=NSEW)
self.timeline.grid(row=7, column=0, columnspan=3, sticky=NSEW)
self.VolumeSlider.grid(row=8, column=0, columnspan=3, sticky=NSEW)
############################################################
############################### Keybindings ###############################
self.timeline.bind("<Button-1>", lambda event: self.after_cancel(self.timelineid))
self.timeline.bind("<ButtonRelease-1>", self.setTimeline)
self.timeline.bind_all("t", self.nextTheme)
self.timeline.bind_all("p", self.togglePlay)
self.timeline.bind_all("n", self.skip)
self.timeline.bind_all("s", self._shuffle)
self.timeline.bind_all("d", self.download)
self.timeline.bind_all("q", lambda x: [mixer.quit(), self.master.destroy()])
##############################################################
def togglePlay(self, event=None):
""" Toggle pause and play """
if self.busy:
mixer.music.unpause()
self.setbusy(False)
self.updateTimeline()
else:
mixer.music.pause()
self.setbusy(True)
self.after_cancel(self.timelineid)
def nextTheme(self, event):
print("Changing theme")
try:
self.ctheme = self.themes[self.themes.index(self.ctheme) + 1]
except:
self.ctheme = self.themes[0]
self.palette = theme(self.ctheme)
print("New theme: ", self.ctheme)
# Redraw all the widgets and frame to apply the new palette
self.windowSettings()
self.mainMenu()
self.songsTable()
self.updateTable()
def setbusy(self, state):
""" Set current state """
self.busy = state
def VolAdjust(self, vol):
self.vol = int(vol) / 100
mixer.music.set_volume(self.vol)
def skip(self, event=None):
""" Play the next song in the playlist """
if not self.currentSong:
return
try:
self.currentSong = self.playlist[self.playlist.index(self.currentSong) + 1]
except IndexError:
print("Reached the end of the list...\nStarting over.")
self.currentSong = self.playlist[0]
self.updatenplay()
def selectedItem(self, event):
"""Play a song when clicking on the table"""
if self.queid is not None:
self.after_cancel(self.queid)
# If the selected item is from the song (mp3) tab
if self.menu.index(self.menu.select()) == 0:
try:
curItem = self.table.focus()
# Remove the selection dashed lines after the focus redraws
# self.master.focus_set()
self.currentSong = self.path + self.table.item(curItem)["text"]
# Override playlist if the user manually selects a song from the table
# This is needed as the _shuffle function rearranges the order
self.playlist = ["" + self.path + song for song in self.mp3_songs]
self.updatenplay()
except (FileNotFoundError, Exception):
sleep(1)
self.updatenplay()
else: # if it's a video
# Pause the current song if there's something playing.
if self.timelineid is not None:
self.after_cancel(self.timelineid)
self.setbusy(True)
mixer.music.pause()
# Path to video
pv = self.path + self.vtable.item(self.vtable.focus())["text"]
# Start a new thread with the video playing.
# This is needed to not lock up the tk frame and allows for multiple instances
# of videos.
vthread = Thread(target=lambda: playVideo(pv))
vthread.start()
self.que_song()
def updatenplay(self):
try:
# override sample rate for song
sample_rate = mutagen.mp3.MP3(self.currentSong).info.sample_rate
# set the appropriate sample rate if the selected song has a different one
if self.sample_rate != sample_rate:
print("new sample rate: ", sample_rate)
self.sample_rate = sample_rate
except mutagen.MutagenError:
pass
# Re-init settings
try:
self.music_settings()
mixer.music.load(self.currentSong)
mixer.music.play()
# Only show up to 30 characters to avoid line wrap
self.CurSong.configure(text=str(self.currentSong[len(self.path) : -4])[:30])
self.setbusy(False)
self.crtime = 0
self.updateTimeline()
# Getting the correct child_id for the currently playing song. We need this so
# we can focus the item on the songs table, then it'll be highlighted
if self.currentSong:
child_index = self.mp3_songs.index(self.currentSong[len(self.path) :])
child_id = self.table.get_children()[child_index]
self.table.selection_set(child_id)
print(":: %s" % self.currentSong[len(self.path) : -4])
except pygame.error:
print("Some pygame error I can't fix")
pass
def music_settings(self):
""" Reset sample rate since it may vary from each song """
# In case we change sample rate
mixer.quit()
mixer.init(self.sample_rate)
mixer.music.set_volume(self.vol)
def _shuffle(self, event=None):
""" Shuffle all current songs in the download directory and play them """
if self.queid is not None:
self.after_cancel(self.queid)
self.playlist = random.sample(self.mp3_songs, len(self.mp3_songs))
self.playlist = ["" + self.path + song for song in self.playlist]
print("********** Current Playlist ********** ")
for index, song in enumerate(self.playlist):
print("%s - : %s" % (index, song.strip(self.path)))
if self.playlist:
self.currentSong = self.playlist[0]
# self.updatenplay(_shuffcall=self.mp3_songs.index(self.currentSong[len(self.path) :]))
self._shuffcall = True
self.updatenplay()
# if self.playlist:
self.que_song()
def que_song(self):
""" Used to queu the next song """
if int(mixer.music.get_pos()) == -1:
self.skip()
self.queid = self.after(1000, self.que_song)
def songsTable(self):
""" Widget Treeview/table with songs """
# Get customized style
style = tStyle()
# Tabs
self.menu = ttk.Notebook(self)
self.table = ttk.Treeview(self.menu, columns=("songNumber"), style="Treeview")
self.vtable = ttk.Treeview(self.menu, columns=("songNumber"), style="Treeview")
# Column config
self.table.column("songNumber", width=-50)
self.table.heading("songNumber", text="☪ ")
self.vtable.column("songNumber", width=-50)
self.vtable.heading("songNumber", text="☪ ")
self.menu.add(self.table, text="mp3")
self.menu.add(self.vtable, text="videos")
self.menu.grid(
row=3,
column=0,
rowspan=2,
columnspan=3,
sticky=W + E + N + S,
)
# Menu selection
self.menu.bind("<ButtonRelease-1>", self.updateTable)
# Selecting songs from table event
self.table.bind("<Return>", self.selectedItem)
self.table.bind("<ButtonRelease-1>", self.selectedItem)
self.vtable.bind("<Return>", self.selectedItem)
self.vtable.bind("<ButtonRelease-1>", self.selectedItem)
def setTimeline(self, _time_event):
""" Set the position of the song in a timeline slider """
if mixer.music.get_busy():
self.after_cancel(self.timelineid)
self.crtime += mixer.music.get_pos() / 1000
# Check boundry since actual play time might be off and lock the frame.
# Usually happens when setting the slider to the end.
if self.timeline.get() <= mutagen.mp3.MP3(self.currentSong).info.length:
mixer.music.set_pos(self.timeline.get())
else:
mixer.music.set_pos(mutagen.mp3.MP3(self.currentSong).info.length - 1)
self.crtime += self.timeline.get() - self.crtime
self.updateTimeline()
return
def updateTimeline(self):
""" Update the song slider """
# Close other instances -> Otherwise the play button will spawn multiple instances
if self.timelineid is not None:
self.after_cancel(self.timelineid)
try:
song = mutagen.mp3.MP3(self.currentSong)
self.timeline.configure(to=song.info.length)
self.timeline.set(self.crtime)
self.crtime += 1
m, s = divmod(self.crtime, 60)
self.timeline.configure(label="%60s %1s: %2s" % (" ", int(m), int(s)))
except Exception:
pass
if self.busy is False:
self.timelineid = self.after(1000, self.updateTimeline)
def updateTable(self, _=None):
""" Refresh the song table list """
self.table.delete(*self.table.get_children())
self.vtable.delete(*self.vtable.get_children())
# list of mp3 songs in dir
for entry in os.listdir(self.path):
if fnmatch.fnmatch(entry, "*.mp3") and entry not in self.mp3_songs:
self.mp3_songs.append(entry)
# if fnmatch.fnmatch(entry, "*.mkv") and entry not in self.videos:
if entry.endswith((".mkv", ".mp4")) and entry not in self.videos:
self.videos.append(entry)
# add new song to table list
self.mp3_songs.sort()
self.videos.sort()
# Assign the correct tab/table elements
tab, table = (
(self.mp3_songs, self.table)
if self.menu.index(self.menu.select()) == 0
else (self.videos, self.vtable)
)
for i, song in enumerate(tab):
# table.insert("", i, text="%s" % song[: len(song) - 4], values=(i + 1))
table.insert("", i, text="%s" % song, values=(i + 1))
def download(self, event=None):
""" Download the song to the path and covert to mp3 if necessary """
dpath = self.path + "%(title)s.%(ext)s"
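# Added note: "%(title)s.%(ext)s" is a yt-dlp output template; the downloader fills in the
# video title and file extension when writing into the cuteplayer directory.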
ydl_opts = {
"format": "bestvideo[ext=mp4]+bestaudio[ext=mp4]/mp4",
"outtmpl": dpath,
"keepvideo": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "192",
}
],
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
print(":" * 30 | |
<filename>nested_lstm.py
from __future__ import absolute_import
import warnings
from keras import backend as K
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine import Layer
from keras.engine import InputSpec
from keras.legacy import interfaces
from keras.layers import RNN
from keras.layers.recurrent import _generate_dropout_mask, _generate_dropout_ones
from keras.layers import LSTMCell, LSTM
class NestedLSTMCell(Layer):
"""Nested NestedLSTM Cell class.
Derived from the paper [Nested LSTMs](https://arxiv.org/abs/1801.10308)
Ref: [Tensorflow implementation](https://github.com/hannw/nlstm)
# Arguments
units: Positive integer, dimensionality of the output space.
depth: Depth of nesting of the memory component.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
cell_activation: Activation function of the first cell gate.
Note that in the paper only the first cell_activation is identity.
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, must be 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
"""
def __init__(self, units, depth,
activation='tanh',
recurrent_activation='sigmoid',
cell_activation='linear',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=False,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=2,
**kwargs):
super(NestedLSTMCell, self).__init__(**kwargs)
if depth < 1:
raise ValueError("`depth` must be at least 1. For better performance, consider using depth > 1.")
if implementation != 2:
warnings.warn(
"Nested LSTMs only support implementation 2 for the moment. Defaulting to implementation = 2")
implementation = 2
self.units = units
self.depth = depth
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.cell_activation = activations.get(cell_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.state_size = tuple([self.units] * (self.depth + 1))
self._dropout_mask = None
self._nested_recurrent_masks = None
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernels = []
self.biases = []
for i in range(self.depth):
if i == 0:
input_kernel = self.add_weight(shape=(input_dim, self.units * 4),
name='input_kernel_%d' % (i + 1),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
hidden_kernel = self.add_weight(shape=(self.units, self.units * 4),
name='kernel_%d' % (i + 1),
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
kernel = K.concatenate([input_kernel, hidden_kernel], axis=0)
else:
kernel = self.add_weight(shape=(self.units * 2, self.units * 4),
name='kernel_%d' % (i + 1),
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.kernels.append(kernel)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
for i in range(self.depth):
bias = self.add_weight(shape=(self.units * 4,),
name='bias_%d' % (i + 1),
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.biases.append(bias)
else:
self.biases = None
self.built = True
def call(self, inputs, states, training=None):
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training,
count=1)
if (0 < self.recurrent_dropout < 1 and
self._nested_recurrent_masks is None):
_nested_recurrent_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training,
count=self.depth)
self._nested_recurrent_masks = _nested_recurrent_mask
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_masks = self._nested_recurrent_masks
h_tm1 = states[0] # previous memory state
c_tm1 = states[1:self.depth + 1] # previous carry states
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
h, c = self.nested_recurrence(inputs,
hidden_state=h_tm1,
cell_states=c_tm1,
recurrent_masks=rec_dp_masks,
current_depth=0)
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, c
def nested_recurrence(self, inputs, hidden_state, cell_states, recurrent_masks, current_depth):
h_state = hidden_state
c_state = cell_states[current_depth]
if 0.0 < self.recurrent_dropout <= 1. and recurrent_masks is not None:
hidden_state = h_state * recurrent_masks[current_depth]
ip = K.concatenate([inputs, hidden_state], axis=-1)
gate_inputs = K.dot(ip, self.kernels[current_depth])
if self.use_bias:
gate_inputs = K.bias_add(gate_inputs, self.biases[current_depth])
i = gate_inputs[:, :self.units] # input gate
f = gate_inputs[:, self.units * 2: self.units * 3] # forget gate
c = gate_inputs[:, self.units: 2 * self.units] # new input
o = gate_inputs[:, self.units * 3: self.units * 4] # output gate
inner_hidden = c_state * self.recurrent_activation(f)
if current_depth == 0:
inner_input = self.recurrent_activation(i) + self.cell_activation(c)
else:
inner_input = self.recurrent_activation(i) + self.activation(c)
if (current_depth == self.depth - 1):
new_c = inner_hidden + inner_input
new_cs = [new_c]
else:
new_c, new_cs = self.nested_recurrence(inner_input,
hidden_state=inner_hidden,
cell_states=cell_states,
recurrent_masks=recurrent_masks,
current_depth=current_depth + 1)
new_h = self.activation(new_c) * self.recurrent_activation(o)
new_cs = [new_h] + new_cs
return new_h, new_cs
def get_config(self):
config = {'units': self.units,
'depth': self.depth,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'cell_activation': activations.serialize(self.cell_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(NestedLSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
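# A minimal usage sketch for NestedLSTMCell (illustrative only): it assumes the
# Keras 2 functional API (keras.layers.RNN / Input, keras.models.Model) that the
# rest of this module is written against; shapes and sizes below are arbitrary.
def _nested_lstm_cell_usage_sketch():
    import numpy as np
    from keras.layers import Input, RNN
    from keras.models import Model

    cell = NestedLSTMCell(units=32, depth=2)        # depth-2 nested memory
    inputs = Input(shape=(10, 8))                   # (timesteps, features)
    outputs = RNN(cell)(inputs)                     # last hidden state, shape (batch, 32)
    model = Model(inputs, outputs)

    x = np.random.rand(4, 10, 8).astype('float32')
    return model.predict(x).shape                   # expected: (4, 32)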
class NestedLSTM(RNN):
"""Nested Long-Short-Term-Memory layer - [Nested LSTMs](https://arxiv.org/abs/1801.10308).
# Arguments
units: Positive integer, dimensionality of the output space.
depth: Depth of nesting of the memory component.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
cell_activation: Activation function of the first cell gate.
Note that in the paper only the first cell_activation is identity.
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default | |
for m in range(0,cols):
ys[n] = ys[n] + p[m+1,n]*y_seeds[n]
ys[n] = ys[n] + E[i,n]
for n in range(0,cols):
y_seeds[n] = ys[n]
syn_residuals[i,:] = np.reshape([ys],(1,cols))
for i in range(0,cols):
syn_residuals[:,i] = syn_residuals[:,i]*stds[i]*(1/np.std(syn_residuals[:,i])) + mus[i]
##################################################
# PATH NW
##################################################
#This only uses BPA wind and hydro
col_nw_T =['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_nw_W =['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_nw_T)
NW_sim_T=sim_weather[col_nw_T].values
NW_sim_W=sim_weather[col_nw_W].values
NW_sim_T_F=(NW_sim_T * 9/5) +32
NW_sim_W =NW_sim_W *2.23694
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-NW_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,NW_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(NW_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(NW_sim_W,binary_HDD_sim)
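# Hedged helper: a vectorized equivalent of the degree-day loop above, assuming
# standard NumPy broadcasting and the same 65 F base temperature. Kept as a
# separate function so the loop's behavior is unchanged.
def degree_days(temps_F, base=65.0):
    """Return (HDD, CDD) arrays for a (days, cities) temperature matrix in Fahrenheit."""
    hdd = np.maximum(0.0, base - temps_F)
    cdd = np.maximum(0.0, temps_F - base)
    return hdd, cdd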
# Columns needed: Month, Day, Year, Path8, Path14, Path3, BPA_wind, BPA_hydro, Weekday
sim_BPA_hydro = pd.read_csv('PNW_hydro/FCRPS/Path_dams.csv',header=None)
sim_BPA_hydro=sim_BPA_hydro.values
sim_BPA_hydro=np.sum(sim_BPA_hydro,axis=1)/24
#What is the common length
effect_sim_year=int(len(sim_BPA_hydro)/365)
sim_month=sim_month[:len(sim_BPA_hydro)]
sim_day=sim_day[:len(sim_BPA_hydro)]
sim_year=sim_year[:len(sim_BPA_hydro)]
sim_dow= sim_dow[:len(sim_BPA_hydro)]
sim_wind_power=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_BPA_wind_power= sim_wind_power.loc[:,'BPA']/24
sim_wind_daily = np.zeros((effect_sim_year*365,1))
for i in range(0,effect_sim_year*365):
    sim_wind_daily[i] = np.sum((sim_BPA_wind_power.loc[i*24:i*24+24]))  # note: pandas .loc slicing is endpoint-inclusive, so this window spans 25 hourly values
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,sim_dow))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path8'}, inplace=True)
df_data_sim.rename(columns={4:'Path14'}, inplace=True)
df_data_sim.rename(columns={5:'Path3'}, inplace=True)
df_data_sim.rename(columns={6:'BPA_wind'}, inplace=True)
df_data_sim.rename(columns={7:'BPA_hydro'}, inplace=True)
df_data_sim.rename(columns={8:'Weekday'}, inplace=True)
df_data_sim.rename(columns={9:'Salem_HDD'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path8','Path14','Path3']
upper = [1900,1500,1900]
lower = [-600,-900,-2200]
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'BPA_wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_NW' + str(line)
name_2='feb_reg_NW' + str(line)
name_3='mar_reg_NW' + str(line)
name_4='apr_reg_NW' + str(line)
name_5='may_reg_NW' + str(line)
name_6='jun_reg_NW' + str(line)
name_7='jul_reg_NW' + str(line)
name_8='aug_reg_NW' + str(line)
name_9='sep_reg_NW' + str(line)
name_10='oct_reg_NW' + str(line)
name_11='nov_reg_NW' + str(line)
name_12='dec_reg_NW' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper[line_index]:
predicted[i] = upper[line_index]
elif predicted[i] < lower[line_index]:
predicted[i] = lower[line_index]
name='predicted_' + str(line)
locals()[name]=predicted
syn_Path8=predicted_Path8+syn_residuals[:effect_sim_year*365,5]
syn_Path14=predicted_Path14+syn_residuals[:effect_sim_year*365,6]
syn_Path3=predicted_Path3+syn_residuals[:effect_sim_year*365,7]
bias = np.mean(syn_Path8) - np.mean(NWPaths_y[:,0])
syn_Path8 = syn_Path8 - bias
bias = np.mean(syn_Path14) - np.mean(NWPaths_y[:,1])
syn_Path14 = syn_Path14 - bias
bias = np.mean(syn_Path3) - np.mean(NWPaths_y[:,2])
syn_Path3 = syn_Path3 - bias
S = df_data_sim.values
HO = H.values
stats = np.zeros((69,4))
for i in range(0,69):
stats[i,0] = np.mean(S[:,i])
stats[i,1] = np.mean(HO[:,i])
stats[i,2] = np.std(S[:,i])
stats[i,3] = np.std(HO[:,i])
################################################################################
###################################################
## PATH 65 & 66
###################################################
col_6566_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_6566_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_6566_T)
P6566_sim_T=sim_weather[col_6566_T].values
P6566_sim_W=sim_weather[col_6566_W].values
P6566_sim_W =P6566_sim_W*2.23694
sim_days = len(sim_weather)
P6566_sim_T_F=(P6566_sim_T * 9/5) +32
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-P6566_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,P6566_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(P6566_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(P6566_sim_W,binary_HDD_sim)
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,syn_Path3,syn_Path8,syn_Path14,sim_dow))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path65'}, inplace=True)
df_data_sim.rename(columns={4:'Path66'}, inplace=True)
df_data_sim.rename(columns={5:'Wind'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path65','Path66']
upper = [3100,4300]
lower = [-2210,-500]
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'Wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_6566' + str(line)
name_2='feb_reg_6566' + str(line)
name_3='mar_reg_6566' + str(line)
name_4='apr_reg_6566' + str(line)
name_5='may_reg_6566' + str(line)
name_6='jun_reg_6566' + str(line)
name_7='jul_reg_6566' + str(line)
name_8='aug_reg_6566' + str(line)
name_9='sep_reg_6566' + str(line)
name_10='oct_reg_6566' + str(line)
name_11='nov_reg_6566' + str(line)
name_12='dec_reg_6566' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper[line_index]:
predicted[i] = upper[line_index]
elif predicted[i] < lower[line_index]:
predicted[i] = lower[line_index]
name='predicted_' + str(line)
locals()[name]=predicted
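# Hedged refactoring sketch: the per-month if/elif chains above can be driven by a
# dict keyed on month instead of locals(). `monthly_models` is an assumed mapping of
# month number -> fitted sklearn-style regression; names are illustrative only.
def predict_by_month(df, monthly_models, first_col, upper, lower):
    out = np.zeros(len(df))
    for i in range(len(df)):
        m = int(df.loc[i, 'Month'])
        s = np.reshape(df.loc[i, first_col:].values.astype(float), (1, -1))
        p = monthly_models[m].predict(s).item()
        out[i] = min(max(p, lower), upper)   # clip to the line's physical limits
    return out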
syn_Path65= predicted_Path65 + syn_residuals[:effect_sim_year*365,13]
syn_Path66 = predicted_Path66 + syn_residuals[:effect_sim_year*365,14]
bias = np.mean(syn_Path65) - np.mean(Path65_66_y[:,0])
syn_Path65 = syn_Path65 - bias
bias = np.mean(syn_Path66) - np.mean(Path65_66_y[:,1])
syn_Path66 = syn_Path66 - bias
###################################################
## PATH 46
###################################################
#Find the simulated data at the sites
col_46_T = ['TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_46_W = ['TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_46_T)
P46_sim_T=sim_weather[col_46_T].values
P46_sim_W=sim_weather[col_46_W].values
P46_sim_W =P46_sim_W *2.23694
sim_days = len(sim_weather)
P46_sim_T_F=(P46_sim_T * 9/5) +32
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-P46_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,P46_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(P46_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(P46_sim_W,binary_HDD_sim)
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
sim_Hoover = pd.read_csv('Synthetic_streamflows/synthetic_discharge_Hoover.csv',header=None)
sim_Hoover=sim_Hoover.values
sim_Hoover = sim_Hoover[:effect_sim_year*365]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),sim_dow,sim_Hoover,syn_Path65,syn_Path66))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path46'}, inplace=True)
df_data_sim.rename(columns={4:'Weekday'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
y = df_data_sim.loc[:,'Path46']
predicted_Path46 =[]
rc = np.shape(jan2.loc[:,'Weekday':])
n = rc[1]
upper = 185000
lower = 48000
predicted=[]
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jan_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = feb_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = mar_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = apr_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = may_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jun_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jul_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = aug_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = sep_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = oct_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = nov_reg_46.predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = dec_reg_46.predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper:
predicted[i] = upper
elif predicted[i] < lower:
predicted[i] = lower
predicted_Path46=predicted
syn_Path46=predicted_Path46+syn_residuals[:effect_sim_year*365,12]
bias = np.mean(syn_Path46) - np.mean(Path46_y)
syn_Path46 = syn_Path46 - bias
syn_Path46 = syn_Path46/24
#
################################
## Other CA PATHS
################################
col_ca_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_ca_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_ca_T)
CA_sim_T=sim_weather[col_ca_T].values
CA_sim_W=sim_weather[col_ca_W].values
CA_sim_W =CA_sim_W *2.23694
CA_sim_T_F=(CA_sim_T * 9/5) +32
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = | |
x = self._2_act2(x)
#x = torch.cat([r1, r2, x], 1)
x = self._2_conv_proj(x)
x = self._2_bn_proj(x)
x = self._2_act_proj(x)
r2 = x
x1 = self._3_conv1(x)
x1 = self._3_bn1(x1)
x2 = self._3_conv1_h(x)
x2 = self._3_bn1_h(x2)
x3 = self._3_conv1_v(x)
x3 = self._3_bn1_v(x3)
x = x1+x2+x3
x = self._3_act1(x)
x1 = self._3_conv2(x)
x1 = self._3_bn2(x1)
x2 = self._3_conv2_h(x)
x2 = self._3_bn2_h(x2)
x3 = self._3_conv2_v(x)
x3 = self._3_bn2_v(x3)
x = x1+x2+x3
x = x + r2
x = self._3_act2(x)
r3 = x
x1 = self._4_conv1(x)
x1 = self._4_bn1(x1)
x2 = self._4_conv1_h(x)
x2 = self._4_bn1_h(x2)
x3 = self._4_conv1_v(x)
x3 = self._4_bn1_v(x3)
x = x1+x2+x3
x = self._4_act1(x)
x1 = self._4_conv2(x)
x1 = self._4_bn2(x1)
x2 = self._4_conv2_h(x)
x2 = self._4_bn2_h(x2)
x3 = self._4_conv2_v(x)
x3 = self._4_bn2_v(x3)
x = x1+x2+x3
x = x + r3
x = self._4_act2(x)
x = r2 + x
#x = torch.cat([r1, r2, r3, x], 1)
x = self._4_conv_proj(x)
x = self._4_bn_proj(x)
x = self._4_act_proj(x)
return x
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
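# Hedged usage sketch for fill_up_weights: it writes a bilinear-interpolation kernel
# into a grouped ConvTranspose2d so the layer behaves as a fixed upsampler at init.
# The layer shapes mirror the 2x upsamplers built later in this file.
def _bilinear_upsample_example(channels=48):
    import torch
    up = nn.ConvTranspose2d(channels, channels, 4, stride=2, padding=1,
                            output_padding=0, groups=channels, bias=False)
    fill_up_weights(up)
    x = torch.randn(1, channels, 16, 16)
    return up(x).shape  # expected: torch.Size([1, channels, 32, 32])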
def downsample_conv(
in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
norm_layer = norm_layer or nn.BatchNorm2d
kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size
first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1
p = get_padding(kernel_size, stride, first_dilation)
return nn.Sequential(*[
nn.Conv2d(
in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False),
norm_layer(out_channels)
])
class BottleNeckBlockV2(nn.Module):
def __init__(self, inplanes, first_planes, outplanes, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,):
super(BottleNeckBlockV2, self).__init__()
self.conv_dw = nn.Conv2d(first_planes, first_planes, groups = first_planes, kernel_size=3, padding=1, bias=False)
self.bn_dw = norm_layer(first_planes)
self.conv_dw_h = nn.Conv2d(first_planes, first_planes, groups = first_planes, kernel_size=(1,3), padding=(0,1), bias=False)
self.bn_dw_h = norm_layer(first_planes)
self.conv_dw_v = nn.Conv2d(first_planes, first_planes, groups = first_planes, kernel_size=(3,1), padding=(1,0), bias=False)
self.bn_dw_v = norm_layer(first_planes)
self.act_dw = act_layer(inplace=True)
self.conv_pwl = nn.Conv2d(first_planes, outplanes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_pwl = norm_layer(outplanes)
self.conv_dwl = nn.Conv2d(outplanes, outplanes, groups = outplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_dwl = norm_layer(outplanes)
self.conv_dwl_h = nn.Conv2d(outplanes, outplanes, groups = outplanes, kernel_size=(1,3), padding=(0,1), bias=False)
self.bn_dwl_h = norm_layer(outplanes)
self.conv_dwl_v = nn.Conv2d(outplanes, outplanes, groups = outplanes, kernel_size=(3,1), padding=(1,0), bias=False)
self.bn_dwl_v = norm_layer(outplanes)
def forward(self, x):
residual = x
x1 = self.conv_dw(x)
x1 = self.bn_dw(x1)
x2 = self.conv_dw_h(x)
x2 = self.bn_dw_h(x2)
x3 = self.conv_dw_v(x)
x3 = self.bn_dw_v(x3)
x = x1+x2+x3
x = self.act_dw(x)
x = self.conv_pwl(x)
x = self.bn_pwl(x)
x1 = self.conv_dwl(x)
x1 = self.bn_dwl(x1)
x2 = self.conv_dwl_h(x)
x2 = self.bn_dwl_h(x2)
x3 = self.conv_dwl_v(x)
x3 = self.bn_dwl_v(x3)
x = x1+x2+x3
x = x + residual
return x
class ExpandBlock(nn.Module):
def __init__(self, inplanes, first_planes, outplanes, stride=1, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,):
super(ExpandBlock, self).__init__()
self.conv_pw = nn.Conv2d(inplanes, first_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_pw = norm_layer(first_planes)
self.act_pw = act_layer(inplace=True)
self.conv_dw = nn.Conv2d(first_planes, first_planes, groups = first_planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_dw = norm_layer(first_planes)
self.conv_dw_h = nn.Conv2d(first_planes, first_planes, groups = first_planes, kernel_size=(1,3), stride=stride, padding=(0,1), bias=False)
self.bn_dw_h = norm_layer(first_planes)
self.conv_dw_v = nn.Conv2d(first_planes, first_planes, groups = first_planes, kernel_size=(3,1), stride=stride, padding=(1,0), bias=False)
self.bn_dw_v = norm_layer(first_planes)
self.act_dw = act_layer(inplace=True)
self.conv_pwl = nn.Conv2d(first_planes, outplanes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_pwl = norm_layer(outplanes)
self.conv_dwl = nn.Conv2d(outplanes, outplanes, groups = outplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_dwl = norm_layer(outplanes)
self.conv_dwl_h = nn.Conv2d(outplanes, outplanes, groups = outplanes, kernel_size=(1,3), padding=(0,1), bias=False)
self.bn_dwl_h = norm_layer(outplanes)
self.conv_dwl_v = nn.Conv2d(outplanes, outplanes, groups = outplanes, kernel_size=(3,1), padding=(1,0), bias=False)
self.bn_dwl_v = norm_layer(outplanes)
def forward(self, x):
x = self.conv_pw(x)
x = self.bn_pw(x)
x = self.act_pw(x)
x1 = self.conv_dw(x)
x1 = self.bn_dw(x1)
x2 = self.conv_dw_h(x)
x2 = self.bn_dw_h(x2)
x3 = self.conv_dw_v(x)
x3 = self.bn_dw_v(x3)
x = x1+x2+x3
x = self.act_dw(x)
x = self.conv_pwl(x)
x = self.bn_pwl(x)
x1 = self.conv_dwl(x)
x1 = self.bn_dwl(x1)
x2 = self.conv_dwl_h(x)
x2 = self.bn_dwl_h(x2)
x3 = self.conv_dwl_v(x)
x3 = self.bn_dwl_v(x3)
x = x1+x2+x3
return x
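# Hedged sketch of the recurring pattern in the blocks above: a square 3x3 convolution
# is paired with 1x3 and 3x1 branches (ACNet-style) and the three BatchNorm outputs
# are summed. A minimal standalone module capturing just that idea:
class _TripleBranchConv(nn.Module):
    def __init__(self, in_ch, out_ch, stride=1, norm_layer=nn.BatchNorm2d):
        super(_TripleBranchConv, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 3, stride=stride, padding=1, bias=False)
        self.conv_h = nn.Conv2d(in_ch, out_ch, (1, 3), stride=stride, padding=(0, 1), bias=False)
        self.conv_v = nn.Conv2d(in_ch, out_ch, (3, 1), stride=stride, padding=(1, 0), bias=False)
        self.bn = norm_layer(out_ch)
        self.bn_h = norm_layer(out_ch)
        self.bn_v = norm_layer(out_ch)
    def forward(self, x):
        # sum the normalized outputs of the square, horizontal and vertical branches
        return self.bn(self.conv(x)) + self.bn_h(self.conv_h(x)) + self.bn_v(self.conv_v(x))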
# class BasicBlock(nn.Module):
# expansion = 1
# def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
# reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
# super(BasicBlock, self).__init__()
# assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
# assert base_width == 64, 'BasicBlock doest not support changing base width'
# first_planes = planes // reduce_first
# outplanes = planes * self.expansion
# first_dilation = first_dilation or dilation
# self.conv1 = nn.Conv2d(
# inplanes, first_planes, kernel_size=3, stride=stride, padding=first_dilation,
# dilation=first_dilation, bias=False)
# self.bn1 = norm_layer(first_planes)
# self.act1 = act_layer(inplace=True)
# self.conv2 = nn.Conv2d(
# first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False)
# self.bn2 = norm_layer(outplanes)
# self.act2 = act_layer(inplace=True)
# self.downsample = downsample
# self.stride = stride
# self.dilation = dilation
# def zero_init_last_bn(self):
# nn.init.zeros_(self.bn2.weight)
# def forward(self, x):
# residual = x
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.act1(x)
# x = self.conv2(x)
# x = self.bn2(x)
# if self.downsample is not None:
# residual = self.downsample(residual)
# x += residual
# x = self.act2(x)
# return x
def get_padding(kernel_size, stride, dilation=1):
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
@BACKBONES.register_module
class RegMobileNetV2(nn.Module):
def __init__(self, pretrained ='/data1/centernet/dla2.pth', block=BasicBlock, in_chans=3,
act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
super(RegMobileNetV2, self).__init__()
self.pretrained = pretrained
# self.base = DLAMain([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512],
# block=BasicBlock)
self.conv1 = nn.Conv2d(in_chans, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.conv1_h = nn.Conv2d(in_chans, 16, kernel_size=(1,3), stride=2, padding=(0,1), bias=False)
self.conv1_v = nn.Conv2d(in_chans, 16, kernel_size=(3,1), stride=2, padding=(1,0), bias=False)
self.bn1 = norm_layer(16)
self.bn1_h = norm_layer(16)
self.bn1_v = norm_layer(16)
self.act1 = act_layer(inplace=True)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.conv2_h = nn.Conv2d(16, 32, kernel_size=(1,3), stride=1, padding=(0,1), bias=False)
self.conv2_v = nn.Conv2d(16, 32, kernel_size=(3,1), stride=1, padding=(1,0), bias=False)
self.bn2 = norm_layer(32)
self.bn2_h = norm_layer(32)
self.bn2_v = norm_layer(32)
self.act2 = act_layer(inplace=True)
self.layer3 = DlaBlock1(32,48,48)
self.layer4 = DlaBlock2(48,128,128)
self.layer5 = DlaBlock2(128,288,288)
self.layer6 = DlaBlock1(288,640,640)
self.conv6 = nn.Conv2d(640,640, kernel_size=1, stride=1, padding=0, bias=False)
self.bn6 = norm_layer(640)
self.act6 = act_layer(inplace=True)
# self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False)
# self.conv1_h = nn.Conv2d(in_chans, 32, kernel_size=(1,3), stride=2, padding=(0,1), bias=False)
# self.conv1_v = nn.Conv2d(in_chans, 32, kernel_size=(3,1), stride=2, padding=(1,0), bias=False)
# self.bn1 = norm_layer(32)
# self.bn1_h = norm_layer(32)
# self.bn1_v = norm_layer(32)
# self.act1 = act_layer(inplace=True)
# self.layer2_1 = ExpandBlock(32,80,80,stride=2)
# self.layer3_1 = ExpandBlock(80,240,240,stride=2)
# self.layer3_2 = BottleNeckBlockV2(240,240,240)
# self.layer4_1 = ExpandBlock(240,528,528,stride=2)
# self.layer4_2 = BottleNeckBlockV2(528,528,528)
# self.layer4_3 = BottleNeckBlockV2(528,528,528)
# self.layer4_4 = BottleNeckBlockV2(528,528,528)
# self.layer5_1 = ExpandBlock(528,1200,1200,stride=2)
# self.layer5_2 = BottleNeckBlockV2(1200,1200,1200)
# self.conv6 = nn.Conv2d(1200,1200, kernel_size=1, stride=1, padding=0, bias=False)
# self.bn6 = norm_layer(1200)
# self.act6 = act_layer(inplace=True)
# self.l1 = ConvBnRelu(80, 64, 64)
# self.l2 = ConvBnRelu(240, 128, 128)
# self.l3 = ConvBnRelu(528, 256, 256)
# self.l4 = ConvBnRelu(1200, 512, 512)
self.level4 = ConvBnRelu(640, 288, 288)
self.up4 = nn.ConvTranspose2d(288,288,4,stride=2,padding=1,output_padding=0,groups=288,bias=False)
self.level3_1 = ConvBnRelu(288, 288, 288)
self.level3_2 = ConvBnRelu(288, 48, 48)
        self.up3_1 = nn.ConvTranspose2d(48,48,8,stride=4,padding=2,output_padding=0,groups=48,bias=False)
self.level3_3 = ConvBnRelu(288, 128, 128)
self.up3_2 = nn.ConvTranspose2d(128,128,4,stride=2,padding=1,output_padding=0,groups=128,bias=False)
self.level3_4 = ConvBnRelu(288, 128, 128)
self.up3_3 = nn.ConvTranspose2d(128,128,4,stride=2,padding=1,output_padding=0,groups=128,bias=False)
self.level2_1 = ConvBnRelu(128, 128, 128)
self.level2_2 = ConvBnRelu(128, 128, 128)
self.level2_3 = ConvBnRelu(128, 48, 48)
self.level2_4 = ConvBnRelu(128, 48, 48)
self.level2_5 = ConvBnRelu(128, 48, 48)
self.level2_6 = ConvBnRelu(128, 48, 48)
self.up2_1 = nn.ConvTranspose2d(48,48,4,stride=2,padding=1,output_padding=0,groups=48,bias=False)
self.up2_2 = nn.ConvTranspose2d(48,48,4,stride=2,padding=1,output_padding=0,groups=48,bias=False)
self.up2_3 = nn.ConvTranspose2d(48,48,4,stride=2,padding=1,output_padding=0,groups=48,bias=False)
self.up2_4 = nn.ConvTranspose2d(48,48,4,stride=2,padding=1,output_padding=0,groups=48,bias=False)
self.level1_1 = ConvBnRelu(48, 48, 48)
self.level1_2 = ConvBnRelu(48, 48, 48)
self.level1_3 = ConvBnRelu(48, 48, 48)
self.level1_4 = ConvBnRelu(48, 48, 48)
self.level1_5 = ConvBnRelu(48, 48, 48)
# channels = [64, 128, 256, 512]
# scales = [1,2,4,8]
# self.dla_up = DLAUp(0, channels,scales)
# self.ida_up = IDAUp(64, [64, 128, 256],[1,2,4])
for n, m in self.named_modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1.)
# nn.init.constant_(m.bias, 0.)
if isinstance(m, nn.ConvTranspose2d):
fill_up_weights(m)
# for m in [self.up4, self.up3_1, self.up3_2, self.up3_3, self.up2_1, self.up2_2, self.up2_3, self.up2_4]:
# m.eval()
# for param in m.parameters():
# param.requires_grad = False
self.load_pretrained_model()
#print(self.state_dict()['conv6.weight'])
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, reduce_first=1,
avg_down=False, down_kernel_size=1, **kwargs):
downsample = None
first_dilation = 1 if dilation in (1, 2) else 2
if stride != 1 or self.inplanes != planes * block.expansion:
downsample_args = dict(
in_channels=self.inplanes, out_channels=planes * block.expansion, kernel_size=down_kernel_size,
stride=stride, dilation=dilation, first_dilation=first_dilation, norm_layer=kwargs.get('norm_layer'))
downsample = downsample_avg(**downsample_args) if avg_down else downsample_conv(**downsample_args)
block_kwargs = dict(
cardinality=self.cardinality, base_width=self.base_width, reduce_first=reduce_first,
dilation=dilation, **kwargs)
layers = | |
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = baremetalsolution.ListSnapshotSchedulePoliciesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_snapshot_schedule_policies,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListSnapshotSchedulePoliciesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def get_snapshot_schedule_policy(
self,
request: Union[baremetalsolution.GetSnapshotSchedulePolicyRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> baremetalsolution.SnapshotSchedulePolicy:
r"""Get details of a single snapshot schedule policy.
.. code-block:: python
from google.cloud import bare_metal_solution_v2
async def sample_get_snapshot_schedule_policy():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionAsyncClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.GetSnapshotSchedulePolicyRequest(
name="name_value",
)
# Make the request
response = await client.get_snapshot_schedule_policy(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.GetSnapshotSchedulePolicyRequest, dict]):
The request object. Message for requesting snapshot
schedule policy information.
name (:class:`str`):
Required. Name of the resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.types.SnapshotSchedulePolicy:
A snapshot schedule policy.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = baremetalsolution.GetSnapshotSchedulePolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_snapshot_schedule_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def create_snapshot_schedule_policy(
self,
request: Union[
baremetalsolution.CreateSnapshotSchedulePolicyRequest, dict
] = None,
*,
parent: str = None,
snapshot_schedule_policy: baremetalsolution.SnapshotSchedulePolicy = None,
snapshot_schedule_policy_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> baremetalsolution.SnapshotSchedulePolicy:
r"""Create a snapshot schedule policy in the specified
project.
.. code-block:: python
from google.cloud import bare_metal_solution_v2
async def sample_create_snapshot_schedule_policy():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionAsyncClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.CreateSnapshotSchedulePolicyRequest(
parent="parent_value",
snapshot_schedule_policy_id="snapshot_schedule_policy_id_value",
)
# Make the request
response = await client.create_snapshot_schedule_policy(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.CreateSnapshotSchedulePolicyRequest, dict]):
The request object. Message for creating a snapshot
schedule policy in a project.
parent (:class:`str`):
Required. The parent project and
location containing the
SnapshotSchedulePolicy.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
snapshot_schedule_policy (:class:`google.cloud.bare_metal_solution_v2.types.SnapshotSchedulePolicy`):
Required. The SnapshotSchedulePolicy
to create.
This corresponds to the ``snapshot_schedule_policy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
snapshot_schedule_policy_id (:class:`str`):
Required. Snapshot policy ID
This corresponds to the ``snapshot_schedule_policy_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.types.SnapshotSchedulePolicy:
A snapshot schedule policy.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[parent, snapshot_schedule_policy, snapshot_schedule_policy_id]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = baremetalsolution.CreateSnapshotSchedulePolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if snapshot_schedule_policy is not None:
request.snapshot_schedule_policy = snapshot_schedule_policy
if snapshot_schedule_policy_id is not None:
request.snapshot_schedule_policy_id = snapshot_schedule_policy_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_snapshot_schedule_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_snapshot_schedule_policy(
self,
request: Union[
baremetalsolution.UpdateSnapshotSchedulePolicyRequest, dict
] = None,
*,
snapshot_schedule_policy: baremetalsolution.SnapshotSchedulePolicy = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> baremetalsolution.SnapshotSchedulePolicy:
r"""Update a snapshot schedule policy in the specified
project.
.. code-block:: python
from google.cloud import bare_metal_solution_v2
async def sample_update_snapshot_schedule_policy():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionAsyncClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.UpdateSnapshotSchedulePolicyRequest(
)
# Make the request
response = await client.update_snapshot_schedule_policy(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.UpdateSnapshotSchedulePolicyRequest, dict]):
The request object. Message for updating a snapshot
schedule policy in a project.
snapshot_schedule_policy (:class:`google.cloud.bare_metal_solution_v2.types.SnapshotSchedulePolicy`):
Required. The snapshot schedule policy to update.
The ``name`` field is used to identify the snapshot
schedule policy to update. Format:
projects/{project}/locations/global/snapshotSchedulePolicies/{policy}
This corresponds to the ``snapshot_schedule_policy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. The list of fields to
update.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.types.SnapshotSchedulePolicy:
A snapshot schedule policy.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([snapshot_schedule_policy, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = baremetalsolution.UpdateSnapshotSchedulePolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if snapshot_schedule_policy is not None:
request.snapshot_schedule_policy = snapshot_schedule_policy
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_snapshot_schedule_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
(
"snapshot_schedule_policy.name",
request.snapshot_schedule_policy.name,
),
)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_snapshot_schedule_policy(
self,
request: Union[
baremetalsolution.DeleteSnapshotSchedulePolicyRequest, dict
] = None,
*,
name: str = None,
retry: | |
here')
assert 0, self.agent.message
# TODO: eat directly from ground if possible
item = self.move_to_inventory(item)
assert item in self.items.all_items, item or item in self.items_below_me
letter = self.items.get_letter(item)
with self.agent.atom_operation():
if quaff:
def text_gen():
if self.agent.message.startswith('Drink from the fountain?'):
yield 'n'
self.agent.step(A.Command.QUAFF, text_gen())
else:
self.agent.step(A.Command.EAT)
if item in self.items.all_items:
while re.search('There (is|are)[a-zA-Z0-9- ]* here; eat (it|one)\?', self.agent.message):
self.agent.type_text('n')
self.agent.type_text(letter)
return True
elif item in self.items_below_me:
while ' eat it? [ynq]' in self.agent.message or \
' eat one? [ynq]' in self.agent.message:
if item.text in self.agent.message:
                        self.agent.type_text('y')
return True
if "What do you want to eat?" in self.agent.message or \
"You don't have anything to eat." in self.agent.message:
raise AgentPanic('no food is lying here')
assert 0, self.agent.message
assert 0
######## STRATEGIES helpers
def get_best_melee_weapon(self, items=None, *, return_dps=False, allow_unknown_status=False):
if self.agent.character.role == Character.MONK:
return None
if items is None:
items = self.items
# select the best
best_item = None
best_dps = utils.calc_dps(*self.agent.character.get_melee_bonus(None, large_monster=False))
for item in flatten_items(items):
if item.is_weapon() and \
(item.status in [Item.UNCURSED, Item.BLESSED] or
(allow_unknown_status and item.status == Item.UNKNOWN)):
to_hit, dmg = self.agent.character.get_melee_bonus(item, large_monster=False)
dps = utils.calc_dps(to_hit, dmg)
# dps = item.get_dps(large_monster=False) # TODO: what about monster size
if best_dps < dps:
best_dps = dps
best_item = item
if return_dps:
return best_item, best_dps
return best_item
def get_ranged_combinations(self, items=None, throwing=True, allow_best_melee=False, allow_wielded_melee=False,
allow_unknown_status=False, additional_ammo=[]):
if items is None:
items = self.items
items = flatten_items(items)
launchers = [i for i in items if i.is_launcher()]
ammo_list = [i for i in items if i.is_fired_projectile()]
valid_combinations = []
# TODO: should this condition be used here
if any(l.equipped and l.status == Item.CURSED for l in launchers):
launchers = [l for l in launchers if l.equipped]
for launcher in launchers:
for ammo in ammo_list + additional_ammo:
if ammo.is_fired_projectile(launcher):
if launcher.status in [Item.UNCURSED, Item.BLESSED] or \
(allow_unknown_status and launcher.status == Item.UNKNOWN):
valid_combinations.append((launcher, ammo))
if throwing:
best_melee_weapon = None
if not allow_best_melee:
best_melee_weapon = self.get_best_melee_weapon()
wielded_melee_weapon = None
if not allow_wielded_melee:
wielded_melee_weapon = self.items.main_hand
valid_combinations.extend([(None, i) for i in items
if i.is_thrown_projectile()
and i != best_melee_weapon and i != wielded_melee_weapon])
return valid_combinations
def get_best_ranged_set(self, items=None, *, throwing=True, allow_best_melee=False,
allow_wielded_melee=False,
return_dps=False, allow_unknown_status=False, additional_ammo=[]):
if items is None:
items = self.items
items = flatten_items(items)
best_launcher, best_ammo = None, None
best_dps = -float('inf')
for launcher, ammo in self.get_ranged_combinations(items, throwing, allow_best_melee, allow_wielded_melee,
allow_unknown_status, additional_ammo):
to_hit, dmg = self.agent.character.get_ranged_bonus(launcher, ammo)
dps = utils.calc_dps(to_hit, dmg)
if dps > best_dps:
best_launcher, best_ammo, best_dps = launcher, ammo, dps
if return_dps:
return best_launcher, best_ammo, best_dps
return best_launcher, best_ammo
def get_best_armorset(self, items=None, *, return_ac=False, allow_unknown_status=False):
if items is None:
items = self.items
items = flatten_items(items)
best_items = [None] * O.ARM_NUM
best_ac = [None] * O.ARM_NUM
for item in items:
if not item.is_armor() or not item.is_unambiguous():
continue
# TODO: consider other always allowed items than dragon hide
is_dragonscale_armor = item.object.metal == O.DRAGON_HIDE
allowed_statuses = [Item.UNCURSED, Item.BLESSED] + ([Item.UNKNOWN] if allow_unknown_status else [])
if item.status not in allowed_statuses and not is_dragonscale_armor:
continue
slot = item.object.sub
ac = item.get_ac()
if self.agent.character.role == Character.MONK and slot == O.ARM_SUIT:
continue
if best_ac[slot] is None or best_ac[slot] > ac:
best_ac[slot] = ac
best_items[slot] = item
if return_ac:
return best_items, best_ac
return best_items
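    # Hedged convenience sketch combining the three selectors above into one summary.
    # Names are illustrative; note that get_best_melee_weapon returns None for monks,
    # so that case is unpacked separately.
    def get_best_equipment_summary(self, items=None):
        melee = self.get_best_melee_weapon(items, return_dps=True)
        weapon, melee_dps = melee if melee is not None else (None, None)
        launcher, ammo, ranged_dps = self.get_best_ranged_set(items, return_dps=True)
        armorset = self.get_best_armorset(items)
        return {'melee': (weapon, melee_dps),
                'ranged': (launcher, ammo, ranged_dps),
                'armor': armorset}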
######## LOW-LEVEL STRATEGIES
def gather_items(self):
return (
self.pickup_and_drop_items()
.before(self.check_containers())
.before(self.wear_best_stuff())
.before(self.wand_engrave_identify())
.before(self.go_to_unchecked_containers())
.before(self.check_items()
.before(self.go_to_item_to_pickup()).repeat().every(5)
.preempt(self.agent, [
self.pickup_and_drop_items(),
self.check_containers(),
])).repeat()
)
@utils.debug_log('inventory.arrange_items')
@Strategy.wrap
def arrange_items(self):
yielded = False
if self.agent.character.prop.polymorph:
# TODO: only handless
yield False
while 1:
items_below_me = list(filter(lambda i: i.shop_status == Item.NOT_SHOP, flatten_items(self.items_below_me)))
forced_items = list(filter(lambda i: not i.can_be_dropped_from_inventory(), flatten_items(self.items)))
assert all((item in self.items.all_items for item in forced_items))
free_items = list(filter(lambda i: i.can_be_dropped_from_inventory(),
flatten_items(sorted(self.items, key=lambda x: x.text))))
all_items = free_items + items_below_me
item_split = self.agent.global_logic.item_priority.split(
all_items, forced_items, self.agent.character.carrying_capacity)
assert all((container is None or container in self.items_below_me or container in self.items.all_items or \
(sum(item_split[container]) == 0 and not container.content.items)
for container in item_split)), 'TODO: nested containers'
cont = False
# put into containers
for container in item_split:
if container is not None:
counts = item_split[container]
indices = [i for i, item in enumerate(all_items) if item in self.items.all_items and counts[i] > 0]
if not indices:
continue
if not yielded:
yielded = True
yield True
self.use_container(container, [all_items[i] for i in indices], [],
items_to_put_counts=[counts[i] for i in indices])
cont = True
break
if cont:
continue
# drop on ground
counts = item_split[None]
indices = [i for i, item in enumerate(free_items) if
item in self.items.all_items and counts[i] != item.count]
if indices:
if not yielded:
yielded = True
yield True
assert self.drop([free_items[i] for i in indices], [free_items[i].count - counts[i] for i in indices],
smart=False)
continue
# take from container
for container in all_items:
if not container.is_container():
continue
if container in item_split:
counts = item_split[container]
indices = [i for i, item in enumerate(all_items) if
item in container.content.items and counts[i] != item.count]
items_to_take_counts = [all_items[i].count - counts[i] for i in indices]
else:
counts = np.array(list(item_split.values())).sum(0)
indices = [i for i, item in enumerate(all_items) if
item in container.content.items and counts[i] != 0]
items_to_take_counts = [counts[i] for i in indices]
if not indices:
continue
if not yielded:
yielded = True
yield True
assert self.items.free_slots() > 0
indices = indices[:self.items.free_slots()]
self.use_container(container, [], [all_items[i] for i in indices],
items_to_take_counts=items_to_take_counts)
cont = True
break
if cont:
continue
# pick up from ground
to_pickup = np.array([counts[len(free_items):] for counts in item_split.values()]).sum(0)
assert len(to_pickup) == len(items_below_me)
indices = [i for i, item in enumerate(items_below_me) if to_pickup[i] > 0 and item in self.items_below_me]
if len(indices) > 0:
assert self.items.free_slots() > 0
indices = indices[:self.items.free_slots()]
if not yielded:
yielded = True
yield True
assert self.pickup([items_below_me[i] for i in indices], [to_pickup[i] for i in indices])
continue
break
for container in item_split:
for item, count in zip(all_items, item_split[container]):
assert count == 0 or count == item.count
assert count == 0 or item in (
container.content.items if container is not None else self.items.all_items)
if not yielded:
yield False
def _determine_possible_wands(self, message, item):
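"""Map an engrave-test message to the wand types it can indicate.
Returns a list of candidate wand objects, or None when the wand is spent
(it exploded, or is too worn out to engrave, in which case it is also
named 'EMPT'). The final assert fires on unrecognized messages.
"""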
wand_regex = '[a-zA-Z ]+'
floor_regex = '[a-zA-Z]+'
mapping = {
f"The engraving on the {floor_regex} vanishes!": ['cancellation', 'teleportation', 'make invisible'],
# TODO?: cold, # (if the existing engraving is a burned one)
"A few ice cubes drop from the wand.": ['cold'],
f"The bugs on the {floor_regex} stop moving": ['death', 'sleep'],
f"This {wand_regex} is a wand of digging!": ['digging'],
"Gravel flies up from the floor!": ['digging'],
f"This {wand_regex} is a wand of fire!": ['fire'],
"Lightning arcs from the wand. You are blinded by the flash!": ['lighting'],
f"This {wand_regex} is a wand of lightning!": ['lightning'],
f"The {floor_regex} is riddled by bullet holes!": ['magic missile'],
f'The engraving now reads:': ['polymorph'],
f"The bugs on the {floor_regex} slow down!": ['slow monster'],
f"The bugs on the {floor_regex} speed up!": ['speed monster'],
"The wand unsuccessfully fights your attempt to write!": ['striking'],
# activated effects:
"A lit field surrounds you!": ['light'],
"You may wish for an object.": ['wishing'],
"You feel self-knowledgeable...": ['enlightenment'] # TODO: parse the effect
# TODO: "The wand is too worn out to engrave.": [None], # wand is exhausted
}
for msg, wand_types in mapping.items():
res = re.findall(msg, message)
if len(res) > 0:
assert len(res) == 1
return [O.from_name(w, nh.WAND_CLASS) for w in wand_types]
# TODO: "wand is cancelled (x:-1)" ?
# TODO: "secret door detection self-identifies if secrets are detected" ?
res = re.findall(f'Your {wand_regex} suddenly explodes!', self.agent.message)
if len(res) > 0:
assert len(res) == 1
return None
res = re.findall('The wand is too worn out to engrave.', self.agent.message)
if len(res) > 0:
assert len(res) == 1
self.agent.inventory.call_item(item, 'EMPT')
return None
res = re.findall(f'{wand_regex} glows, then fades.', self.agent.message)
if len(res) > 0:
assert len(res) == 1
return [p for p in O.possibilities_from_glyph(item.glyphs[0])
if p.name not in ['light', 'wishing']]
# TODO: wiki says this:
# return [O.from_name('opening', nh.WAND_CLASS),
# O.from_name('probing', nh.WAND_CLASS),
# O.from_name('undead turning', nh.WAND_CLASS),
# O.from_name('nothing', nh.WAND_CLASS),
# O.from_name('secret door detection', nh.WAND_CLASS),
# ]
assert 0, message
@utils.debug_log('inventory.wand_engrave_identify')
@Strategy.wrap
def wand_engrave_identify(self):
if self.agent.character.prop.polymorph:
yield False # TODO: only for handless monsters (which cannot write)
self.skip_engrave_counter -= 1
if self.agent.character.prop.blind or self.skip_engrave_counter > 0:
yield
email = context.setdefault('email', self.email_address)
if not email:
return 0 # Not Sent
langs = i18n.parse_accept_lang(self.email_lang or 'en')
locale = i18n.match_lang(langs)
i18n.add_helpers_to_context(self._tell_sentry, context, locale)
context['escape'] = lambda s: s
context_html = dict(context)
i18n.add_helpers_to_context(self._tell_sentry, context_html, locale)
context_html['escape'] = htmlescape
spt = self._emails[spt_name]
base_spt = self._emails['base']
def render(t, context):
b = base_spt[t].render(context).strip()
return b.replace('$body', spt[t].render(context).strip())
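# The message dict built below appears to follow the SES-style send_email
# keyword layout (Source / Destination / Message with Subject and Body
# parts); this is an assumption based on the field names, since the mailer
# object itself is defined elsewhere.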
message = {}
message['Source'] = 'Gratipay Support <<EMAIL>>'
message['Destination'] = {}
message['Destination']['ToAddresses'] = ["%s <%s>" % (self.username, email)] # "Name <<EMAIL>>"
message['Message'] = {}
message['Message']['Subject'] = {}
message['Message']['Subject']['Data'] = spt['subject'].render(context).strip()
message['Message']['Body'] = {
'Text': {
'Data': render('text/plain', context)
},
'Html': {
'Data': render('text/html', context_html)
}
}
self._mailer.send_email(**message)
return 1 # Sent
def queue_email(self, spt_name, **context):
self.db.run("""
INSERT INTO email_queue
(participant, spt_name, context)
VALUES (%s, %s, %s)
""", (self.id, spt_name, pickle.dumps(context)))
@classmethod
def dequeue_emails(cls):
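"""Send queued emails in batches of up to 60, deleting each row afterwards.
Sleeps one second after each successfully sent message, presumably to stay
under the mail provider's sending rate limit.
"""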
fetch_messages = lambda: cls.db.all("""
SELECT *
FROM email_queue
ORDER BY id ASC
LIMIT 60
""")
while True:
messages = fetch_messages()
if not messages:
break
for msg in messages:
p = cls.from_id(msg.participant)
r = p.send_email(msg.spt_name, **pickle.loads(msg.context))
cls.db.run("DELETE FROM email_queue WHERE id = %s", (msg.id,))
if r == 1:
sleep(1)
def set_email_lang(self, accept_lang):
if not accept_lang:
return
self.db.run("UPDATE participants SET email_lang=%s WHERE id=%s",
(accept_lang, self.id))
self.set_attributes(email_lang=accept_lang)
# Notifications
# =============
def add_notification(self, name):
id = self.id
r = self.db.one("""
UPDATE participants
SET notifications = array_append(notifications, %(name)s)
WHERE id = %(id)s
AND NOT %(name)s = ANY(notifications);
SELECT notifications
FROM participants
WHERE id = %(id)s;
""", locals())
self.set_attributes(notifications=r)
def add_signin_notifications(self):
if not self.get_emails():
self.add_notification('email_missing')
if self.get_paypal_error():
self.add_notification('paypal_withdrawal_failed')
if self.get_credit_card_error():
self.add_notification('credit_card_failed')
elif self.credit_card_expiring():
self.add_notification('credit_card_expires')
def credit_card_expiring(self):
route = ExchangeRoute.from_network(self, 'braintree-cc')
if not route:
return
card = CreditCard.from_route(route)
year, month = card.expiration_year, card.expiration_month
if not (year and month):
return False
return is_card_expiring(int(year), int(month))
def remove_notification(self, name):
id = self.id
r = self.db.one("""
UPDATE participants
SET notifications = array_remove(notifications, %(name)s)
WHERE id = %(id)s
RETURNING notifications
""", locals())
self.set_attributes(notifications=r)
def render_notifications(self, state):
r = []
escape = state['escape']
state['escape'] = lambda a: a
for name in self.notifications:
try:
f = getattr(notifications, name)
typ, msg = f(*resolve_dependencies(f, state).as_args)
r.append(dict(jsonml=msg, name=name, type=typ))
except Exception as e:
self._tell_sentry(e, state)
state['escape'] = escape
return r
# Exchange-related stuff
# ======================
def get_paypal_error(self):
return getattr(ExchangeRoute.from_network(self, 'paypal'), 'error', None)
def get_credit_card_error(self):
return getattr(ExchangeRoute.from_network(self, 'braintree-cc'), 'error', None)
def get_cryptocoin_addresses(self):
routes = self.db.all("""
SELECT network, address
FROM current_exchange_routes r
WHERE participant = %s
AND network = 'bitcoin'
AND error <> 'invalidated'
""", (self.id,))
return {r.network: r.address for r in routes}
@property
def has_payout_route(self):
for network in ('paypal',):
route = ExchangeRoute.from_network(self, network)
if route and not route.error:
return True
return False
def get_balanced_account(self):
"""Fetch or create the balanced account for this participant.
"""
if not self.balanced_customer_href:
customer = balanced.Customer(meta={
'username': self.username,
'participant_id': self.id,
}).save()
r = self.db.one("""
UPDATE participants
SET balanced_customer_href=%s
WHERE id=%s
AND balanced_customer_href IS NULL
RETURNING id
""", (customer.href, self.id))
if not r:
return self.get_balanced_account()
else:
customer = balanced.Customer.fetch(self.balanced_customer_href)
return customer
def get_braintree_account(self):
"""Fetch or create a braintree account for this participant.
"""
if not self.braintree_customer_id:
customer = braintree.Customer.create({
'custom_fields': {'participant_id': self.id}
}).customer
r = self.db.one("""
UPDATE participants
SET braintree_customer_id=%s
WHERE id=%s
AND braintree_customer_id IS NULL
RETURNING id
""", (customer.id, self.id))
if not r:
return self.get_braintree_account()
else:
customer = braintree.Customer.find(self.braintree_customer_id)
return customer
def get_braintree_token(self):
account = self.get_braintree_account()
token = braintree.ClientToken.generate({'customer_id': account.id})
return token
# Elsewhere-related stuff
# =======================
def get_account_elsewhere(self, platform):
"""Return an AccountElsewhere instance.
"""
return self.db.one("""
SELECT elsewhere.*::elsewhere_with_participant
FROM elsewhere
WHERE participant=%s
AND platform=%s
""", (self.username, platform))
def get_accounts_elsewhere(self):
"""Return a dict of AccountElsewhere instances.
"""
accounts = self.db.all("""
SELECT elsewhere.*::elsewhere_with_participant
FROM elsewhere
WHERE participant=%s
""", (self.username,))
accounts_dict = {account.platform: account for account in accounts}
return accounts_dict
def get_elsewhere_logins(self, cursor):
"""Return the list of (platform, user_id) tuples that the participant
can log in with.
"""
return cursor.all("""
SELECT platform, user_id
FROM elsewhere
WHERE participant=%s
AND platform IN %s
AND NOT is_team
""", (self.username, AccountElsewhere.signin_platforms_names))
def delete_elsewhere(self, platform, user_id):
"""Deletes account elsewhere unless the user would not be able
to log in anymore.
"""
user_id = unicode(user_id)
with self.db.get_cursor() as c:
accounts = self.get_elsewhere_logins(c)
assert len(accounts) > 0
if len(accounts) == 1 and accounts[0] == (platform, user_id):
raise LastElsewhere()
c.one("""
DELETE FROM elsewhere
WHERE participant=%s
AND platform=%s
AND user_id=%s
RETURNING participant
""", (self.username, platform, user_id), default=NonexistingElsewhere)
add_event(c, 'participant', dict(id=self.id, action='disconnect', values=dict(platform=platform, user_id=user_id)))
self.update_avatar()
def update_avatar(self):
avatar_url = self.db.run("""
UPDATE participants p
SET avatar_url = (
SELECT avatar_url
FROM elsewhere
WHERE participant = p.username
ORDER BY platform = 'github' DESC,
avatar_url LIKE '%%gravatar.com%%' DESC
LIMIT 1
)
WHERE p.username = %s
RETURNING avatar_url
""", (self.username,))
self.set_attributes(avatar_url=avatar_url)
# Giving and Taking
# =================
def set_payment_instruction(self, team, amount, update_self=True, update_team=True,
cursor=None):
"""Given a Team instance, and amount as str, return a dict.
We INSERT instead of UPDATE, so that we have history to explore. The
COALESCE function returns the first of its arguments that is not NULL.
The effect here is to stamp all payment instructions with the timestamp
of the first instruction from this ~user to that Team. I believe this
is used to determine the order of payments during payday.
The dict returned represents the row inserted in the payment_instructions
table.
"""
assert self.is_claimed # sanity check
amount = Decimal(amount) # May raise InvalidOperation
if (amount < gratipay.MIN_PAYMENT) or (amount > gratipay.MAX_PAYMENT):
raise BadAmount
# Insert payment instruction
NEW_PAYMENT_INSTRUCTION = """\
INSERT INTO payment_instructions
(ctime, participant_id, team_id, amount)
VALUES ( COALESCE (( SELECT ctime
FROM payment_instructions
WHERE ( participant_id=%(participant_id)s
AND team_id=%(team_id)s
)
LIMIT 1
), CURRENT_TIMESTAMP)
, %(participant_id)s, %(team_id)s, %(amount)s
)
RETURNING *
"""
args = dict(participant_id=self.id, team_id=team.id, amount=amount)
t = (cursor or self.db).one(NEW_PAYMENT_INSTRUCTION, args)
t_dict = t._asdict()
if amount > 0:
# Carry over any existing due
self._update_due(t_dict['team_id'], t_dict['id'], cursor)
else:
self._reset_due(t_dict['team_id'], cursor=cursor)
if update_self:
# Update giving amount of participant
self.update_giving(cursor)
if update_team:
# Update receiving amount of team
team.update_receiving(cursor)
if team.slug == 'Gratipay':
# Update whether the participant is using Gratipay for free
self.update_is_free_rider(None if amount == 0 else False, cursor)
return t._asdict()
def get_payment_instruction(self, team):
"""Given a Team instance, return a dict.
"""
default = dict(amount=Decimal('0.00'), is_funded=False)
return self.db.one("""\
SELECT *
FROM payment_instructions
WHERE participant_id=%s
AND team_id=%s
ORDER BY mtime DESC
LIMIT 1
""", (self.id, team.id), back_as=dict, default=default)
def get_due(self, team):
"""Given a Team instance, return a Decimal.
"""
return self.db.one("""\
SELECT due
FROM current_payment_instructions
WHERE participant_id = %s
AND team_id = %s
""", (self.id, team.id))
def get_giving_for_profile(self):
"""Return a list and a Decimal.
"""
GIVING = """\
SELECT * FROM (
SELECT DISTINCT ON (pi.team_id)
t.slug AS team_slug
, pi.amount
, pi.due
, pi.ctime
, pi.mtime
, t.name AS team_name
FROM payment_instructions pi
JOIN teams t ON pi.team_id = t.id
WHERE participant_id = %s
AND t.is_approved is true
AND t.is_closed is not true
ORDER BY pi.team_id
, pi.mtime DESC
) AS foo
ORDER BY amount DESC
, team_slug
"""
giving = self.db.all(GIVING, (self.id,))
# Compute the totals.
# ==================
totals = {
'amount': sum([rec.amount for rec in giving]) or Decimal('0.00'),
'due': sum([rec.due for rec in giving]) or Decimal('0.00')
}
return giving, totals
def get_old_stats(self):
"""Returns a tuple: (sum, number) of old-style 1.0 tips.
"""
return self.db.one("""
SELECT sum(amount), count(amount)
FROM current_tips
JOIN participants p ON p.username = tipper
WHERE tippee = %s
AND p.claimed_time IS NOT null
AND p.is_suspicious IS NOT true
AND p.is_closed IS NOT true
AND is_funded
AND amount > 0
""", (self.username,))
def update_giving_and_teams(self):
with self.db.get_cursor() as cursor:
updated_giving = self.update_giving(cursor)
for payment_instruction in updated_giving:
Team.from_id(payment_instruction.team_id).update_receiving(cursor)
def update_giving(self, cursor=None):
# Update is_funded on payment_instructions
has_credit_card = self.get_credit_card_error() == ''
updated = (cursor or self.db).all("""
UPDATE payment_instructions
SET is_funded = %(has_credit_card)s
WHERE participant_id = %(participant_id)s
AND is_funded <> %(has_credit_card)s
RETURNING *
""", dict(participant_id=self.id, has_credit_card=has_credit_card))
r = (cursor or self.db).one("""
WITH pi AS (
SELECT amount
FROM current_payment_instructions cpi
JOIN teams t ON t.id = cpi.team_id
WHERE participant_id = %(participant_id)s
AND amount > 0
AND is_funded
AND t.is_approved
)
UPDATE participants p
SET giving = COALESCE((SELECT sum(amount) FROM pi), 0)
, ngiving_to = COALESCE((SELECT
import ast
import re
import json
import jwt
import requests
from django.contrib.gis.db.models.functions import AsGeoJSON
from django.contrib.gis.gdal import SpatialReference
from django.db.models.base import ModelBase
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
# Create your views here.
from requests import ConnectionError
from requests import HTTPError
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.gis.geos import GEOSGeometry, GeometryCollection
from hyper_resource.contexts import *
from rest_framework.negotiation import BaseContentNegotiation
from django.contrib.gis.db import models
from abc import ABCMeta, abstractmethod
from hyper_resource.models import FactoryComplexQuery, OperationController, BusinessModel, ConverterType
SECRET_KEY = '<KEY>'
class IgnoreClientContentNegotiation(BaseContentNegotiation):
def select_parser(self, request, parsers):
"""
Select the first parser in the `.parser_classes` list.
"""
return parsers[0]
def select_renderer(self, request, renderers, format_suffix):
"""
Select the first renderer in the `.renderer_classes` list.
"""
return (renderers[0], renderers[0].media_type)
class BaseContext(object):
def __init__(self, contextclassname, serializer_object=None):
self.serializer_object = serializer_object
self.contextclassname = contextclassname
def options(self, request):
response = Response(self.getContextData(request), status=status.HTTP_200_OK, content_type="application/ld+json")
response = self.createLinkOfContext(request, response)
return response
def addContext(self, request, response):
return self.createLinkOfContext(request, response)
def createLinkOfContext(self, request, response, properties=None):
# if properties is None:
# url = reverse('context:detail', args=[self.contextclassname], request=request)
# else:
# url = reverse('context:detail-property', args=[self.contextclassname, ",".join(properties)], request=request)
url = request.build_absolute_uri()
url = url if url[-1] != '/' else url[:-1]
url = url + ".jsonld"
context_link = ' <'+url+'>; rel=\"http://www.w3.org/ns/json-ld#context\"; type=\"application/ld+json\" '
if "Link" not in response:
response['Link'] = context_link
else:
response['Link'] += "," + context_link
return response
def getHydraData(self, request):
#classobject = Class.objects.get(name=self.contextclassname)
#serializerHydra = HydraSerializer(classobject, request)
return {}
def addIriTemplate(self, context, request, serializer_object):
url = request.build_absolute_uri()
iriTemplate = {
"@context": "http://www.w3.org/ns/hydra/context.jsonld",
"@type": "IriTemplate",
"template": url if url[-1] != '/' else url[:-1] +"{/attribute}",
"variableRepresentation": "BasicRepresentation",
"mapping": []
}
if serializer_object is not None:
for attr in serializer_object.Meta.identifiers:
iriTemplate['mapping'].append({
"@type": "IriTemplateMapping",
"variable": "attribute",
"property": attr,
"required": True
})
else:
iriTemplate['mapping'].append({
"@type": "IriTemplateMapping",
"variable": "attribute",
"property": "hydra:supportedProperties",
"required": True
})
context['iriTemplate'] = iriTemplate
return context
def getContextData(self, request):
try:
classobject = None #Class.objects.get(name=self.contextclassname)
except:
return ""
serializer = None #ContextSerializer(classobject)
contextdata = {} #serializer.data
hydradata = self.getHydraData(request)
if "@context" in hydradata:
hydradata["@context"].update(contextdata["@context"])
contextdata.update(hydradata)
contextdata = self.addIriTemplate(contextdata, request, self.serializer_object)
return contextdata
class AbstractResource(APIView):
__metaclass__ = ABCMeta
serializer_class = None
def __init__(self):
super(AbstractResource, self).__init__()
self.current_object_state = None
self.object_model = None
self.name_of_last_operation_executed = None
self.context_resource = None
self.initialize_context()
self.iri_metadata = None
self.operation_controller = OperationController()
self.token_need = self.token_is_need()
content_negotiation_class = IgnoreClientContentNegotiation
def jwt_algorithm(self):
return 'HS256'
def token_is_ok(self, a_token):
try:
payload = jwt.decode(a_token, SECRET_KEY, algorithms=[self.jwt_algorithm()])
return True
except jwt.InvalidTokenError:
return False
def token_is_need(self):
return False
def add_url_in_header(self, url, response, rel):
link = ' <'+url+'>; rel=\"'+rel+'\" '
if "Link" not in response:
response['Link'] = link
else:
response['Link'] += "," + link
return response
def dispatch(self, request, *args, **kwargs):
if self.token_is_need():
http_auth = 'HTTP_AUTHORIZATION'
if http_auth in request.META and request.META[http_auth].startswith('Bearer'):
a_token = request.META['HTTP_AUTHORIZATION'][7:].strip()
if self.token_is_ok(a_token):
return super(AbstractResource, self).dispatch(request, *args, **kwargs)
resp = HttpResponse(json.dumps({"token": "token is needed or it is not ok"}), status=401, content_type='application/json')
resp['WWW-Authenticate'] = 'Bearer realm="example"'
return resp
else:
return super(AbstractResource, self).dispatch(request, *args, **kwargs)
#@abstractmethod #Could be override
def initialize_context(self):
context_module_name = self.__class__.__module__.split('.')[0] + '.contexts'
context_module = importlib.import_module(context_module_name)
context_class_name = self.__class__.__name__ + 'Context'
context_class = getattr(context_module, context_class_name )
self.context_resource = context_class()
self.context_resource.resource = self
# todo
def path_request_is_ok(self, a_path):
return True
def operations_with_parameters_type(self):
dic = self.object_model.operations_with_parameters_type()
return dic
def model_class(self):
return self.serializer_class.Meta.model #return self.object_model.model_class()
def model_class_name(self):
return self.model_class().__name__
def attribute_names_to_web(self):
return self.serializer_class.Meta.fields
def fields_to_web_for_attribute_names(self, attribute_names):
fields_model = self.object_model.fields()
# Could use ModelClass._meta.get_field(field_name) instead. Note: that raises FieldDoesNotExist for unknown names.
return [field for field in fields_model if field.name in attribute_names ]
def fields_to_web(self):
return self.fields_to_web_for_attribute_names(self.attribute_names_to_web())
def _base_path(self, full_path):
arr = full_path.split('/')
ind = arr.index(self.contextclassname)
return '/'.join(arr[:ind+1])
def _set_context_to_model(self):
self.context_resource.contextModel(self.object_model)
def _set_context_to_attributes(self, attribute_name_array):
self.context_resource.set_context_to_attributes(attribute_name_array)
def _set_context_to_only_one_attribute(self, attribute_name):
self.context_resource.set_context_to_only_one_attribute(self.current_object_state, attribute_name)
def _set_context_to_operation(self, operation_name):
self.context_resource.set_context_to_operation(self.current_object_state, operation_name)
def set_basic_context_resource(self, request ):
self.context_resource.host = request.META['HTTP_HOST']
self.context_resource.basic_path = self._base_path(request.META['PATH_INFO'])
if len(self.kwargs.values()):
self.context_resource.complement_path = list(self.kwargs.values())[0]
else:
self.context_resource.complement_path = ''
def key_is_identifier(self, key):
return key in self.serializer_class.Meta.identifiers
def dic_with_only_identifier_field(self, dict_params):
dic = dict_params.copy()
a_dict = {}
for key, value in dic.items():
if self.key_is_identifier(key):
a_dict[key] = value
return a_dict
'''
def get_object(self, arr_of_term=[]):
first_term = arr_of_term[0]
if self.is_attribute(self, first_term):
self.current_object_state = getattr(self.object_model, first_term, None)
arr_of_term = arr_of_term[1:]
for term in arr_of_term:
self.current_object_state = getattr(self.current_object_state, term, None)
return self.current_object_state
'''
def attributes_functions_name_template(self):
return 'attributes_functions'
def get_object(self, a_dict):
dicti = self.dic_with_only_identifier_field(a_dict)
queryset = self.model_class().objects.all()
obj = get_object_or_404(queryset, **dicti)
#self.check_object_permissions(self.request, obj)
return obj
def patch(self, request, *args, **kwargs):
return super(AbstractResource, self).patch(request, *args, **kwargs)
def head(self, request, *args, **kwargs):
resp = Response(status=status.HTTP_200_OK)
return resp
def put(self, request, *args, **kwargs):
obj = self.get_object(kwargs)
serializer = self.serializer_class(obj, data=request.data, context={'request': request})
if serializer.is_valid():
serializer.save()
resp = Response(status=status.HTTP_204_NO_CONTENT)
return resp
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, *args, **kwargs):
obj = self.get_object(kwargs)
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def operation_names_model(self):
return self.object_model.operation_names()
def attribute_names_model(self):
return self.object_model.attribute_names()
def is_private(self, attribute_or_method_name):
return attribute_or_method_name.startswith('__') and attribute_or_method_name.endswith('__')
def is_not_private(self, attribute_or_method_name):
return not self.is_private(attribute_or_method_name)
def is_operation(self, operation_name):
return operation_name in self.operation_names_model()
def is_attribute(self, attribute_name):
return self.object_model.is_attribute(attribute_name)
def is_spatial_attribute(self, attribute_name):
return False
def _has_method(self, method_name):
return method_name in self.operation_names_model()
def is_simple_path(self, attributes_functions_str):
return attributes_functions_str is None or len(attributes_functions_str) == 0
def path_has_operations(self, attributes_functions_name):
attrs_functs = attributes_functions_name.split('/')
operations = self.operation_names_model()
for att_func in attrs_functs:
if att_func in operations:
return True
return False
def path_has_only_attributes(self, attributes_functions_name):
attrs_functs = attributes_functions_name.split('/')
if len(attrs_functs) > 1:
return False
if ',' in attrs_functs[0]:
return True
if self._has_method(attrs_functs[0]):
return False
return self.object_model.is_attribute(attrs_functs[0])
def transform_path_with_url_as_array(self, arr_of_term):
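"""Re-join URL fragments that were split apart by the '/' path separator.
Consecutive tokens belonging to an embedded http/https/www URL are glued
back into a single element; all other tokens pass through unchanged.
"""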
arr = []
http_str = ''
arr_term = [ele for ele in arr_of_term if ele != '']
found_url = False
size_of_term = len(arr_term)
for idx, token in enumerate(arr_term):
if self.token_is_http_or_https_or_www(token.lower()):
found_url = True
if found_url:
if self.token_is_http_or_https(token):
http_str += token + '//'
elif self.is_end_of_term(token):
found_url = False
arr.append(http_str)
arr.append(token)
http_str = ''
elif (idx == size_of_term -1):
found_url = False
http_str+= token + '/'
arr.append(http_str)
http_str = ''
else:
http_str += token + '/'
else:
arr.append(token)
return arr
def attributes_functions_splitted_by_url(self, attributes_functions_str_url):
res = attributes_functions_str_url.lower().find('http:')
if res == -1:
res = attributes_functions_str_url.lower().find('https:')
if res == -1:
res = attributes_functions_str_url.lower().find('www.')
if res == -1:
return [attributes_functions_str_url]
return [attributes_functions_str_url[0:res], attributes_functions_str_url[res:]]
def path_has_url(self, attributes_functions_str_url):
return (attributes_functions_str_url.find('http:') > -1) or (attributes_functions_str_url.find('https:') > -1)\
or (attributes_functions_str_url.find('www.') > -1)
def _execute_attribute_or_method(self, object, attribute_or_method_name, array_of_attribute_or_method_name):
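"""Recursively evaluate a chain of attributes/operations from the URL path.
Each step resolves one name on the current object (collecting the
'&'-separated parameters when that name is an operation), then recurses on
the result with the remaining path segments.
"""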
dic = {}
parameters = []
if OperationController().is_operation(object, attribute_or_method_name):
if OperationController().operation_has_parameters(object, attribute_or_method_name):
parameters = array_of_attribute_or_method_name[0].split('&')
array_of_attribute_or_method_name = array_of_attribute_or_method_name[1:]
obj = self._value_from_object(object, attribute_or_method_name, parameters)
if len(array_of_attribute_or_method_name) == 0:
return obj
return self._execute_attribute_or_method(obj, array_of_attribute_or_method_name[0], array_of_attribute_or_method_name[1:])
def is_operation_and_has_parameters(self, attribute_or_method_name):
dic = self.operations_with_parameters_type()
return (attribute_or_method_name in dic) and len(dic[attribute_or_method_name].parameters)
def function_name(self, attributes_functions_str):
functions_dic = self.operations_with_parameters_type()
if str(attributes_functions_str[-1]) in functions_dic:
return str(attributes_functions_str[-1])
return str(attributes_functions_str[-2])
def response_resquest_with_attributes(self, attributes_functions_name):
a_dict ={}
attributes = attributes_functions_name.strip().split(',')
#self.current_object = self.object_model
for attr_name in attributes:
obj = self._value_from_object(self.object_model, attr_name, [])
a_dict[attr_name] = obj
self.current_object_state = a_dict
return (a_dict, 'application/json', self.object_model, {'status': 200})
def all_parameters_converted(self, attribute_or_function_name, parameters):
parameters_converted = []
if self.is_operation_and_has_parameters(attribute_or_function_name):
parameters_type = self.operations_with_parameters_type()[attribute_or_function_name].parameters
for i in range(0, len(parameters)):
parameters_converted.append(parameters_type[i](parameters[i]))
return parameters_converted
return self.parametersConverted(parameters)
def is_attribute_for(self, object, attribute_or_function_name):
return hasattr(object, attribute_or_function_name) and not callable(getattr(object, attribute_or_function_name))
def _value_from_object(self, object, attribute_or_function_name, parameters):
attribute_or_function_name_striped = attribute_or_function_name.strip()
self.name_of_last_operation_executed = attribute_or_function_name_striped
if self.is_attribute_for(object, attribute_or_function_name):
return getattr(object, attribute_or_function_name_striped)
if len(parameters)> 0:
if (isinstance(object, BusinessModel) or isinstance(object, GEOSGeometry)):
params = self.all_parameters_converted(attribute_or_function_name_striped, parameters)
else:
params = ConverterType().convert_parameters(type(object), attribute_or_function_name, parameters)
return getattr(object, attribute_or_function_name_striped)(*params)
return getattr(object, attribute_or_function_name_striped)()
def parametersConverted(self, params_as_array):
paramsConveted = []
for value in params_as_array:
if value.lower() == 'true':
paramsConveted.append(True)
continue
elif value.lower() == 'false':
paramsConveted.append(False)
continue
try:
paramsConveted.append(int( value ) )
continue
except ValueError:
pass
try:
paramsConveted.append( float( value ) )
continue
except ValueError:
pass
try:
paramsConveted.append( GEOSGeometry( value ) )
continue
except ValueError:
pass
try:
http_str = (value[0:4]).lower()
if (http_str == 'http'):
resp = requests.get(value)
if 400 <= resp.status_code <= 599:
raise HTTPError({resp.status_code: resp.reason})
js = resp.json()
if (js.get("type") and js["type"].lower() in ['feature', 'featurecollection']):
a_geom = js["geometry"]
else:
a_geom = js
paramsConveted.append(GEOSGeometry((json.dumps(a_geom))))
except (ConnectionError, HTTPError) as err:
print('Error: {}'.format(err))
#paramsConveted.append (value)
return paramsConveted
class NonSpatialResource(AbstractResource):
def response_of_request(self, attributes_functions_str):
att_funcs = attributes_functions_str.split('/')
if (not self.is_operation(att_funcs[0])) and self.is_attribute(att_funcs[0]):
att_funcs = att_funcs[1:]
self.current_object_state = self._execute_attribute_or_method(self.object_model, att_funcs[0], att_funcs[1:])
if hasattr(self.current_object_state, 'model') and issubclass(self.current_object_state.model, Model):
class_name = self.current_object_state.model.__name__ + 'Serializer'
serializer_cls = self.object_model.class_for_name(self.serializer_class.__module__, class_name)
if isinstance(self.current_object_state, QuerySet):
self.current_object_state = serializer_cls(self.current_object_state, many=True,
context={'request': self.request}).data
elif isinstance(self.current_object_state.field, OneToOneField):
self.current_object_state = serializer_cls(self.current_object_state, context={'request': self.request}).data
else:
self.current_object_state = serializer_cls(self.current_object_state, many=True, context={'request': self.request}).data
a_value = {self.name_of_last_operation_executed: self.current_object_state}
return (a_value, 'application/json', self.object_model, {'status': 200})
start_values = [default_values]
for j in range(len(default_values)):
point = []
for i in range(len(default_values)):
if isinstance(default_values[i], bool):
point.append(default_values[i])
else:
low = default_values[i]-1
high = default_values[i]+1
#checking if low is higher than the lower hard_bound
if hard_bounds[i][0] != None:
while low < hard_bounds[i][0]:
low += 0.1
#checking if up is lower than the upper hard_bound
if hard_bounds[i][1] != None:
while high > hard_bounds[i][1]:
high += -0.1
#generates random points around the default values
point.append(round(random.uniform(low,high),4))
start_values.append(point)
#starting a count for the report statistics
point_count = len(start_values)
#creating the output folder for the starting point - this folder will include folders for each starting points
startingpath = write.createSubFolder(os.path.join(outputspath, "starting_points"), "starting_points")
#Evaluation of the first points
if commands.multi is True:
futureChunks = []
for i in range(len(start_values)):
futureChunks.append([start_values[i],i])
inputs = [commands, config, startingpath, parameters, InpxName, InpxPath, "iteration_0", value_names, video_values, running]
results = define.createWorkers(futureChunks, runVissimForCalibrationAnalysis, inputs, commands)
#unpacking results and writing to report
p_values = []
for i in range(len(results)):
for j in range(len(results[i][0])):
write.writeInFile(out, [results[i][1][j], start_values[i], results[i][0][j]])
p_values.append(results[i][0][j])
else:
inputs = [commands, config, startingpath, parameters, InpxName, InpxPath, "iteration_0", value_names, video_values, running]
p_values, name = runVissimForCalibrationAnalysis(start_values, inputs)
#writing to report
for i in range(len(p_values)):
write.writeInFile(out, [name[i], start_values[i], p_values[i]])
points = copy.deepcopy(start_values)
gap_p_values = [p_values[i][0] for i in range(len(p_values))]
##################
# Algorythm
##################
# **** 1. sorting the p_values and the points together *****************
gap_p_values, points = define.sort2lists(gap_p_values, points)
#diameter
Delta = simplicesDiameter(points) #Delta = diam(Y)
#To call the statistical function:
#H-statistic, p-value = kruskalwallis(default_value, calculated_value)
#if p<0.05 then first array is statistically different from second array
#normally len(array) must be >= 5
while (Delta > Delta_tol or gap_p_values[0] >= 0.05) and itt <= max_itt:
#prequisite stuff
## iteration number
itt += 1
if commands.verbose:
print ' == Starting work on iteration number ' + str(itt) + ' =='
##iteration folder
iterationpath = write.createSubFolder(os.path.join(outputspath, "iteration_"+str(itt)), "iteration_"+str(itt))
# **** 2. Reflect ******************************************************
worst = points[-1]; points.pop(-1)
worst_p = gap_p_values[-1]; gap_p_values.pop(-1)
##calculating yc
yc = []
coordinates = [[] for i in range(len(points[0]))]
for i in range(len(points)): #must find the centroid for each coordinates
for o in range(len(points[i])):
coordinates[o].append(points[i][o])
for coord in coordinates:
yc.append(np.mean(coord)) # yc = Σ(from i=0 to n-1) yi / n
##reflecting every coordinates
yr = newPoint(worst, yc, delta_r,hard_bounds)
##evaluating the new point
inputs = [commands, config, iterationpath, parameters, InpxName, InpxPath, "iteration_"+str(itt)+"_reflect", value_names, video_values, running]
yr_p_values, name = runVissimForCalibrationAnalysis(yr, inputs)
write.writeInFile(out, [name, yr, yr_p_values])
#incrementing stats
point_count += 1
#3. **** Expand ** and 1. Sort **************************************
if yr_p_values[0] < gap_p_values[0]: #the indice must correspond to the one used to define gap_p_values
##expanding
ye = newPoint(worst, yc, delta_e,hard_bounds)
##evaluating the new point
inputs = [commands, config, iterationpath, parameters, InpxName, InpxPath, "iteration_"+str(itt)+"_expand", value_names, video_values, running]
ye_p_values, name = runVissimForCalibrationAnalysis(ye, inputs)
write.writeInFile(out, [name, ye, ye_p_values])
#incrementing stats
point_count += 1
if ye_p_values[0] < yr_p_values[0]: #the indice must correspond to the one used to define gap_p_values
gap_p_values,points = appendAndsort2lists_withPriority(gap_p_values,points,ye_p_values[0],ye,'expansion')
Delta = simplicesDiameter(points)
if commands.verbose:
print (' == Iteration concluded with an expansion == \n'
'')
continue
else:
gap_p_values,points = appendAndsort2lists_withPriority(gap_p_values,points,yr_p_values[0],yr,'reflection')
Delta = simplicesDiameter(points)
if commands.verbose:
print (' == Iteration concluded with a reflection == \n'
'')
continue
#4. **** Contract ** and 1. Sort ************************************
elif yr_p_values[0] >= gap_p_values[-1]: #the indice must correspond to the one used to define gap_p_values
#outside contraction
if yr_p_values[0] < worst_p: #the indice must correspond to the one used to define gap_p_values
##contracting
yoc = newPoint(worst, yc, delta_oc,hard_bounds)
##evaluating the new point
inputs = [commands, config, iterationpath, parameters, InpxName, InpxPath, "iteration_"+str(itt)+"_outside_contraction", value_names, video_values, running]
yoc_p_values, name = runVissimForCalibrationAnalysis(yoc, inputs)
write.writeInFile(out, [name, yoc, yoc_p_values])
#incrementing stats
point_count += 1
if yoc_p_values[0] < yr_p_values[0]:
gap_p_values,points = appendAndsort2lists_withPriority(gap_p_values,points,yoc_p_values[0],yoc,'outside contraction')
Delta = simplicesDiameter(points)
if commands.verbose:
print (' == Iteration concluded with an outside contraction == \n'
'')
continue
else:
pass
#inside contraction
else:
##contracting
yic = newPoint(worst, yc, delta_ic,hard_bounds)
##evaluating the new point
inputs = [commands, config, iterationpath, parameters, InpxName, InpxPath, "iteration_"+str(itt)+"_inside_contraction", value_names, video_values, running]
yic_p_values, name = runVissimForCalibrationAnalysis(yic, inputs)
write.writeInFile(out, [name, yic, yic_p_values])
#incrementing stats
point_count += 1
if yic_p_values[0] < yr_p_values[0]: #the indice must correspond to the one used to define gap_p_values
gap_p_values,points = appendAndsort2lists_withPriority(gap_p_values,points,yic_p_values[0],yic,'inside contraction')
Delta = simplicesDiameter(points)
if commands.verbose:
print (' == Iteration concluded with an inside contraction == \n'
'')
continue
else:
pass
#****** 5. Skrink ** and 1. Sort ***************************************
##generating the folder
shrinkpath = write.createSubFolder(os.path.join(iterationpath, "shrink"), "shrink")
##writing the kept point to the report
write.writeInFile(out, ["iteration_"+str(itt)+"_shrink_y0", points[0], gap_p_values[0]])
##generating the shrinked points y0, y1', y2' ..., yn'
points.append(worst)
shrink_points = []
for i in range(len(points)-1):
construction_point = []
for o in range(len(points[i+1])):
construction_point.append(points[0][o] + gamma_s*(points[i+1][o]-points[0][o]))
shrink_points.append(construction_point)
##evaluating the new points
if commands.multi is True:
#shutting down the verbose command in multiprocessing
restart = False
if commands.verbose:
commands.verbose = False
restart = True
futureChunks = []
for i in range(len(shrink_points)):
futureChunks.append([shrink_points[i],i])
inputs = [commands, config, shrinkpath, parameters, InpxName, InpxPath, "iteration_"+str(itt)+"_shrink", value_names, video_values, running]
results = define.createWorkers(futureChunks, runVissimForCalibrationAnalysis, inputs, commands)
#reenabling verbose for the rest of the iteration
if restart is True: commands.verbose = True
#unpacking results and writing to report
shrink_p_values = []
for i in range(len(results)):
for j in range(len(results[i][0])):
write.writeInFile(out, [results[i][1][j], shrink_points[i], results[i][0][j]])
shrink_p_values.append(results[i][0][j])
else:
inputs = [commands, config, shrinkpath, parameters, InpxName, InpxPath, "iteration_"+str(itt)+"_shrink", value_names, video_values, running]
shrink_p_values, name = runVissimForCalibrationAnalysis(shrink_points, inputs)
for i in range(len(shrink_points)):
write.writeInFile(out, [name[i], shrink_points[i], shrink_p_values[i]])
#incrementing stats
point_count += len(shrink_points)
#working out the p_value to be used
gap_shrink_p_values = []
for i in range(len(shrink_p_values)):
gap_shrink_p_values.append(shrink_p_values[i][0])
gap_p_values,points = appendAndsort2lists_withPriority(gap_p_values[0],points[0],gap_shrink_p_values,shrink_points,'shrink')
Delta = simplicesDiameter(points)
if commands.verbose:
print (' == Iteration concluded with a shrink == \n'
'')
continue
#0. **** Initialize next step if f0 < fr <= fn-1 ** and 1. Sort ******
else:
gap_p_values,points = appendAndsort2lists_withPriority(gap_p_values,points,yr_p_values[0],yr,'reflection')
Delta = simplicesDiameter(points)
if commands.verbose:
print (' == Iteration concluded with a reflection == \n'
'')
continue
#need more code?
if commands.verbose:
if itt >= max_itt:
print '-> maximum number of iterations reached. Aborting calculations'
else:
print '-> Optimum found'
return itt, point_count
def pattern_search(out, config, commands, rangevalues, default_values, outputspath, first_start_default = True):
'''
Pseudocode:
*****************************************************************
* from: Introduction to derivative-free optimization p.120 *
* Algorithm 7.1 (Coordinate-search method). *
*****************************************************************
Initialization: Choose x0, and α0 > 0.
For k = 0, 1, 2, ...
1. Poll step: Order the poll set Pk = {xk +αkd : d ∈ D⊕}. Start evaluating f
at the poll points following the order determined. If a poll point xk +αkdk is
found such that f (xk +αkdk ) < f (xk), then stop polling, set xk+1 = xk +αkdk,
and declare the iteration and the poll step successful. Otherwise, declare the
iteration (and the poll step) unsuccessful and set xk+1 = xk.
2. Parameter update: If the iteration was successful, set αk+1 = αk (or αk+1 =
2αk ). Otherwise, set αk+1 = αk/2.
The poll step makes at most |Dk | (where |Dk| ≥ 2n + 1) function evaluations and
exactly that many at all unsuccessful iterations.
The natural stopping criterion in directional direct search is to terminate the run
when αk < αtol, for a chosen tolerance αtol > 0 (for instance, αtol = 10^-5).
'''
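# A minimal sketch of the poll step described above, with hypothetical names
# (unit(i) is the i-th coordinate direction, f the objective); in this module
# the objective is actually evaluated by running Vissim:
#
#   for d in [s * unit(i) for i in range(n) for s in (1, -1)]:
#       if f(x_k + alpha_k * d) < f(x_k):
#           x_k = x_k + alpha_k * d   # successful poll: keep alpha_k
#           break
#   else:
#       alpha_k = alpha_k / 2.0       # unsuccessful poll: halve the step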
#Defining parameters #may add some random generation?
##Alpha0 #the values are now arbitrary and no search was done to define them
alpha_0 = 1
##Betas
beta_1
create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, '00:01|00:05')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertFalse(os.path.isdir(root.storage_dir))
self.assertEquals(log_count('not cleaning up RootAnalysis({}) (found outstanding work)'.format(root.uuid)), 1)
def test_local_analysis_mode_single(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(local_analysis_modes=['test_groups'], pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
def test_local_analysis_mode_missing_default(self):
saq.CONFIG['engine']['default_analysis_mode'] = 'test_single'
# when we specify a default analysis mode that is not in the locally supported modes of the engine
# it should automatically get added to the list of locally supported modes
# we specify test_single as the supported local analysis mode, but the default is test_empty
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
engine = TestEngine(local_analysis_modes=['test_empty'],
pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
#self.assertIsNotNone(analysis)
# both test_empty and test_single should be in this list
self.assertEquals(len(engine.local_analysis_modes), 2)
self.assertTrue('test_single' in engine.local_analysis_modes)
self.assertTrue('test_empty' in engine.local_analysis_modes)
def test_local_analysis_mode_missing_pool(self):
saq.CONFIG['engine']['default_analysis_mode'] = 'test_empty'
# test_empty is specified as the only supported mode
# but we specify a pool for test_single
# this is a configuration error
engine = TestEngine(local_analysis_modes=['test_empty'],
analysis_pools={'test_single': 1})
wait_for_log_count('attempted to add analysis pool for mode test_single which is not supported by this engine', 1, 5)
def test_local_analysis_mode_not_local(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
# but we target test_single for this analysis
root.analysis_mode = 'test_single'
root.save()
root.schedule()
# we say we only support test_empty analysis modes
engine = TestEngine(local_analysis_modes=['test_empty'])
engine.enable_module('analysis_module_basic_test', 'test_empty')
engine.controlled_stop()
engine.start()
engine.wait()
# this should exit out since the workload entry is for test_single analysis mode
# but we don't support that with this engine so it shouldn't see it
def test_local_analysis_mode_remote_pickup(self):
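"""A workload targeted at a mode this engine does not support stays on disk
until a second node that does support the mode downloads it over the API
and completes the analysis there.
"""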
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
# but we target test_single for this analysis
root.analysis_mode = 'test_single'
root.save()
root.schedule()
# remember the old storage dir
old_storage_dir = root.storage_dir
# we say we only support test_empty analysis modes
engine = TestEngine(local_analysis_modes=['test_empty'],
analysis_pools={'test_empty': 1})
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
# this should exist out since we don't support this analysis mode with this engine instance
engine.wait()
# make sure our stuff is still there
self.assertTrue(os.path.exists(old_storage_dir))
# start an api server for this node
self.start_api_server()
self.reset_config()
# now start another engine on a different "node"
saq.CONFIG['global']['node'] = 'second_host'
saq.set_node('second_host')
saq.CONFIG['analysis_mode_test_single']['cleanup'] = 'no'
# and this node handles the test_single mode
saq.CONFIG['engine']['local_analysis_modes'] = 'test_single'
saq.CONFIG['engine']['analysis_pool_size_test_single'] = '1'
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.start()
# since this is remote we can't use the technique where we call controlled_stop and
# wait for the queues to empty because only the local queue is checked (which is currently empty)
# look for the log to move the work target
wait_for_log_count('downloading work target {} from '.format(root.uuid), 1, 5)
wait_for_log_count('completed analysis RootAnalysis({})'.format(root.uuid), 1, 5)
engine.controlled_stop()
engine.wait()
# now the old storage directory should be gone
self.assertFalse(os.path.exists(old_storage_dir))
# but there should be a new one in the new "node"
root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
@use_db
def test_local_analysis_mode_remote_pickup_invalid_company_id(self, db, c):
# TestCase - we've got nothing to do locally but there is work
# on a remote server, but that work is assigned to a different company
# we do NOT grab that work
# first we add a new company
c.execute("INSERT INTO company ( name ) VALUES ( 'unittest' )")
db.commit()
# get the new company_id
c.execute("SELECT id FROM company WHERE name = 'unittest'")
row = c.fetchone()
self.assertIsNotNone(row)
other_company_id = row[0]
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
# but we target test_single for this analysis
root.analysis_mode = 'test_single'
root.company_id = other_company_id
root.save()
root.schedule()
# remember the old storage dir
old_storage_dir = root.storage_dir
# we say we only support test_empty analysis modes
engine = TestEngine(local_analysis_modes=['test_empty'],
analysis_pools={'test_empty': 1})
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
# this should exit out since we do not support this analysis mode with this engine
engine.wait()
# make sure our stuff is still there
self.assertTrue(os.path.exists(old_storage_dir))
# start an api server for this node
self.start_api_server()
self.reset_config()
# now start another engine on a different "node"
saq.CONFIG['global']['node'] = 'second_host'
saq.set_node('second_host')
saq.CONFIG['analysis_mode_test_single']['cleanup'] = 'no'
# and this node handles the test_single mode
saq.CONFIG['engine']['local_analysis_modes'] = 'test_single'
saq.CONFIG['engine']['analysis_pool_size_test_single'] = '1'
engine = TestEngine(local_analysis_modes=['test_single'],
analysis_pools={'test_single': 1})
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
# we should see the same thing happen since the remote work is assigned to the other company
engine.wait()
# make sure our stuff is still there
self.assertTrue(os.path.exists(old_storage_dir))
@use_db
def test_status_update(self, db, c):
# start an empty engine and wait for the node update
engine = TestEngine()
engine.start()
wait_for_log_count('updated node', 1, 5)
# do we have an entry in the nodes database table?
c.execute("SELECT name, location, company_id, last_update FROM nodes WHERE id = %s", (saq.SAQ_NODE_ID,))
row = c.fetchone()
self.assertIsNotNone(row)
self.assertEquals(row[0], saq.SAQ_NODE)
self.assertEquals(row[1], saq.API_PREFIX)
self.assertEquals(row[2], saq.COMPANY_ID)
engine.stop()
engine.wait()
@use_db
def test_node_modes_update(self, db, c):
# when an Engine starts up it updates the node_modes database with the list of analysis modes it locally supports
# configure to support two modes
engine = TestEngine(local_analysis_modes=['test_empty', 'test_single'])
engine.controlled_stop()
engine.start()
engine.wait()
# we should have two entries in the node_modes database for the current node_id
c.execute("SELECT analysis_mode FROM node_modes WHERE node_id = %s ORDER BY analysis_mode ASC", (saq.SAQ_NODE_ID,))
self.assertEquals(c.fetchone(), ('test_empty',))
self.assertEquals(c.fetchone(), ('test_single',))
# and the any_mode column should be 0 for this node
c.execute("SELECT any_mode FROM nodes WHERE id = %s", (saq.SAQ_NODE_ID,))
self.assertEquals(c.fetchone(), (0,))
@use_db
def test_node_modes_update_any(self, db, c):
# when an Engine starts up it updates the node_modes database with the list of analysis modes it locally supports
# configure to support two modes
engine = TestEngine(local_analysis_modes=[])
engine.controlled_stop()
engine.start()
engine.wait()
# we should have NO entries in the node_modes database for the current node_id
c.execute("SELECT analysis_mode FROM node_modes WHERE node_id = %s ORDER BY analysis_mode ASC", (saq.SAQ_NODE_ID,))
self.assertIsNone(c.fetchone())
# and the any_mode column should be 1 for this node
c.execute("SELECT any_mode FROM nodes WHERE id = %s", (saq.SAQ_NODE_ID,))
self.assertEquals(c.fetchone(), (1,))
@use_db
def test_primary_node(self, db, c):
# test having a node become the primary node
saq.CONFIG['engine']['node_status_update_frequency'] = '1'
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
c.execute("SELECT name FROM nodes WHERE id = %s AND is_primary = 1", (saq.SAQ_NODE_ID,))
self.assertIsNotNone(c.fetchone())
engine.stop()
engine.wait()
@use_db
def test_primary_node_contest(self, db, c):
# test having a node become the primary node
# and then another node NOT becoming a primary node because there already is one
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
c.execute("SELECT name FROM nodes WHERE id = %s AND is_primary = 1", (saq.SAQ_NODE_ID,))
self.assertIsNotNone(c.fetchone())
engine.stop()
engine.wait()
saq.set_node('another_node')
engine = TestEngine()
engine.start()
wait_for_log_count('node {} is not primary'.format(saq.SAQ_NODE), 1, 5)
engine.stop()
engine.wait()
@use_db
def test_primary_node_contest_winning(self, db, c):
# test having a node become the primary node
# after another node times out
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
c.execute("SELECT name FROM nodes WHERE id = %s AND is_primary = 1", (saq.SAQ_NODE_ID,))
self.assertIsNotNone(c.fetchone())
engine.stop()
engine.wait()
# update the node to make it look like it last updated a while ago
c.execute("UPDATE nodes SET last_update = ADDTIME(last_update, '-1:00:00') WHERE id = %s", (saq.SAQ_NODE_ID,))
db.commit()
c.execute("SELECT last_update FROM nodes WHERE id = %s", (saq.SAQ_NODE_ID,))
saq.set_node('another_node')
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
engine.stop()
engine.wait()
@use_db
def test_primary_node_clear_locks(self, db, c):
target = str(uuid.uuid4())
lock_uuid = str(uuid.uuid4())
self.assertTrue(acquire_lock(target, lock_uuid))
saq.LOCK_TIMEOUT_SECONDS = 0
# test having a node become the primary node
# and then clearing out an expired lock
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
wait_for_log_count('removed 1 expired locks', 1, 5)
engine.stop()
engine.wait()
# make sure the lock is gone
c.execute("SELECT uuid FROM locks WHERE | |
from __future__ import print_function, division
import numpy as np
from .astroTimeLegacy import premat, daycnv, precess, helio_jd
from .idlMod import idlMod
from PyAstronomy.pyaC import pyaErrors as PE
import six
import six.moves as smo
def baryvel(dje, deq):
"""
Calculate helio- and barycentric velocity.
.. note:: The "JPL" option present in IDL is not provided here.
Parameters
----------
dje : float
Julian ephemeris date
deq : float
Epoch of mean equinox of helio- and barycentric velocity output.
If `deq` is zero, `deq` is assumed to be equal to `dje`.
Returns
-------
dvelh : array
Heliocentric velocity vector [km/s].
dvelb : array
Barycentric velocity vector [km/s].
Notes
-----
.. note:: This function was ported from the IDL Astronomy User's Library.
:IDL - Documentation:
pro baryvel, dje, deq, dvelh, dvelb, JPL = JPL
NAME:
BARYVEL
PURPOSE:
Calculates heliocentric and barycentric velocity components of Earth.
EXPLANATION:
BARYVEL takes into account the Earth-Moon motion, and is useful for
radial velocity work to an accuracy of ~1 m/s.
CALLING SEQUENCE:
BARYVEL, dje, deq, dvelh, dvelb, [ JPL = ]
INPUTS:
DJE - (scalar) Julian ephemeris date.
DEQ - (scalar) epoch of mean equinox of dvelh and dvelb. If deq=0
then deq is assumed to be equal to dje.
OUTPUTS:
DVELH: (vector(3)) heliocentric velocity component. in km/s
DVELB: (vector(3)) barycentric velocity component. in km/s
The 3-vectors DVELH and DVELB are given in a right-handed coordinate
system with the +X axis toward the Vernal Equinox, and +Z axis
toward the celestial pole.
OPTIONAL KEYWORD SET:
JPL - if /JPL set, then BARYVEL will call the procedure JPLEPHINTERP
to compute the Earth velocity using the full JPL ephemeris.
The JPL ephemeris FITS file JPLEPH.405 must exist in either the
current directory, or in the directory specified by the
environment variable ASTRO_DATA. Alternatively, the JPL keyword
can be set to the full path and name of the ephemeris file.
A copy of the JPL ephemeris FITS file is available in
http://idlastro.gsfc.nasa.gov/ftp/data/
PROCEDURES CALLED:
Function PREMAT() -- computes precession matrix
JPLEPHREAD, JPLEPHINTERP, TDB2TDT - if /JPL keyword is set
NOTES:
Algorithm taken from FORTRAN program of Stumpff (1980, A&A Suppl, 41,1)
Stumpf claimed an accuracy of 42 cm/s for the velocity. A
comparison with the JPL FORTRAN planetary ephemeris program PLEPH
found agreement to within about 65 cm/s between 1986 and 1994
If /JPL is set (using JPLEPH.405 ephemeris file) then velocities are
given in the ICRS system; otherwise in the FK4 system.
EXAMPLE:
Compute the radial velocity of the Earth toward Altair on 15-Feb-1994
using both the original Stumpf algorithm and the JPL ephemeris
IDL> jdcnv, 1994, 2, 15, 0, jd ;==> JD = 2449398.5
IDL> baryvel, jd, 2000, vh, vb ;Original algorithm
==> vh = [-17.07243, -22.81121, -9.889315] ;Heliocentric km/s
==> vb = [-17.08083, -22.80471, -9.886582] ;Barycentric km/s
IDL> baryvel, jd, 2000, vh, vb, /jpl ;JPL ephemeris
==> vh = [-17.07236, -22.81126, -9.889419] ;Heliocentric km/s
==> vb = [-17.08083, -22.80484, -9.886409] ;Barycentric km/s
IDL> ra = ten(19,50,46.77)*15/!RADEG ;RA in radians
IDL> dec = ten(08,52,3.5)/!RADEG ;Dec in radians
IDL> v = vb[0]*cos(dec)*cos(ra) + $ ;Project velocity toward star
vb[1]*cos(dec)*sin(ra) + vb[2]*sin(dec)
REVISION HISTORY:
<NAME>, U.C. Berkeley Translated BARVEL.FOR to IDL.
<NAME>, Cleaned up program sent by <NAME> (SfSU) June 1994
Converted to IDL V5.0 <NAME> September 1997
Added /JPL keyword <NAME> July 2001
Documentation update W. Landsman Dec 2005
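Example
-------
Illustrative usage of this Python port, mirroring the IDL example above
(the JD 2449398.5 corresponds to 15-Feb-1994):
``dvelh, dvelb = baryvel(2449398.5, 2000.0)``
Projecting ``dvelb`` onto the unit vector toward the target star gives the
barycentric radial-velocity correction in km/s.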
"""
# Define constants
dc2pi = 2 * np.pi
cc2pi = 2 * np.pi
dc1 = 1.0
dcto = 2415020.0
dcjul = 36525.0 # days in Julian year
dcbes = 0.313
dctrop = 365.24219572 # days in tropical year (...572 insig)
dc1900 = 1900.0
AU = 1.4959787e8
# Constants dcfel(i,k) of fast changing elements.
dcfel = [1.7400353e00, 6.2833195099091e02, 5.2796e-6, 6.2565836e00, 6.2830194572674e02, -2.6180e-6, 4.7199666e00, 8.3997091449254e03, -1.9780e-5, 1.9636505e-1, 8.4334662911720e03, -5.6044e-5,
4.1547339e00, 5.2993466764997e01, 5.8845e-6, 4.6524223e00, 2.1354275911213e01, 5.6797e-6, 4.2620486e00, 7.5025342197656e00, 5.5317e-6, 1.4740694e00, 3.8377331909193e00, 5.6093e-6]
dcfel = np.resize(dcfel, (8, 3))
# constants dceps and ccsel(i,k) of slowly changing elements.
dceps = [4.093198e-1, -2.271110e-4, -2.860401e-8]
ccsel = [1.675104e-2, -4.179579e-5, -1.260516e-7, 2.220221e-1, 2.809917e-2, 1.852532e-5, 1.589963e00, 3.418075e-2, 1.430200e-5, 2.994089e00, 2.590824e-2, 4.155840e-6, 8.155457e-1, 2.486352e-2, 6.836840e-6, 1.735614e00, 1.763719e-2, 6.370440e-6, 1.968564e00, 1.524020e-2, -2.517152e-6, 1.282417e00, 8.703393e-3, 2.289292e-5, 2.280820e00,
1.918010e-2, 4.484520e-6, 4.833473e-2, 1.641773e-4, -4.654200e-7, 5.589232e-2, -3.455092e-4, -7.388560e-7, 4.634443e-2, -2.658234e-5, 7.757000e-8, 8.997041e-3, 6.329728e-6, -1.939256e-9, 2.284178e-2, -9.941590e-5, 6.787400e-8, 4.350267e-2, -6.839749e-5, -2.714956e-7, 1.348204e-2, 1.091504e-5, 6.903760e-7, 3.106570e-2, -1.665665e-4, -1.590188e-7]
ccsel = np.resize(ccsel, (17, 3))
# Constants of the arguments of the short-period perturbations.
dcargs = [5.0974222e0, -7.8604195454652e2, 3.9584962e0, -5.7533848094674e2, 1.6338070e0, -1.1506769618935e3, 2.5487111e0, -3.9302097727326e2, 4.9255514e0, -5.8849265665348e2, 1.3363463e0, -5.5076098609303e2, 1.6072053e0, -5.2237501616674e2, 1.3629480e0, -
1.1790629318198e3, 5.5657014e0, -1.0977134971135e3, 5.0708205e0, -1.5774000881978e2, 3.9318944e0, 5.2963464780000e1, 4.8989497e0, 3.9809289073258e1, 1.3097446e0, 7.7540959633708e1, 3.5147141e0, 7.9618578146517e1, 3.5413158e0, -5.4868336758022e2]
dcargs = np.resize(dcargs, (15, 2))
# Amplitudes ccamps(n,k) of the short-period perturbations.
ccamps = \
[-2.279594e-5, 1.407414e-5, 8.273188e-6, 1.340565e-5, -2.490817e-7, -3.494537e-5, 2.860401e-7, 1.289448e-7, 1.627237e-5, -1.823138e-7, 6.593466e-7, 1.322572e-5, 9.258695e-6, -4.674248e-7, -3.646275e-7, 1.140767e-5, -2.049792e-5, -4.747930e-6, -2.638763e-6, -1.245408e-7, 9.516893e-6, -2.748894e-6, -1.319381e-6, -4.549908e-6, -1.864821e-7, 7.310990e-6, -1.924710e-6, -8.772849e-7, -3.334143e-6, -1.745256e-7, -2.603449e-6, 7.359472e-6, 3.168357e-6, 1.119056e-6, -1.655307e-7, -3.228859e-6,
1.308997e-7, 1.013137e-7, 2.403899e-6, -3.736225e-7, 3.442177e-7, 2.671323e-6, 1.832858e-6, -2.394688e-7, -3.478444e-7, 8.702406e-6, -8.421214e-6, -1.372341e-6, -1.455234e-6, -4.998479e-8, -1.488378e-6, -1.251789e-5, 5.226868e-7, -2.049301e-7, 0.e0, -8.043059e-6, -2.991300e-6, 1.473654e-7, -3.154542e-7, 0.e0, 3.699128e-6, -3.316126e-6, 2.901257e-7, 3.407826e-7, 0.e0, 2.550120e-6, -1.241123e-6, 9.901116e-8, 2.210482e-7, 0.e0, -6.351059e-7, 2.341650e-6, 1.061492e-6, 2.878231e-7, 0.e0]
ccamps = np.resize(ccamps, (15, 5))
# Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
ccsec3 = -7.757020e-8
ccsec = [1.289600e-6, 5.550147e-1, 2.076942e00, 3.102810e-5, 4.035027e00, 3.525565e-1,
9.124190e-6, 9.990265e-1, 2.622706e00, 9.793240e-7, 5.508259e00, 1.559103e01]
ccsec = np.resize(ccsec, (4, 3))
# Sidereal rates.
dcsld = 1.990987e-7 # sidereal rate in longitude
ccsgd = 1.990969e-7 # sidereal rate in mean anomaly
# Constants used in the calculation of the lunar contribution.
cckm = 3.122140e-5
ccmld = 2.661699e-6
ccfdi = 2.399485e-7
# Constants dcargm(i,k) of the arguments of the perturbations of the motion
# of the moon.
dcargm = [5.1679830e0, 8.3286911095275e3, 5.4913150e0, -
7.2140632838100e3, 5.9598530e0, 1.5542754389685e4]
dcargm = np.resize(dcargm, (3, 2))
# Amplitudes ccampm(n,k) of the perturbations of the moon.
ccampm = [1.097594e-1, 2.896773e-7, 5.450474e-2, 1.438491e-7, -2.223581e-2, 5.083103e-8,
1.002548e-2, -2.291823e-8, 1.148966e-2, 5.658888e-8, 8.249439e-3, 4.063015e-8]
ccampm = np.resize(ccampm, (3, 4))
# ccpamv(k)=a*m*dl,dt (planets), dc1mme=1-mass(earth+moon)
ccpamv = [8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12]
dc1mme = 0.99999696e0
# Time arguments.
dt = (dje - dcto) / dcjul
tvec = np.array([1e0, dt, dt * dt])
# Values of all elements for the instant(aneous?) dje.
temp = idlMod(np.dot(dcfel, tvec), dc2pi)
dml = temp[0]
forbel = temp[1:8]
g = forbel[0] # old fortran equivalence
deps = idlMod(np.sum(tvec * dceps), dc2pi)
sorbel = idlMod(np.dot(ccsel, tvec), dc2pi)
e = sorbel[0] # old fortran equivalence
# Secular perturbations in longitude.
dummy = np.cos(2.0)
sn = np.sin(idlMod(np.dot(ccsec[::, 1:3], tvec[0:2]), cc2pi))
# Periodic perturbations of the emb (earth-moon barycenter).
pertl = np.sum(ccsec[::, 0] * sn) + (dt * ccsec3 * sn[2])
pertld = 0.0
pertr = 0.0
pertrd = 0.0
for k in smo.range(15):
a = idlMod((dcargs[k, 0] + dt * dcargs[k, 1]), dc2pi)
cosa = np.cos(a)
sina = np.sin(a)
pertl = pertl + ccamps[k, 0] * cosa + ccamps[k, 1] * sina
pertr = pertr + ccamps[k, 2] * cosa + ccamps[k, 3] * sina
if k < 11:
pertld = pertld + (ccamps[k, 1] * cosa -
ccamps[k, 0] * sina) * ccamps[k, 4]
pertrd = pertrd + (ccamps[k, 3] * cosa -
ccamps[k, 2] * sina) * ccamps[k, 4]
# Elliptic part of the motion of the emb.
phi = (e * e / 4e0) * (((8e0 / e) - e) * np.sin(g) + 5 *
np.sin(2 * g) + (13 / 3e0) * e * np.sin(3 * g))
f = g + phi
sinf = np.sin(f)
cosf = np.cos(f)
dpsi = (dc1 - e * e) / (dc1 + e * cosf)
phid = 2 * e * ccsgd * ((1 + 1.5 * e * e) * cosf +
e * (1.25 - 0.5 * sinf * sinf))
psid = ccsgd * e * sinf / np.sqrt(dc1 - e * e)
# pypipeline/experiment_runner.py
#!/usr/bin/python
#
import os
import sys
import getopt
import math
import tempfile
import stat
import re
import shlex
import time
import subprocess
from subprocess import Popen
import glob
import topological
from util import get_new_directory
from util import get_new_file
from collections import defaultdict
from pypipeline.pipeline import Stage, PipelineRunner, RootStage, NamedStage
def get_subset(expparams_list, **keywords):
'''Gets the subset of ExpParams objects for which all the keywords specified
are also parameters for that ExpParams.
'''
subset = []
for expparams in expparams_list:
# Check whether this stage matches all the key/value pairs specified.
contains_all = True
for k,v in keywords.items():
if not expparams.get(k) == v:
contains_all = False
break
if contains_all:
subset.append(expparams)
return subset
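# Illustrative usage (hypothetical parameter names): if some ExpParams in expparams_list
# were created with dataset="conll" and seed=1, then get_subset(expparams_list,
# dataset="conll", seed=1) returns only those whose parameters match both pairs.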
class ExpParams(Stage):
def __init__(self, dictionary=None, **keywords):
Stage.__init__(self)
self.params = {}
self.exclude_name_keys = set()
self.exclude_arg_keys = set()
if dictionary:
self.params.update(dictionary)
if keywords:
self.params.update(keywords)
self.kvsep = "\t"
self.paramsep = "\n"
self.none_string = ""
self.key_order = None
# The prefix for dummy keys
self.dummy_key_prefix = "__arg__"
# The separator for key/value parameters in the argument string
self.args_kvsep = " "
self.script_fns = []
def __str__(self):
return "ExpParams[params=%s exclude_name_keys=%s exclude_arg_keys=%s]" % \
(str(self.params), str(self.exclude_name_keys), str(self.exclude_arg_keys))
def create_stage_script(self, exp_dir):
'''Creates and returns the experiment script and writes the expparams.txt file.
Overriding method for Stage.
'''
script = ""
for script_fn in self.script_fns:
script += script_fn(self, exp_dir)
# Creates and returns the experiment script string.
script += self.create_experiment_script(exp_dir)
# Write out the experiment parameters to a file
# Do this after create_experiment_script in case there are additions to the parameters
# made by that call.
self.write(os.path.join(exp_dir, "expparams.txt"))
return script
def __add__(self, other):
''' Overloading operator + '''
return self.concat(other)
def concat(self, other):
'''Returns a copy of self plus all the parameters of other.
Note that other's params override self.
'''
if isinstance(other, ExpParams):
new_exp = other.get_instance()
else:
new_exp = self.get_instance()
new_exp.params.update(self.params)
new_exp.exclude_name_keys.update(self.exclude_name_keys)
new_exp.exclude_arg_keys.update(self.exclude_arg_keys)
if isinstance(other, ExpParams):
new_exp.params.update(other.params)
new_exp.exclude_name_keys.update(other.exclude_name_keys)
new_exp.exclude_arg_keys.update(other.exclude_arg_keys)
else:
new_exp.params.update(other)
return new_exp
def copy_with(self, **keywords):
return self.concat(keywords)
def update(self, **keywords):
''' Adds the keywords as parameters. '''
self.params.update(keywords)
def set(self, key, value, incl_name=True, incl_arg=True):
self.params[key] = value
self.set_incl_name(key, incl_name)
self.set_incl_arg(key, incl_arg)
def set_incl_name(self, key, incl_name):
if not incl_name:
self.exclude_name_keys.add(key)
elif key in self.exclude_name_keys:
self.exclude_name_keys.remove(key)
def set_incl_arg(self, key, incl_arg):
if not incl_arg:
self.exclude_arg_keys.add(key)
elif key in self.exclude_arg_keys:
self.exclude_arg_keys.remove(key)
def remove(self, key):
if key in self.params:
del self.params[key]
def get(self, key):
''' Returns the value with its true type '''
return self.params.get(key,None)
def keys(self):
return self.params.keys()
def getstr(self, key):
''' Returns a string version of the value '''
return self._get_as_str(self.get(key))
def add_arg(self, arg):
''' Adds a command line argument which will be printed without its key.'''
dummy_key = self.dummy_key_prefix + str(len(self.params))
self.set(dummy_key, arg, True, True)
def read(self, path):
''' Read parameter names and values from a file '''
filestr = "".join(open(path, 'r').readlines())
for param in filestr.split(self.paramsep):
if param == '':
continue
key,value,exclude_name,exclude_arg = param.split(self.kvsep)
self.params[key] = self._attempt_to_coerce(value)
if exclude_name == "True":
self.exclude_name_keys.add(key)
if exclude_arg == "True":
self.exclude_arg_keys.add(key)
def write(self, path):
''' Write out parameter names and values to a file '''
out = open(path, 'w')
for key,value,exclude_name,exclude_arg in self._get_string_params():
out.write(self.kvsep.join([key, value, exclude_name, exclude_arg]) + self.paramsep)
out.close()
def get_name(self):
''' Returns the name of this experiment '''
name = []
for key in self.get_name_key_order():
value = self.get(key)
if key not in self.exclude_name_keys:
name.append(self._get_as_str(value).replace(",","-"))
return "_".join(name)
def get_args(self):
''' Returns a string consisting of the arguments defined by the parameters of this experiment '''
args = ""
# Add the key/value arguments.
for key,value in sorted(self.params.items()):
if key not in self.exclude_arg_keys and not key.startswith(self.dummy_key_prefix):
if value is None:
args += "--%s " % (self._get_as_str(key))
else:
args += "--%s%s%s " % (self._get_as_str(key), self.args_kvsep, self._get_as_str(value))
# Add the additional command line arguments.
for key,value in self.params.items():
if key not in self.exclude_arg_keys and key.startswith(self.dummy_key_prefix):
args += "%s " % (self._get_as_str(value))
return args
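# Illustrative example (hypothetical keys): ExpParams(lr=0.1, optimizer="sgd").get_args()
# yields "--lr 0.1 --optimizer sgd " since keys are emitted in sorted order with the
# " " key/value separator; PythonExpParams uses "=" instead, giving "--lr=0.1 --optimizer=sgd ".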
def _get_as_str(self, value):
''' Converts the value to a string '''
if value is None:
return self.none_string
if isinstance(value, int):
return str(value)
elif isinstance(value, float):
return "%g" % (value)
elif isinstance(value, str):
return value
else:
return str(value)
def _attempt_to_coerce(self, value):
if value == self.none_string:
return None
# Note: we could try to first convert to an int,
# and fall back to a float, but it's probably easier to
# start with a float and stay there.
try:
value = float(value)
except ValueError:
pass
return value
def _get_string_params(self):
sps = []
for key in self.params:
exclude_name = key in self.exclude_name_keys
exclude_arg = key in self.exclude_arg_keys
sps.append((key, self.params[key], exclude_name, exclude_arg))
return map(lambda x:map(self._get_as_str, x), sps)
def get_name_key_order(self):
'''Gets the cached name key order if present, otherwise computes it anew.'''
if self.key_order:
return self.key_order
else:
return self._get_name_key_order()
def _get_name_key_order(self):
'''Creates and returns the name key order.'''
key_order = []
initial_keys = self.get_initial_keys()
all_keys = sorted(self.params.keys())
for key in initial_keys:
if key in all_keys:
key_order.append(key)
for key in all_keys:
if key not in initial_keys:
key_order.append(key)
return key_order
def get_initial_keys(self):
''' OVERRIDE THIS METHOD '''
return []
def get_instance(self):
''' OVERRIDE THIS METHOD '''
return ExpParams()
def create_experiment_script(self, exp_dir):
'''
OVERRIDE THIS METHOD.
Returns a str to be written out as the experiment script
'''
pass
class JavaExpParams(ExpParams):
def __init__(self, dictionary=None, **keywords):
if dictionary is None:
dictionary = {}
dictionary.update(keywords)
ExpParams.__init__(self,dictionary)
self.hprof = None
self.set("java_args", "", incl_arg=False, incl_name=False)
def get_java_args(self):
return self._get_java_args(self.work_mem_megs)
def _get_java_args(self, total_work_mem_megs):
'''Returns reasonable JVM args based on the total megabytes available'''
work_mem_megs = total_work_mem_megs
if work_mem_megs >= 512+128+256:
# Subtract off some overhead for the JVM
work_mem_megs -= 512
# Subtract off some overhead for the PermSize
max_perm_size = 128
work_mem_megs -= max_perm_size
assert work_mem_megs >= 256, "work_mem_megs=%f" % (work_mem_megs)
else:
work_mem_megs -= 32
max_perm_size = 32
java_args = " -server -ea -Dfile.encoding=UTF8 "
java_args += " -Xms%dm -Xmx%dm -Xss4m" % (work_mem_megs, work_mem_megs)
java_args += " -XX:MaxPermSize=%dm " % (max_perm_size)
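# Worked example (illustrative): with total_work_mem_megs=2048 the branch above yields
# work_mem_megs = 2048 - 512 - 128 = 1408 and max_perm_size = 128, so the flags so far
# read " -Xms1408m -Xmx1408m -Xss4m -XX:MaxPermSize=128m".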
# Read more on garbage collection parameters here:
# http://www.oracle.com/technetwork/java/javase/gc-tuning-6-140523.html#cms
threads = self.get("threads")
if threads <= 1:
# Added to ensure parallel garbage collection is NOT running.
java_args += " -XX:-UseParallelGC -XX:-UseParNewGC -XX:+UseSerialGC"
else:
# Alt1: java_args += " -XX:ParallelGCThreads=%d -XX:+UseParallelGC -XX:+UseParallelOldGC" % (threads)
# Alt2: java_args += " -XX:ConcGCThreads=%d -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode" % (threads)
#
# Alt1 is best if throughput is the most important and pauses of up to 1 second
# are acceptable. This is almost always true of experiments.
# Alt2 may cause issues on a grid as it could use too much parallelism, since the garbage collection
# runs concurrently with the other application threads.
java_args += " -XX:ParallelGCThreads=%d -XX:+UseParallelGC -XX:+UseParallelOldGC" % (threads)
#java_args += " -verbose:gc"
if self.hprof == "cpu":
self.update(java_args = self.get("java_args") + " -agentlib:hprof=cpu=samples,depth=7,interval=10 ")
elif self.hprof == "heap":
self.update(java_args = self.get("java_args") + " -agentlib:hprof=heap=sites,depth=7 ")
elif self.hprof is not None:
raise Exception("Unknown argument for hprof: " + self.hprof)
extra_java_args = self.get("java_args")
if extra_java_args is not None:
java_args += " " + extra_java_args
return java_args
def get_instance(self):
''' OVERRIDE THIS METHOD '''
return JavaExpParams()
class PythonExpParams(ExpParams):
def __init__(self, dictionary=None, **keywords):
if dictionary is None:
dictionary = {}
dictionary.update(keywords)
ExpParams.__init__(self,dictionary)
self.args_kvsep = "="
def get_instance(self):
''' OVERRIDE THIS METHOD '''
return PythonExpParams()
def get_all_keys(expparams):
'''Gets the set of all keys for these expparams.'''
all_keys = set()
for expparam in expparams:
for key,_ in expparam.params.items():
all_keys.add(key)
return all_keys
def get_nonunique_keys(expparams):
'''Gets the set of nonunique keys for these expparams.'''
key2vals = defaultdict(set)
for expparam in expparams:
for key,value in expparam.params.items():
key2vals[key].add(value)
nonunique_keys = set()
for key in key2vals:
if len(key2vals[key]) > 1:
nonunique_keys.add(key)
return nonunique_keys
def get_kept_keys(expparams):
'''Gets the union of the nonunique keys and the initial keys specified by the ExpParams.'''
nonunique_keys = get_nonunique_keys(expparams)
kept_keys = set()
kept_keys.update(nonunique_keys)
for expparam in expparams:
kept_keys.update(set(expparam.get_initial_keys()))
return kept_keys
def get_exclude_name_keys(expparams):
'''Gets all the keys which are excluded from the name of some ExpParam.'''
excluded = set()
for expparam in expparams:
excluded = excluded.union(expparam.exclude_name_keys)
return excluded
def shorten_names(expparams):
'''Shortens the names of a set of expparams.'''
kept_keys = get_kept_keys(expparams)
"""
Handle the email *forward* and *reply* phases. There are 3 actors:
- contact: who sends emails to <EMAIL> address
- SL email handler (this script)
- user personal email: to be protected. Should never leak to contact.
This script makes sure that in the forward phase, the email that is forwarded to the user's personal email has the following
envelope and header fields:
Envelope:
mail from: @contact
rcpt to: @personal_email
Header:
From: @contact
To: <EMAIL> # so user knows this email is sent to alias
Reply-to: <EMAIL> # magic HERE
And in the reply phase:
Envelope:
mail from: @contact
rcpt to: @contact
Header:
From: <EMAIL> # so for contact the email comes from alias. magic HERE
To: @contact
The <EMAIL> allows hiding the user's personal email when the user clicks "Reply" to the forwarded email.
It should contain the following info:
- alias
- @contact
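Illustrative walk-through (all addresses are made up): in the forward phase, a message
from boss@company.example to hello@alias.example reaches the user's personal mailbox with
"From: boss@company.example", "To: hello@alias.example" and a generated reverse-alias
Reply-To; in the reply phase, answering that reverse alias produces a message that the
contact receives as "From: hello@alias.example".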
"""
import argparse
import asyncio
import email
import os
import time
import uuid
from email import encoders
from email.encoders import encode_noop
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.utils import formataddr, make_msgid, formatdate, getaddresses
from io import BytesIO
from smtplib import SMTP, SMTPRecipientsRefused, SMTPServerDisconnected
from typing import List, Tuple, Optional
import aiosmtpd
import aiospamc
import arrow
import spf
from aiosmtpd.controller import Controller
from aiosmtpd.smtp import Envelope
from sqlalchemy.exc import IntegrityError
from app import pgp_utils, s3
from app.alias_utils import try_auto_create
from app.config import (
EMAIL_DOMAIN,
POSTFIX_SERVER,
URL,
POSTFIX_SUBMISSION_TLS,
UNSUBSCRIBER,
LOAD_PGP_EMAIL_HANDLER,
ENFORCE_SPF,
ALERT_REVERSE_ALIAS_UNKNOWN_MAILBOX,
ALERT_BOUNCE_EMAIL,
ALERT_SPAM_EMAIL,
ALERT_SPF,
POSTFIX_PORT,
SENDER,
SENDER_DIR,
SPAMASSASSIN_HOST,
MAX_SPAM_SCORE,
MAX_REPLY_PHASE_SPAM_SCORE,
ALERT_SEND_EMAIL_CYCLE,
ALERT_MAILBOX_IS_ALIAS,
PGP_SENDER_PRIVATE_KEY,
ALERT_BOUNCE_EMAIL_REPLY_PHASE,
NOREPLY,
)
from app.email_utils import (
send_email,
add_dkim_signature,
add_or_replace_header,
delete_header,
render,
get_orig_message_from_bounce,
delete_all_headers_except,
get_spam_info,
get_orig_message_from_spamassassin_report,
parseaddr_unicode,
send_email_with_rate_control,
get_email_domain_part,
copy,
to_bytes,
get_header_from_bounce,
send_email_at_most_times,
is_valid_alias_address_domain,
should_add_dkim_signature,
add_header,
get_header_unicode,
generate_reply_email,
is_reply_email,
normalize_reply_email,
is_valid_email,
replace,
)
from app.extensions import db
from app.greylisting import greylisting_needed
from app.log import LOG
from app.models import (
Alias,
Contact,
EmailLog,
User,
RefusedEmail,
Mailbox,
)
from app.pgp_utils import PGPException, sign_data_with_pgpy, sign_data
from app.spamassassin_utils import SpamAssassin
from app.utils import random_string
from init_app import load_pgp_public_keys
from server import create_app, create_light_app
# forward or reply
_DIRECTION = "X-SimpleLogin-Type"
_IP_HEADER = "X-SimpleLogin-Client-IP"
_EMAIL_LOG_ID_HEADER = "X-SimpleLogin-EmailLog-ID"
_MESSAGE_ID = "Message-ID"
_ENVELOPE_FROM = "X-SimpleLogin-Envelope-From"
_MIME_HEADERS = [
"MIME-Version",
"Content-Type",
"Content-Disposition",
"Content-Transfer-Encoding",
]
_MIME_HEADERS = [h.lower() for h in _MIME_HEADERS]
# fix the database connection leak issue
# use this method instead of create_app
def new_app():
app = create_light_app()
@app.teardown_appcontext
def shutdown_session(response_or_exc):
# same as shutdown_session() in flask-sqlalchemy but this is not enough
db.session.remove()
# dispose the engine too
db.engine.dispose()
return app
def get_or_create_contact(from_header: str, mail_from: str, alias: Alias) -> Contact:
"""
from_header is the RFC 2047 format FROM header
"""
contact_name, contact_email = parseaddr_unicode(from_header)
if not is_valid_email(contact_email):
# From header is wrongly formatted, try with mail_from
if mail_from and mail_from != "<>":
LOG.warning(
"Cannot parse email from from_header %s, parse from mail_from %s",
from_header,
mail_from,
)
_, contact_email = parseaddr_unicode(mail_from)
if not is_valid_email(contact_email):
LOG.warning(
"invalid contact email %s. Parse from %s %s",
contact_email,
from_header,
mail_from,
)
# either reuse a contact with empty email or create a new contact with empty email
contact_email = ""
contact = Contact.get_by(alias_id=alias.id, website_email=contact_email)
if contact:
if contact.name != contact_name:
LOG.d(
"Update contact %s name %s to %s",
contact,
contact.name,
contact_name,
)
contact.name = contact_name
db.session.commit()
# contact created in the past does not have mail_from and from_header field
if not contact.mail_from and mail_from:
LOG.d(
"Set contact mail_from %s: %s to %s",
contact,
contact.mail_from,
mail_from,
)
contact.mail_from = mail_from
db.session.commit()
if not contact.from_header and from_header:
LOG.d(
"Set contact from_header %s: %s to %s",
contact,
contact.from_header,
from_header,
)
contact.from_header = from_header
db.session.commit()
else:
LOG.d(
"create contact %s for alias %s",
contact_email,
alias,
)
try:
contact = Contact.create(
user_id=alias.user_id,
alias_id=alias.id,
website_email=contact_email,
name=contact_name,
mail_from=mail_from,
from_header=from_header,
reply_email=generate_reply_email(contact_email, alias.user)
if is_valid_email(contact_email)
else NOREPLY,
)
if not contact_email:
LOG.d("Create a contact with invalid email for %s", alias)
contact.invalid_email = True
db.session.commit()
except IntegrityError:
LOG.warning("Contact %s %s already exist", alias, contact_email)
db.session.rollback()
contact = Contact.get_by(alias_id=alias.id, website_email=contact_email)
return contact
def replace_header_when_forward(msg: Message, alias: Alias, header: str):
"""
Replace CC or To header by Reply emails in forward phase
"""
new_addrs: List[str] = []
headers = msg.get_all(header, [])
for contact_name, contact_email in getaddresses(headers):
# convert back to original then parse again to make sure contact_name is unicode
addr = formataddr((contact_name, contact_email))
contact_name, _ = parseaddr_unicode(addr)
# no transformation when alias is already in the header
if contact_email == alias.email:
new_addrs.append(addr)
continue
if not is_valid_email(contact_email):
LOG.warning("invalid contact email %s. %s. Skip", contact_email, headers)
continue
contact = Contact.get_by(alias_id=alias.id, website_email=contact_email)
if contact:
# update the contact name if needed
if contact.name != contact_name:
LOG.d(
"Update contact %s name %s to %s",
contact,
contact.name,
contact_name,
)
contact.name = contact_name
db.session.commit()
else:
LOG.debug(
"create contact for alias %s and email %s, header %s",
alias,
contact_email,
header,
)
try:
contact = Contact.create(
user_id=alias.user_id,
alias_id=alias.id,
website_email=contact_email,
name=contact_name,
reply_email=generate_reply_email(contact_email, alias.user),
is_cc=header.lower() == "cc",
from_header=addr,
)
db.session.commit()
except IntegrityError:
LOG.warning("Contact %s %s already exist", alias, contact_email)
db.session.rollback()
contact = Contact.get_by(alias_id=alias.id, website_email=contact_email)
new_addrs.append(contact.new_addr())
if new_addrs:
new_header = ",".join(new_addrs)
LOG.d("Replace %s header, old: %s, new: %s", header, msg[header], new_header)
add_or_replace_header(msg, header, new_header)
else:
LOG.d("Delete %s header, old value %s", header, msg[header])
delete_header(msg, header)
def replace_header_when_reply(msg: Message, alias: Alias, header: str):
"""
Replace CC or To Reply emails by original emails
"""
new_addrs: List[str] = []
for _, reply_email in getaddresses(msg.get_all(header, [])):
# no transformation when alias is already in the header
if reply_email == alias.email:
continue
contact = Contact.get_by(reply_email=reply_email)
if not contact:
LOG.warning(
"%s email in reply phase %s must be reply emails", header, reply_email
)
# still keep this email in header
new_addrs.append(reply_email)
else:
new_addrs.append(formataddr((contact.name, contact.website_email)))
if new_addrs:
new_header = ",".join(new_addrs)
LOG.d("Replace %s header, old: %s, new: %s", header, msg[header], new_header)
add_or_replace_header(msg, header, new_header)
else:
LOG.d("delete the %s header. Old value %s", header, msg[header])
delete_header(msg, header)
def should_append_alias(msg: Message, address: str):
"""whether an alias should be appended to TO header in message"""
# # force convert header to string, sometimes addrs is Header object
if msg["To"] and address.lower() in str(msg["To"]).lower():
return False
if msg["Cc"] and address.lower() in str(msg["Cc"]).lower():
return False
return True
def prepare_pgp_message(
orig_msg: Message, pgp_fingerprint: str, public_key: str, can_sign: bool = False
) -> Message:
msg = MIMEMultipart("encrypted", protocol="application/pgp-encrypted")
# clone orig message to avoid modifying it
clone_msg = copy(orig_msg)
# copy all headers from original message except all standard MIME headers
for i in reversed(range(len(clone_msg._headers))):
header_name = clone_msg._headers[i][0].lower()
if header_name.lower() not in _MIME_HEADERS:
msg[header_name] = clone_msg._headers[i][1]
# Delete unnecessary headers in clone_msg except _MIME_HEADERS to save space
delete_all_headers_except(
clone_msg,
_MIME_HEADERS,
)
if clone_msg["Content-Type"] is None:
LOG.d("Content-Type missing")
clone_msg["Content-Type"] = "text/plain"
if clone_msg["Mime-Version"] is None:
LOG.d("Mime-Version missing")
clone_msg["Mime-Version"] = "1.0"
first = MIMEApplication(
_subtype="pgp-encrypted", _encoder=encoders.encode_7or8bit, _data=""
)
first.set_payload("Version: 1")
msg.attach(first)
if can_sign and PGP_SENDER_PRIVATE_KEY:
LOG.d("Sign msg")
clone_msg = sign_msg(clone_msg)
# use pgpy as fallback
second = MIMEApplication(
"octet-stream", _encoder=encoders.encode_7or8bit, name="encrypted.asc"
)
second.add_header("Content-Disposition", 'inline; filename="encrypted.asc"')
# encrypt
# use pgpy as fallback
msg_bytes = to_bytes(clone_msg)
try:
encrypted_data = pgp_utils.encrypt_file(BytesIO(msg_bytes), pgp_fingerprint)
second.set_payload(encrypted_data)
except PGPException:
LOG.warning("Cannot encrypt using python-gnupg, use pgpy")
encrypted = pgp_utils.encrypt_file_with_pgpy(msg_bytes, public_key)
second.set_payload(str(encrypted))
msg.attach(second)
return msg
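# Resulting MIME structure (sketch): a multipart/encrypted container whose first part is
# application/pgp-encrypted holding "Version: 1" and whose second part is an
# application/octet-stream "encrypted.asc" holding the PGP-encrypted original message.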
def sign_msg(msg: Message) -> Message:
container = MIMEMultipart(
"signed", protocol="application/pgp-signature", micalg="pgp-sha256"
)
container.attach(msg)
signature = MIMEApplication(
_subtype="pgp-signature", name="signature.asc", _data="", _encoder=encode_noop
)
signature.add_header("Content-Disposition", 'attachment; filename="signature.asc"')
try:
signature.set_payload(sign_data(to_bytes(msg).replace(b"\n", b"\r\n")))
except Exception:
LOG.exception("Cannot sign, try using pgpy")
signature.set_payload(
sign_data_with_pgpy(to_bytes(msg).replace(b"\n", b"\r\n"))
)
container.attach(signature)
return container
def handle_email_sent_to_ourself(alias, mailbox, msg: Message, user):
# store the refused email
random_name = str(uuid.uuid4())
full_report_path = f"refused-emails/cycle-{random_name}.eml"
s3.upload_email_from_bytesio(full_report_path, BytesIO(to_bytes(msg)), random_name)
refused_email = RefusedEmail.create(
path=None, full_report_path=full_report_path, user_id=alias.user_id
)
db.session.commit()
LOG.d("Create refused email %s", refused_email)
# link available for 6 days as it gets deleted in 7 days
refused_email_url = refused_email.get_url(expires_in=518400)
send_email_at_most_times(
user,
ALERT_SEND_EMAIL_CYCLE,
mailbox.email,
f"Email sent to {alias.email} from its own mailbox {mailbox.email}",
render(
"transactional/cycle-email.txt",
name=user.name or "",
alias=alias,
mailbox=mailbox,
refused_email_url=refused_email_url,
),
render(
"transactional/cycle-email.html",
name=user.name or "",
alias=alias,
mailbox=mailbox,
refused_email_url=refused_email_url,
),
)
def handle_forward(envelope, msg: Message, rcpt_to: str) -> List[Tuple[bool, str]]:
"""return an array of SMTP status (is_success, smtp_status)
is_success indicates whether an email has been delivered and
smtp_status is the SMTP Status ("250 Message accepted", "550 Non-existent email address", etc)
"""
address = rcpt_to # alias@SL
alias = Alias.get_by(email=address)
if not alias:
LOG.d("alias %s not exist. Try to see if it can be created on the fly", address)
alias = try_auto_create(address)
if not alias:
LOG.d("alias %s cannot be created on-the-fly, return 550", address)
return [(False, "550 SL E3 Email not exist")]
user = alias.user
""" Plotting of conformation distributions. """
import copy
import itertools
import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from scipy.stats import entropy, gaussian_kde
from scipy.spatial import distance_matrix
from typing import Dict, List, Union
from typing_extensions import Literal
from rdkit import Chem
from rdkit.Chem import AllChem, rdMolTransforms, rdchem, rdmolops
from rdkit.Chem.rdDistGeom import GetMoleculeBoundsMatrix
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem.Lipinski import RotatableBondSmarts
import seaborn as sns
# noinspection PyPackageRequirements
from tap import Tap
from conformation.compare_pairwise_distance_histograms import compute_energy_weights
class Args(Tap):
"""
System arguments.
"""
data_path: str # Path to RDKit binary file containing conformations
num_energy_decimals: int = 3 # Number of energy decimals used for computing empirical minimized energy probability
weights: bool = False # Whether or not to weight histograms by empirical Boltzmann probability
temp: float = 300.0 # Temperature for Boltzmann weighting (weights = True)
svd_tol: float = 1e-5 # Tolerance below which a singular value is considered 0.
hist_bin_width: float = 0.1 # Bin width for histograms
corr_heatmap_font_scale: float = 0.4 # Font scale for pairwise torsion correlations heatmap
mode_count_font_scale: float = 0.6 # Font scale for pairwise torsion correlations heatmap
mode_count_dpi: int = 200
corr_heatmap_annot_size: float = 6.0 # Font size for annotations in pairwise torsion correlations heatmap
corr_heatmap_dpi: int = 200 # DPI for pairwise torsion correlations heatmap
joint_hist_bw_adjust: float = 0.25 # KDE bw_adjust value for pairwise joint histogram of torsions plot
entropy_bins: int = 10
save_dir: str # Path to directory containing output files
# noinspection PyUnresolvedReferences
def compute_energy(mol: rdchem.Mol, minimize: bool = False) -> pd.DataFrame:
"""
Compute MMFF energy of each conformation.
:param mol: RDKit mol object containing conformations.
:param minimize: Whether or not to compute minimized energy.
:return: Dataframe.
"""
mol = Chem.Mol(mol)
if minimize:
max_iters = 200
else:
max_iters = 0
res = AllChem.MMFFOptimizeMoleculeConfs(mol, maxIters=max_iters, numThreads=0)
energies = []
for i in range(len(res)):
energies.append(res[i][1])
energies = np.array(energies)
df = pd.DataFrame(energies)
if minimize:
df = df.rename(columns={0: "Minimized Energy (kcal/mol)"})
else:
df = df.rename(columns={0: "Energy (kcal/mol)"})
return df
# noinspection PyUnresolvedReferences
def compute_torsions(mol: rdchem.Mol, bonds: np.ndarray) -> pd.DataFrame:
"""
Compute torsion angles for a set of bonds defined by pairs of atoms.
:param mol: RDKit mol object containing conformations.
:param bonds: Bonds defined by begin and end atoms.
:return: Dataframe.
"""
atom_indices = []
column_names = dict()
for i, bond in enumerate(bonds):
# Get atom indices for the ith bond
atom_a_idx = int(bond[0])
atom_b_idx = int(bond[1])
atom_a_symbol = mol.GetAtomWithIdx(atom_a_idx).GetSymbol()
atom_b_symbol = mol.GetAtomWithIdx(atom_b_idx).GetSymbol()
# Select a neighbor for each atom in order to form a dihedral
atom_a_neighbors = mol.GetAtomWithIdx(atom_a_idx).GetNeighbors()
atom_a_neighbor_index = [x.GetIdx() for x in atom_a_neighbors if x.GetIdx() != atom_b_idx][0]
atom_b_neighbors = mol.GetAtomWithIdx(atom_b_idx).GetNeighbors()
atom_b_neighbor_index = [x.GetIdx() for x in atom_b_neighbors if x.GetIdx() != atom_a_idx][0]
atom_indices.append([atom_a_neighbor_index, atom_a_idx, atom_b_idx, atom_b_neighbor_index])
column_names[i] = f'{bond[0]}-{bond[1]} | {atom_a_symbol} {atom_b_symbol}'
results = None
for i in range(len(bonds)):
angles = []
for j in range(mol.GetNumConformers()):
c = mol.GetConformer(j)
angles.append(rdMolTransforms.GetDihedralRad(c, atom_indices[i][0], atom_indices[i][1],
atom_indices[i][2], atom_indices[i][3]))
angles = np.array(angles)
if i == 0:
results = angles[:, np.newaxis]
else:
# noinspection PyUnboundLocalVariable
results = np.concatenate((results, angles[:, np.newaxis]), axis=1)
df = pd.DataFrame(results)
df = df.rename(columns=column_names)
return df
# noinspection PyUnresolvedReferences
def compute_rotatable_bond_torsions(mol: rdchem.Mol) -> pd.DataFrame:
"""
Compute torsion angles for rotatable bonds.
:param mol: RDKit mol object containing conformations.
:return: Dataframe.
"""
rotatable_bonds = mol.GetSubstructMatches(RotatableBondSmarts)
df = compute_torsions(mol, np.array(rotatable_bonds))
return df
# noinspection PyUnresolvedReferences
def compute_aromatic_ring_bond_torsions(mol: rdchem.Mol) -> pd.DataFrame:
"""
Compute torsion angles for aromatic ring bonds.
:param mol: RDKit mol object containing conformations.
:return: Dataframe.
"""
aromatic_bonds = []
for bond in mol.GetBonds():
if bond.GetBeginAtom().GetIsAromatic() and bond.GetEndAtom().GetIsAromatic():
aromatic_bonds.append([bond.GetBeginAtom().GetIdx(), bond.GetEndAtom().GetIdx()])
df = compute_torsions(mol, np.array(aromatic_bonds))
return df
# noinspection PyUnresolvedReferences
def compute_non_aromatic_ring_bond_torsions(mol: rdchem.Mol) -> pd.DataFrame:
"""
Compute torsion angles for non-aromatic ring bonds.
:param mol: RDKit mol object containing conformations.
:return: Dataframe.
"""
rotatable_bonds = mol.GetSubstructMatches(RotatableBondSmarts)
non_aromatic_ring_bonds = []
for bond in mol.GetBonds():
if not bond.GetBeginAtom().GetIsAromatic() or not bond.GetEndAtom().GetIsAromatic():
if (bond.GetBeginAtom().GetIdx(), bond.GetEndAtom().GetIdx()) not in rotatable_bonds:
if bond.IsInRing():
non_aromatic_ring_bonds.append([bond.GetBeginAtom().GetIdx(), bond.GetEndAtom().GetIdx()])
df = compute_torsions(mol, np.array(non_aromatic_ring_bonds))
return df
# noinspection PyUnresolvedReferences
def compute_non_rotatable_non_ring_bond_torsions(mol: rdchem.Mol) -> pd.DataFrame:
"""
Compute torsion angles for non-rotatable non-ring bonds.
:param mol: RDKit mol object containing conformations.
:return: Dataframe.
"""
rotatable_bonds = mol.GetSubstructMatches(RotatableBondSmarts)
non_rotatable_non_ring_bonds = []
for bond in mol.GetBonds():
if (bond.GetBeginAtom().GetIdx(), bond.GetEndAtom().GetIdx()) not in rotatable_bonds:
if not bond.IsInRing() and len(bond.GetBeginAtom().GetNeighbors()) > 1 and \
len(bond.GetEndAtom().GetNeighbors()) > 1:
non_rotatable_non_ring_bonds.append([bond.GetBeginAtom().GetIdx(), bond.GetEndAtom().GetIdx()])
df = compute_torsions(mol, np.array(non_rotatable_non_ring_bonds))
return df
# noinspection PyUnresolvedReferences
def compute_distances(mol: rdchem.Mol) -> pd.DataFrame:
"""
Compute atomic pairwise distances.
:param mol: RDKit mol object containing conformations.
:return: DataFrame.
"""
num_atoms = mol.GetNumAtoms()
distances = []
column_names = dict()
results = None
for i in range(mol.GetNumConformers()):
pos = mol.GetConformer(i).GetPositions()
dist_mat = distance_matrix(pos, pos)
tmp = []
for j, k in itertools.combinations(np.arange(num_atoms), 2):
tmp.append(dist_mat[j][k])
distances.append(tmp)
distances = np.array(distances).transpose()
for i, pair in enumerate(itertools.combinations(np.arange(num_atoms), 2)):
j, k = pair
if results is None:
results = distances[i][:, np.newaxis]
else:
results = np.concatenate((results, distances[i][:, np.newaxis]), axis=1)
column_names[i] = f'Distance {j}-{k} (A)'
df = pd.DataFrame(results)
df = df.rename(columns=column_names)
return df
def compute_num_torsion_modes(df: pd.DataFrame, shift: float = 0.1, bw_method: float = 0.1) -> pd.DataFrame:
"""
Compute the number of torsion modes for a set of torsion distributions. The rows of the input DataFrame
correspond to conformations, and the columns correspond to bonds in the molecule. A distribution of torsion
angles for each column is calculated via a kernel density estimate, and the number of modes for a given estimate is
computed using a numerical first derivative of the estimate. Each distribution is shifted by a fixed amount
from 0 to 2\pi, and the minimum mode count amongst all of these windows is recorded.
:param df: DataFrame containing torsion angles (# confs x # bonds).
:param shift: Amount (radians) by which to do incremental modular shifts of the distribution.
:param bw_method: Estimator bandwidth (kde.factor).
:return: DataFrame containing the mode count for each column of the input. Column 0 of this dataframe contains
the bond name (corresponding to input DataFrame column name), and column 1 contains the mode count.
"""
positions = np.arange(0.0, 2 * math.pi, shift)
mode_counts = []
for i in range(df.shape[1]):
min_count = float('inf')
for k in positions:
count = 0
# Compute the kernel estimate
kernel = gaussian_kde((df.iloc[:, i].to_numpy() + math.pi + k) % (2 * math.pi), bw_method=bw_method)
# Compute the kernel value at points between 0 and 2\pi
Z = kernel(positions)
# Compute the first derivative and its sign
diff = np.gradient(Z)
s_diff = np.sign(diff)
# Locate zero crossings and check where the crossing corresponds to a local maximum of the kernel estimate
zc = np.where(s_diff[:-1] != s_diff[1:])[0]
for j in zc:
if s_diff[:-1][j] == 1.0 and s_diff[1:][j] == -1.0:
count += 1
# Record the smallest mode counts
if count < min_count:
min_count = count
mode_counts.append([df.columns[i], min_count])
df = pd.DataFrame(mode_counts)
df = df.rename(columns={0: "Bond", 1: "Mode Count"})
return df
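# Illustrative usage (a sketch, assuming `mol` is an RDKit Mol with embedded conformers):
# compute_num_torsion_modes(compute_rotatable_bond_torsions(mol)) returns one row per
# rotatable bond with the minimum KDE mode count found over all circular shifts.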
def compute_torsion_entropy(df: pd.DataFrame, bin_width: float = 0.1, zero_level: float = 1e-10) -> pd.DataFrame:
"""
Compute entropy of the torsion angles in each column of a DataFrame via a histogram.
:param df: DataFrame containing torsion angles (# confs x # bonds).
:param bin_width: Histogram bin width for the histogram used to compute entropy.
:param zero_level: Replace 0 values in the histogram with this number to avoid computing log of 0 in entropy.
:return: DataFrame containing the entropy for each column of the input. Column 0 of this dataframe contains
the bond name (corresponding to input DataFrame column name), and column 1 contains the entropy.
"""
entropies = []
for i in range(df.shape[1]):
hist = np.histogram(df.iloc[:, i].to_numpy(), bins=np.arange(-math.pi, math.pi, bin_width), density=True)[0]
hist = np.where(hist == 0, zero_level, hist)
entropies.append([df.columns[i], entropy(hist)])
df = pd.DataFrame(entropies)
df = df.rename(columns={0: "Bond", 1: "Entropy"})
return df
# noinspection PyUnresolvedReferences
def plot_torsion_joint_histograms(df: pd.DataFrame, weights: np.ndarray = None, bin_width: float = 0.1,
joint_hist_bw_adjust: float = 0.25) -> matplotlib.figure.Figure:
"""
Plot pairwise joint histogram of all torsion distributions in the given DataFrame.
:param df: DataFrame of torsion angles for a set of conformations and bonds (# conformations x # bonds).
:param weights: Histogram weights.
:param bin_width: Histogram bin width.
:param joint_hist_bw_adjust: bw_adjust value for kernel density estimate in lower triangle of grid.
:return: Figure.
"""
Description: IDA Python script to recursively descend through ScatterBee shellcode and rebuild an analysable binary. Output can be loaded into IDA with ScatterLoader.py
Author: @malworms
License:
Copyright 2021 PwC UK
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import idaapi, idc, idautils, struct
#script to extract original binaries from ScatterBee encoded binaries used by ShadowPad
#assumes the cursor is on the entry point to the shellcode
#requires the binary to have a code section and a data section
#code section should be the start of the shellcode up until the start of the relocations
#data section should be from the relocations up until the end of the binary
#set of addresses flagged for analysis
todo_addrs = set()
#add the first instruction
todo_addrs.add(idc.here())
#set of addresses that have already been analysed
used_addrs = set()
#list of chains of code execution
chains = []
#special high value used to differentiate normal jumps from fake ones
#not used in most binaries but there just in case a binary has multiple flows of instructions
#into the same instruction
fix_addrs = 0xffffffffffff0000
#base address is the lowest code address of this section
#base the new code at the same address as the old code so that offsets into the data section match
base_addr = idaapi.getseg(idc.here()).start_ea
#offset instructions: list of instructions that are known to be used by ScatterBee to load memory addresses for later use
offset_insns = [idaapi.NN_lea, idaapi.NN_push, idaapi.NN_mov, idaapi.NN_movzx, idaapi.NN_fstp, idaapi.NN_fdiv, idaapi.NN_fcomp,
idaapi.NN_fcom, idaapi.NN_fst, idaapi.NN_fsubr, idaapi.NN_fild, idaapi.NN_fld, idaapi.NN_adc, idaapi.NN_movsx,
idaapi.NN_movsxd, idaapi.NN_cmovnz, idaapi.NN_cmp, idaapi.NN_and, idaapi.NN_inc, idaapi.NN_fadd, idaapi.NN_fmul]
#none offset instructions: list of insns to explicitly ignore when checking code flow
none_offset_insns = [idaapi.NN_add, idaapi.NN_xor, idaapi.NN_pop, idaapi.NN_sub, idaapi.NN_test,
idaapi.NN_dec, idaapi.NN_setz, idaapi.NN_imul, idaapi.NN_nop, idaapi.NN_or, idaapi.NN_bt]
#terrible python, but it does the job of getting the next instruction to analyse
def get_next_addr():
global todo_addrs
global used_addrs
cur_addr = idaapi.BADADDR
while len(todo_addrs) > 0:
cur_addr = todo_addrs.pop()
while cur_addr in used_addrs:
if len(todo_addrs) == 0:
cur_addr = idaapi.BADADDR
break
else:
cur_addr = todo_addrs.pop()
if cur_addr != idaapi.BADADDR:
break
return cur_addr
def InfoMsg(string):
print ("[INFO]: " + string)
def in_code_section(ea):
if idaapi.getseg(ea) is not None and idaapi.getseg(ea).type == 2:
return True
return False
def get_pointer(ea):
if idaapi.get_inf_structure().is_64bit():
return idaapi.get_qword(ea)
else:
return idaapi.get_dword(ea)
def use_addresses(ea, size):
global used_addrs
for i in range(ea, ea+size):
used_addrs.add(i)
#checks if current address is an obfuscated jump based on metadata added by ScatterJump.py
def is_fudge_call(ea):
insn = idaapi.insn_t()
idaapi.decode_insn(insn, ea)
if insn.itype == idaapi.NN_jmp and insn.size == 9:
return True
return False
#jumps through obfuscated calls until it gets to a normal instruction
def get_fudge_dest(ea):
while is_fudge_call(ea):
insn = idaapi.insn_t()
idaapi.decode_insn(insn, ea)
ea = insn.Op1.addr
return ea
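#illustrative example (hypothetical addresses): if 0x1000 holds an obfuscated 9-byte
#"jmp 0x2000" and 0x2000 holds another obfuscated jmp to 0x3000, then
#get_fudge_dest(0x1000) follows both hops and returns 0x3000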
#naive check for known bytes at start of functions
def is_func_start(ea):
if idaapi.get_inf_structure().is_64bit():
if idaapi.get_byte(ea) == 0x55:
return True
if idaapi.get_byte(ea) == 0x51:
return True
if idaapi.get_byte(ea) == 0x53:
return True
if idaapi.get_byte(ea) == 0x52:
return True
if idaapi.get_byte(ea) == 0xe9:
return True
if idaapi.get_word(ea) == 0x8948:
return True
if idaapi.get_word(ea) == 0x8148:
return True
if idaapi.get_word(ea) == 0x8348:
return True
if idaapi.get_word(ea) == 0x8B48:
return True
if idaapi.get_word(ea) == 0x8B4c:
return True
if idaapi.get_word(ea) == 0xb70f:
return True
else:
if idaapi.get_byte(ea) == 0x55 or idaapi.get_byte(ea) == 0x68:
return True
return False
#class to hold information about an original instruction from an obfuscated binary
class ChainLink:
def __init__(self, addr, flow, jump, stack_cmp, assembly, needs_reloc, data_ref):
self.addr = addr
self.new_addr = None
self.flow = flow
self.jump = jump
self.nextLink = None
self.prevLink = None
self.skip = False
self.new_data_ref = None
self.stack_cmp = stack_cmp
self.assembly = assembly
self.needs_reloc = needs_reloc
self.data_ref = data_ref
def addNext(self, node):
self.nextLink = node
node.prevLink = self
#class to hold multiple sequential original instructions
class Chain:
def __init__(self, link=None):
self.startLink = link
self.endLink = link
def addLink(self, link):
if self.startLink == None:
self.startLink = link
self.endLink = link
return True
self.endLink.addNext(link)
self.endLink = link
return True
def appendChain(self, chain):
curLink = chain.startLink
if self.endLink.flow != curLink.addr:
InfoMsg("Chains don't match on append")
print(hex(self.endLink.addr), hex(curLink.addr))
print (self.endLink.flow)
return
self.endLink.nextLink = chain.startLink
chain.startLink.prevLink = self.endLink
self.endLink = chain.endLink
def print(self):
link = self.startLink
while link is not None:
print (hex(link.addr), idc.GetDisasm(link.addr))
link = link.nextLink
#main logic for following a flow of code and storing the relevant information about it
def parse_flow(cur_ea):
#start the current flow chain
chain = Chain()
#grab the current global sets, lists and values
global used_addrs
global todo_addrs
global chains
global fix_addrs
#these are parsed from jumps in control flow, typically branches
#are jumps to code in the same function so try to group them together by
#analysing them in this flow
close_jumps = []
#these are code references that are outside typical control flow
#add them to the list of addresses still to analyse after the current flow has been finished
jumps = []
while True:
insn = idaapi.insn_t()
could_decode = idaapi.decode_insn(insn, cur_ea)
if could_decode == 0:
#failed to decode instruction, pass location of error to user and carry on
InfoMsg("Failed to decode %08x, aborting this chain and continuing, ensure to check for errors in the final file" % cur_ea)
return
use_addresses(cur_ea, insn.size)
#decoded the current instruction, now pull out all the information about control flow from items
###
#capture the target of the next instruction executed in flow
#absolute jumps, and int 3s do not have flow currently
flow = None
#if the instruction can branch control flow to a new place capture it in this variable
#currently don't support jump tables, but equally haven't found any itw yet
jump = None
#if the instruction has a reference to memory or code that may be used indirectly then capture it here
data_ref = None
#if the instruction is a "cmp rsp, 0D744h" then the next instruction is a bl jump to an illegal position
#flag this in the chain to prevent bad control flow being taken
stack_cmp = False
#list to hold temporary instruction bytes
assembly = idaapi.get_bytes(insn.ea, insn.size)
#flag for if this instruction will need a fixup applying
needs_reloc = False
#calls are straightforward, calculate the destination of the jump, and the destination of the flow
if insn.itype == idaapi.NN_call:
#assume all calls return - may not be a good assumption...
flow = get_fudge_dest(insn.ea+insn.size)
if insn.Op1.type == idaapi.o_near:
jump = get_fudge_dest(insn.Op1.addr)
jumps.append(get_fudge_dest(insn.Op1.addr))
needs_reloc = True
elif insn.Op1.type != idaapi.o_reg:
InfoMsg("Unsupported call at %08x" % insn.ea)
return
else:
#this is a "call eax" or similar
pass
#indirect calls come in two forms, both can be parsed in the same way
elif (insn.itype == idaapi.NN_callni or insn.itype == idaapi.NN_callfi):
flow = get_fudge_dest(insn.ea+insn.size)
if insn.Op1.type == idaapi.o_mem:
needs_reloc = True
#the reference is to uninitialised data, ignore it
if get_pointer(insn.Op1.addr) == 0:
needs_reloc = False
elif is_func_start(get_fudge_dest(get_pointer(insn.Op1.addr))):
jump = get_fudge_dest(get_pointer(insn.Op1.addr))
jumps.append(get_fudge_dest(get_pointer(insn.Op1.addr)))
assembly = b"\xe8\x90\x90\x90\x90"
else:
#this happens a lot when rebuilding unpatched ScatterBee samples
InfoMsg("Unsupported call at %08x" % insn.ea)
InfoMsg("%08x" % get_pointer(insn.Op1.addr))
needs_reloc = False
#return
else:
pass
#jumps have no flow, try to keep the target near to this chain as jump targets are usually inside the same function
elif insn.itype == idaapi.NN_jmp:
jump = get_fudge_dest(insn.Op1.addr)
close_jumps.append(get_fudge_dest(insn.Op1.addr))
needs_reloc = True
#if they have used a small jump instruction, replace with a large one to guarantee we can reach the relative target in the final output
if insn.size == 2:
assembly = b"\xe9\x90\x90\x90\x90"
#same for indirect jumps, but handle the loading form memory to calculate the destination
elif insn.itype == idaapi.NN_jmpfi or insn.itype == idaapi.NN_jmpni:
if insn.Op1.type == idaapi.o_mem:
if is_func_start(get_fudge_dest(get_pointer(insn.Op1.addr))):
jump = get_fudge_dest(get_pointer(insn.Op1.addr))
close_jumps.append(get_fudge_dest(get_pointer(insn.Op1.addr)))
assembly = b"\xe9\x90\x90\x90\x90"
needs_reloc = True
else:
pass
#never come across this jump type so don't know how to handle it
elif insn.itype == idaapi.NN_jmpshort:
InfoMsg("Encountered jmpshort at %08x, needs implementing. ABORTING, output will be invalid" % insn.ea)
return
must be in self.model.x, output in self.model.y, loss in self.model.loss, and training using self.model.train_step '''
logger.info("fitting quadric model")
self.fitted_qs = []
log_str = "epoch: {:06d} ::: loss: {:.02e}"
for epoch in range(self.n_epochs):
batcher = tensorflow_models.np_batcher(X.shape[0], self.batch_size)
epoch_loss = 0
for batch in batcher:
fitted_q = np.zeros((self.input_shape+1,self.input_shape+1))
fitted_q[np.triu_indices(self.input_shape+1)] = self.sess.run(self.model.fq)
fitted_q = fitted_q.T
self.fitted_qs.append(fitted_q)
#if (self.log_epochs): logger.info("fitted_q:: {}".format(str(fitted_q)))
feed_dict={
self.model.x : X[batch]
}
if self.weighted:
feed_dict[self.model.sample_weight] = sample_weight[batch]
feed_dict.update(feed_dict_extras)
loss, _ = self.sess.run(
(self.model.loss, self.model.train_step),
feed_dict
)
epoch_loss += loss.sum()
if self.log_epochs and not (isinstance(self.log_epochs, float) and epoch%int(1/self.log_epochs)):
logger.info(log_str.format(epoch, epoch_loss.sum()))
logger.info("finished fitting :::: loss: " + str(loss.sum()))
self.final_loss = loss.sum()
return self
class TFQuadric(sklearn.base.BaseEstimator, tensorflow_models.PickleableTFModel):
''' Fits a quadric surface to input points using TensorFlow.
Parameters:
n_epochs: number of training epochs
learning_rate: make it bigger to learn faster, at the risk of killing your relu
trainable: set to false if you want to not allow training. For example, if you want to use this as part of another network
batch_size: defaults to the entire dataset.
'''
def __init__(self, n_epochs=300, learning_rate=0.05, trainable=True, batch_size=None,
log_epochs=False, model=None, weighted=True, input_shape=3, parabolic_constraint=False):
global TFQuadricGraph
global TFQuadricSesh
try:
TFQuadricGraph
except:
TFQuadricGraph = tf.Graph()
try:
TFQuadricSesh
except:
TFQuadricSesh = tf.Session(graph = TFQuadricGraph)
self.sess = TFQuadricSesh
self.graph = TFQuadricGraph
self.n_epochs = n_epochs
self.learning_rate = learning_rate
self.trainable = trainable
self.batch_size = batch_size
self.log_epochs = log_epochs
self.model = model
self.input_shape = input_shape
self.weighted = weighted
#self.init_q = np.zeros((9,))
self.init_q = np.zeros((10,))
self.parabolic_constraint = parabolic_constraint
if self.model is None:
print("building!")
self.build_model()
super().__init__()
def fit(self, X, y=None, sample_weight=None, beta0=None,**fit_params):
''' Puts inputs into np format, initializes a session, builds the graph and then calls `.fit_` which can be overridden by individual models '''
X = sklearn.utils.check_array(X)
self.init_vars_(X,y,sample_weight,beta0)
with self.graph.as_default():
#self.blah = self.sess.run(self.model.blah, feed_dict={self.model.x:X})
fitted_model = self.fit_(X, sample_weight=sample_weight, **fit_params)
self.coef_ = self.sess.run(self.model.fq)
self.intercept_ = np.array([])
return fitted_model
def init_vars_(self, X, y=None, sample_weight=None, beta0=None):
if beta0 is None:
beta0 = self.fast_reasonable_q(X,sample_weight)
#beta0 /= beta0[-1] #TODO If the point lies ON the quadric, the remaining variables go to inf!!!! This is probably a problem for the iterated method
#self.init_q = beta0[:-1]
self.init_q = beta0#/np.linalg.norm(beta0)
#print(self.init_q.shape)
with self.graph.as_default():
self.sess.run(self.model.initializer)
self.sess.run(self.model.fq_feed, feed_dict={self.model.fq_placeholder: self.init_q})
def predict(self, X, return_dists=False):
X = sklearn.utils.check_array(X)
with self.graph.as_default():
#print(self.sess.run(self.model.fq))
self.sess.run(self.model.fq_feed, feed_dict={self.model.fq_placeholder: self.coef_})
#print(self.sess.run(self.model.fq))
projections, dists = self.sess.run((self.model.orthogonal_projections, self.model.dists), feed_dict={self.model.x:X})
if return_dists:
return projections, dists
return projections
def build_model(self):
''' Initializes a new graph, and then calls the .build_model_ method, which must be implemented by a TFEstimator '''
self.model = tensorflow_models.NameSpace()
with self.graph.as_default():
self.build_model_()
def build_model_(self):
''' The actual network architecture '''
with tf.name_scope("quadric_regression"):
with tf.name_scope("input"):
input_dim = [None, self.input_shape]
self.model.x = tf.placeholder(tf.complex128, shape=input_dim, name="input")
if self.weighted:
self.model.sample_weight = tf.placeholder(tf.float64, shape=[None], name="sample_weight")
with tf.name_scope("quadric_projection"):
if self.parabolic_constraint:
self.model.fq = tf.Variable(self.init_q, name="flattened_quadric", trainable=self.trainable,
constraint=parabolic_constraint)
else:
self.model.fq = tf.Variable(self.init_q, name="flattened_quadric", trainable=self.trainable,
constraint=unit_norm_constraint)
print(self.model.fq)
self.model.fq_placeholder = tf.placeholder(tf.float64, shape=self.init_q.shape)
self.model.fq_feed = self.model.fq.assign(self.model.fq_placeholder)
q = nptriu(self.model.fq)
#TODO: I removed a /2 here and shit fell apart
self.model.q = q + tf.transpose(q) - tf.diag(tf.diag_part(q))
print(self.model.q)
if not self.model.q.dtype.is_complex:
self.model.qc = tf.cast(self.model.q, tf.complex128 if self.model.q.dtype == tf.float64 else tf.complex64)
else:
self.model.qc = self.model.q
print(self.model.qc)
self.model.orthogonal_projections, self.model.complex_dists = tf_ortho_project(
self.model.qc, self.model.x, imag_0tol=np.inf)
print(self.model.orthogonal_projections)
print(self.model.complex_dists)
self.model.dists = tf.real(self.model.complex_dists)
if self.trainable:
with tf.name_scope("training"):
if not self.weighted:
self.model.loss = tf.reduce_sum(self.model.dists, name="loss")
else:
self.model.loss = tf.reduce_sum(self.model.dists*self.model.sample_weight, name="loss")
#self.model.loss = tf.log(self.model.loss) #TODO: this is kinda jank.
self.model.optimizer = tf.train.AdamOptimizer(self.learning_rate, name="optimizer")
self.model.grads = self.model.optimizer.compute_gradients(self.model.loss)
self.model.grad_application = self.model.optimizer.apply_gradients(self.model.grads)
with tf.control_dependencies([self.model.grad_application]):
self.model.train_step = tf.no_op(name="train_step")
self.model.initializer = tf.global_variables_initializer()
return self.model
def fast_reasonable_q(self, X, sample_weight=None):
#this allows us to orient the data to set an initial set of parameters
global_linear_model = TLS_models.LinearODR_mD(2)
global_linear_model.fit(X, sample_weight=sample_weight)
global_linear_vecs = global_linear_model.cov_eigenvectors[global_linear_model.cov_eigenvalues_sorter]
global_linear_std = global_linear_model.cov_eigenvalues[global_linear_model.cov_eigenvalues_sorter]
global_linear_mean = global_linear_model.intercept_
#print(global_linear_std)
transformed_X = (X-global_linear_mean)@global_linear_vecs.T
transformed_q = np.zeros((4,4))
if False:
# initialize to the best elliptic paraboloid
transformed_q[np.diag_indices(3)] = 1/global_linear_model.cov_eigenvalues[global_linear_model.cov_eigenvalues_sorter]**2
transformed_q[:3,-1] = transformed_q[-1,:3] = 1/global_linear_model.cov_eigenvalues[global_linear_model.cov_eigenvalues_sorter]
transformed_q[3,3] = np.sum(global_linear_mean)
transformed_q[2,2] = 0
if self.parabolic_constraint:
# initialize to the best plane
transformed_q[3,:3] = transformed_q[:3,3] = global_linear_vecs[-1]
transformed_q[3,3] = np.dot(global_linear_vecs[-1], -global_linear_mean)
#test stuff
transformed_q[np.diag_indices(3)] = 0.0001
transformed_q[0,0] = 0
#print(transformed_q)
#transformed_q[:3,:3] = np.array([[0,1,1],[0,0,4],[0,0,3]])
#transformed_q[:3,:3] = np.array([[0,1,1],[0,1,4],[0,0,3]])
else:
# an ellipse with axes in the various directions
transformed_q[np.diag_indices(3)] = 1/global_linear_std
# shift so that the ellipse hits the origin at the fitted plane. TODO: do we want the ellipse to open up or down??
#transformed_q[:3,-1] = transformed_q[-1,:3] = -2*np.sqrt(global_linear_std)
transformed_q[2,-1] = transformed_q[-1,2] = -2/np.sqrt(global_linear_std[2])
#transformed_q[3,3] = np.sum(
# transformed_q[np.diag_indices(3)],
# global_linear_mean**2
#)
transformed_q[3,3] = -1
#print(transformed_q)
E = np.block([[global_linear_vecs, global_linear_mean.reshape(-1,1)],[np.zeros(self.input_shape),1]])
Einv = np.linalg.inv(E)
q = Einv.T@transformed_q@Einv
q = q / np.linalg.norm(q)
#print(q)
beta0 = q[np.triu_indices(self.input_shape+1)]
#print(beta0.shape)
return beta0
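    # A minimal numpy sketch (illustrative only, not part of the estimator) of the
    # round trip between the flattened upper-triangular vector returned above and
    # the full symmetric quadric matrix that build_model_ reconstructs with
    # nptriu + transpose - diag:
    #
    #   import numpy as np
    #   beta = np.arange(10, dtype=float)        # 10 entries = triu of a 4x4 matrix
    #   q = np.zeros((4, 4))
    #   q[np.triu_indices(4)] = beta             # fill the upper triangle
    #   q_sym = q + q.T - np.diag(np.diag(q))    # symmetrise, mirroring the TF graph
    #   assert np.allclose(q_sym[np.triu_indices(4)], beta)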
def fit_(self, X, sample_weight=None, feed_dict_extras={}):
''' Trains for a number of epochs. Model input must be in self.model.x, output in self.model.y, loss in self.model.loss, and training using self.model.train_step '''
logger.info("fitting quadric model")
self.fitted_qs = []
for epoch in range(self.n_epochs):
#print(epoch)
batcher = tensorflow_models.np_batcher(X.shape[0], self.batch_size)
for batch in batcher:
fitted_q = np.zeros((self.input_shape+1,self.input_shape+1))
fitted_q[np.triu_indices(self.input_shape+1)] = self.sess.run(self.model.fq)
fitted_q = fitted_q.T
self.fitted_qs.append(fitted_q)
feed_dict={
self.model.x : X[batch]
}
if self.weighted:
feed_dict[self.model.sample_weight] = sample_weight[batch]
feed_dict.update(feed_dict_extras)
loss, grads, _ = self.sess.run(
(self.model.loss, self.model.grads, self.model.train_step),
feed_dict
)
log_str = "epoch: {:06d} ::: loss: {:08.02f} ::: grad {}"
if self.log_epochs: logger.info(log_str.format(epoch, loss.sum(),
["{:06.02f}".format(grad) for grad in grads[0][0]]))
logger.info("finished fitting :::: loss: " + str(loss.sum()))
self.final_loss = loss.sum()
return self
def test_parabolic_constraint():
parabolic_q = np.array([[1,0,0,4],[0,0,0,5],[0,0,3,6],[4,5,6,7]])
unparabolic_q = np.array([[1,0,0,4],[0,-1e-1,0,5],[0,0,3,6],[4,5,6,7]])
u = np.arange(9).reshape(3,3)
u += u.T
_, u = np.linalg.eig(u)
x = np.arange(12).reshape(4,3)*1.
parabolic_q[:3,:3] = u@parabolic_q[:3,:3]@u.T
parabolic_q = parabolic_q / np.linalg.norm(parabolic_q)
unparabolic_q[:3,:3] = u@unparabolic_q[:3,:3]@u.T
unparabolic_q = unparabolic_q / np.linalg.norm(unparabolic_q)
with tf.Graph().as_default(), tf.Session() as sess:
parabolic_constraint_test = sess.run(
parabolic_constraint(
tf.constant(
unparabolic_q[np.triu_indices(4)],
dtype=tf.float32),
unit_norm=True,
))
assert np.allclose(parabolic_constraint_test, parabolic_q[np.triu_indices(4)])
def test_tf_munge():
# test tf_munge
munge1 = np.arange(12).reshape((4,3))
munge2 = np.arange(12,20).reshape((4,2))
munge1_indices = np.array([[1,2,4]])[0]
munge2_indices = np.array([[0,3]])[0]
with tf.Graph().as_default() as g, tf.Session() as sess:
test_munge1 = tf.constant(munge1)
test_munge2 = tf.constant(munge2)
test_i1 = tf.constant(munge1_indices, dtype=tf.int32)
test_i2 = tf.constant(munge2_indices, dtype=tf.int32)
res = sess.run(tf_munge(test_munge1, test_i1, test_munge2, test_i2, axis=1))
assert np.allclose(res,
np.array([[12, 0, 1, 13, 2],
[14, 3, 4, 15, 5],
[16, 6, 7, 17, 8],
[18, 9, 10, 19, 11]]))
def test_circular_permutation():
# test circular permutation
to_rot = np.arange(12).reshape((4,3))
rot_by = np.array([0,2,-1,0])
with tf.Graph().as_default() as g, tf.Session() as sess:
test_to_rot = tf.constant(to_rot)
test_rot_by = tf.constant(rot_by)
res = sess.run(tf_circular_permutation(test_to_rot, test_rot_by))
assert np.allclose(res, np.array([[ 0, 1, 2],
[ 5, 3, 4],
[ 8, 6, 7],
[ 9, 10, 11]]))
def test_min_dist_search():
# test min_dist_search
with tf.Graph().as_default(), tf.Session() as sess:
bar = np.array([
[[2,3,4],[1,2,50.01]],
[[4,5,8],[4,5,7]],
[[100,100,100],[1+1j,1,1]]])[:,np.newaxis,:,:]
test_roots = tf.placeholder(tf.complex128, bar.shape)
sol_tf = tf_min_dist_search(test_roots,1e-1)
res_pts, res_dists = sess.run(sol_tf, feed_dict={test_roots: bar})
assert np.allclose(res_pts[:,0,:],
np.array([[2., 3., 4.],
[4., 5., 7.],
[100,100,100]]))
def test_rotate_and_translate_quadric():
# test rotate_and_translate_quadric
q = np.array([ # an elliptic paraboloid
[0.5,0,0,0],
[0,0.25,0,0],
[0,0,0.1,-1],
[0,0,-1,0.1],
])
u = np.arange(9).reshape(3,3)
u += u.T
_, u = np.linalg.eig(u)
x = np.arange(12).reshape(4,3)*1.
E = np.block([[u, x[0].reshape(-1,1)],[np.zeros(3), 1]])
Einv = np.linalg.inv(E)
q_test = Einv.T@q@Einv
with tf.Graph().as_default(), tf.Session() as sess:
test_qs = tf.placeholder(q.dtype, q.shape)
test_pts = tf.placeholder(x.dtype, x.shape)
sol_tf = tf_rotate_and_translate_quadric(test_qs, test_pts)
l,u,ul,ur,br = sess.run(sol_tf, feed_dict={test_pts: x, test_qs: q_test})
res_q = np.block([[np.diag(ul), ur[0:1].T],[ur[0:1],br[0]]])
ee = np.block([[u, x[0].reshape(-1,1)],[np.zeros(3),1]])
eeinv = np.linalg.inv(ee)
assert(np.allclose(eeinv.T@res_q@eeinv, q_test))
def test_quadric_ortho_projection():
# test quadric_ortho_projection
q = np.array([ # an elliptic paraboloid
[0.5,0,0,0],
[0,0.25,0,0],
[0,0,0.1,-1],
[0,0,-1,0.1],
])
u = np.arange(9).reshape(3,3)
u += u.T
_, u = np.linalg.eig(u)
x = np.arange(12).reshape(4,3)*1.
E = np.block([[u, x[0].reshape(-1,1)],[np.zeros(3), 1]])
Einv = np.linalg.inv(E)
q_test = Einv.T@q@Einv
with tf.Graph().as_default(), tf.Session() as sess:
test_qs = tf.placeholder(tf.float32, q.shape)
test_pts = tf.placeholder(tf.float32, x.shape)
test_qsc = tf.cast(test_qs, tf.complex128 if test_qs.dtype == tf.float64 else tf.complex64)
test_ptsc = tf.cast(test_pts, tf.complex128 if test_pts.dtype == tf.float64 else tf.complex64)
sol_tf = tf_ortho_project(test_qsc, test_ptsc, imag_0tol=1e-3)
res = sess.run(sol_tf, feed_dict={test_pts: x, test_qs: q_test})
test_res = orthogonal_quadric_projection(x,q_test,projection_funcs)
for i, r in enumerate(test_res):
assert np.allclose(r, res[i], atol=1e-4, rtol=1e-4)
def test_rotate_and_translate_quadric_planar():
planar_q = np.array([ # an elliptic paraboloid
[0,0,0,1],
[0,0,0,-2],
[0,0,0,-1],
[1,-2,-1,0.1],
])
x = np.arange(12).reshape(4,3)*1.
arbitrary_rot = np.arange(9).reshape(3,3)
arbitrary_rot += arbitrary_rot.T
_, eigen_rot = np.linalg.eig(arbitrary_rot)
E = np.block([[eigen_rot, x[0].reshape(-1,1)],[np.zeros(3), 1]])
Einv = np.linalg.inv(E)
planar_q_rot = Einv.T@planar_q@Einv
with tf.Session('') as sess:
| |
if nrCertif.subclass:
return nrCertif.subclass(*args_, **kwargs_)
else:
return nrCertif(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='nrCertif', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nrCertif')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nrCertif')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nrCertif', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nrCertif'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nrCertif', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nrCertif
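# A minimal usage sketch (illustrative only): these generated classes follow the usual
# generateDS.py pattern, so an empty element can be created via factory() and serialised
# with export().  Module-level helpers such as showIndent and GenerateDSNamespaceDefs_
# are assumed to be defined earlier in this generated file, as the methods above reference them.
#
#   from io import StringIO
#   obj = nrCertif.factory()
#   out = StringIO()
#   obj.export(out, level=0)
#   print(out.getvalue())   # expected to be roughly '<nrCertif/>' since hasContent_() is False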
class dtEmisCertif(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dtEmisCertif)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dtEmisCertif.subclass:
return dtEmisCertif.subclass(*args_, **kwargs_)
else:
return dtEmisCertif(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dtEmisCertif', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dtEmisCertif')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dtEmisCertif')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dtEmisCertif', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dtEmisCertif'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dtEmisCertif', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class dtEmisCertif
class dtVencCertif(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dtVencCertif)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dtVencCertif.subclass:
return dtVencCertif.subclass(*args_, **kwargs_)
else:
return dtVencCertif(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dtVencCertif', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dtVencCertif')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dtVencCertif')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dtVencCertif', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dtVencCertif'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dtVencCertif', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class dtVencCertif
class nrProtRenov(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nrProtRenov)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nrProtRenov.subclass:
return nrProtRenov.subclass(*args_, **kwargs_)
else:
return nrProtRenov(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='nrProtRenov', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nrProtRenov')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nrProtRenov')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nrProtRenov', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nrProtRenov'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nrProtRenov', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nrProtRenov
class dtProtRenov(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dtProtRenov)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dtProtRenov.subclass:
return dtProtRenov.subclass(*args_, **kwargs_)
else:
return dtProtRenov(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dtProtRenov', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dtProtRenov')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dtProtRenov')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dtProtRenov', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dtProtRenov'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dtProtRenov', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class dtProtRenov
class dtDou(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dtDou)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dtDou.subclass:
return dtDou.subclass(*args_, **kwargs_)
else:
return dtDou(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dtDou', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dtDou')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dtDou')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dtDou', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dtDou'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dtDou', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class dtDou
class pagDou(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, pagDou)
if subclass is not None:
return subclass(*args_, **kwargs_)
if pagDou.subclass:
return pagDou.subclass(*args_, **kwargs_)
else:
return pagDou(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='pagDou', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('pagDou')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='pagDou')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='pagDou', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='pagDou'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='pagDou', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
| |
<filename>pyrumpeltree.py
"""Python3 port of RumpelTree++.
This module constitutes a non capability-compatible port of RumpelTree++ to Python 3.
The pyrumpeltree module is meant as the core hashing and encoding logic for servers
and/or file-systems implementing sparse-cap designated singly attenuated Rumpelstiltskin DAG's.
For more information on sparse-cap designated singly attenuated Rumpelstiltskin DAG's, and the way
that pyrumpeltree implements these:
https://minorfs.wordpress.com/2014/02/20/rumpelstiltskin-and-his-children/
https://minorfs.wordpress.com/2014/03/21/rumpelstiltskin-and-his-children-part-2/
While the API talks of servers and clients, please take note that there isn't actually any server or
client implemented in this module. The server refers to the logic that is expected to reside inside
of 'your' server of file-system logic. And chances are you won't ever need to use the 'client' part.
The 'client' part of the API is meant for scalability of your system. While some server side operations
require use of a server side secret, some operations don't and may be offloaded to the client side of
things. The client there could be a real network client or a process using your user-space file-system.
It is possible to choose to only allow some clients to do client side operations by means of a cloud
secret that is shared between the server and the clients.
After creating a server object using the create_server function, this server object may be used to
convert sparse capabilities into Node objects. Basically any well formed sparse-cap is considered valid
by the library. The true validity should be bound to the existence of what the Node object designates.
That is, an invalid sparse capability will be a capability to a valid node that points into nothingness.
Conceptually a node consists of three properties:
* A designating sparse capability indicating full unattenuated access to the node and all its children.
* A designating sparse capability indicating attenuated access to the node and all its children.
* A storage designation
The storage designation is meant to be used in the following way:
* The storage relative path is meant to be used as obfuscated location indicator in a file-system
or database, indicating where the serialisation of the designated node is stored.
Note that this storing of serialized node's is not part of the functionality of pyrumpeltree.
 * The encryption_key is meant to be used as a node specific File Encryption Key for encrypting
and decrypting the above mentioned serialisation.
The Node contains the abstraction of attenuation. Invoking 'attenuate' on a Node will return a copy of
that same Node that is missing its full unattenuated access capability. We refer to these nodes as
attenuated nodes.
Next to attenuation, a Node also allows for decomposition. Using the index operator, a child node
can be derived using the child's name as local designation. This means that the child has a weak name
that only designates within the context of its parent, and one or two strong names (sparse capabilities)
that designate without such context. There is no '..' operation to get from a child to its parent just
as there is no path from the attenuated access sparse cap to the unattenuated sparse cap. Enforcing
the directional property of decomposition and attenuation is the main feature of this library.
One important note to potential users: Depending on your threat model, you may want to consider using
the original C++ implementation instead. You will be holding powerful sparse capabilities in process
memory when using this library, and high level languages like Python lack the ability to promptly wipe
sensitive memory after usage. This means that in Python the capabilities may linger in memory long after
usage and a process memory dump may reveal these capabilities for a much longer timespan than they would
when using the C++ library. On the other hand, low level languages come with their own issues, conversely
related to the very same language features that allow promptly wiping sensitive data after usage. You
should carefully consider your threat model before choosing pyrumpeltree or opting for RumpelTree++
instead.
If you find any bugs, see any cryptographic design problems in the algorithm or just want to discuss
the usage of this python library, please contact the author : pibara[at]gmail[dot]com.
"""
import base64
import hmac
import hashlib
import os
def _macfun(data,key):
return hmac.new(key, msg=data, digestmod=hashlib.sha256).digest()
#This little helper class does the actual RumpleTree hashing, capabilities and storage info stuff.
class _Engine:
def __init__(self,secret,cloudsecret):
self.secret=secret #Secret that should NOT be shared with client instances.
self.cloudsecret=cloudsecret #Secret that should be shared with client instances to allow client side attenuation.
def nodecaps(self,firstcap):
if firstcap[1] == 'o': #Check for read-only or attenuated cap prefix.
cap1=None #There is no unattenuated/rw cap for this one
cap2=firstcap #We use the function argument as ro/attenuated cap
key2=base64.b32decode(firstcap[3:]+"====") #Calculate the FEK by decoding the non-prefix part of the ro/attenuated cap.
else:
cap1=firstcap #Use the function parameter as rw/unattenuated cap
key1=base64.b32decode(firstcap[3:]+"====") #Decode the non-prefix part of the unattenuated cap.
key2=_macfun(b"read-only::nosalt",key1) #Derive the FEK by hashing a fixed string with cap1 as key.
cap2="ro-" + base64.b32encode(key2)[:-4].decode("utf-8") #Also encode the FEK into a cap for ro/attenuated access.
key3=_macfun(self.cloudsecret.encode(),cap2.encode()) #Derive a third key from the attenuated/ro cap
str3= base64.b32encode(key3)[:-4].decode("utf-8") #Now start off with encoding in base32.
        location = str3[0:3] + "/" + str3[3:6] + "/" + str3[6:] #Create a path for a balanced directory tree for where to serialize our nodes.
return (cap1,cap2,location,key2)
def derive(self,parentstoragekey,key,attenuated):
#Derive an unattenuated child cap using attenuated parent cap
intermediatekey = _macfun(self.secret,parentstoragekey)
childkey = _macfun(key.encode(),parentstoragekey)
if attenuated == False:
return "rw-" + base64.b32encode(childkey)[:-4].decode("utf-8")
else:
#Attenuate the result if requested.
key2=_macfun(b"read-only::nosalt",childkey)
return "ro-" + base64.b32encode(key2)[:-4].decode("utf-8")
class Storage:
"""Trivial class combining the two storage entity attributes"""
def __init__(self,location,key):
self.location=location
self.key=key
def __call__(self):
"""Returns storage entity location attribute"""
return self.location
def crypto_key(self):
"""Returns storage entity FEK attribute"""
return self.key
class Node:
"""Represents a single node in the Rumpelstiltskin singly attenuated DAG."""
def __init__(self,engine,firstcap):
self.engine=engine
if firstcap[1] == "w":
self.ro=False
else:
self.ro=True
(self.rwcap,self.rocap,self.location,self.storagekey)=self.engine.nodecaps(firstcap)
def __getitem__(self,key):
"""Get a Node object for a named entity one level down in the tree."""
if self.ro:
return Node(self.engine,self.engine.derive(self.storagekey,key,True))
else:
return Node(self.engine,self.engine.derive(self.storagekey,key,False))
def __eq__(self,other):
return self.rocap == other.rocap
def cap(self):
"""Get the least attenuated sparse capability available for this Node"""
if self.rwcap == None:
return self.rocap
else:
return self.rwcap
def attenuated(self):
"""Get the read-only or attenuated access sparse capability for this node"""
return Node(self.engine,self.rocap)
def isattenuated(self):
"""Returns a boolean indicating if access to this Node is attenuated or read only"""
return self.ro
def storage(self):
"""Returns a Storage entity for this Node"""
return Storage(self.location,self.storagekey)
class Server:
"""Helper class for accessing root Node's within a Rumpelstiltskin singly attenuated DAG
on the server or file-system side of things."""
def __init__(self,secret,cloudsecret):
self.engine=_Engine(secret,cloudsecret)
def __getitem__(self,rootcap):
"""Get any Node by one of its sparse caps."""
return Node(self.engine,rootcap)
class Client:
"""Helper class for doing some non-decomposition operations on Nodes at the client or
user-process side of things."""
def __init__(self,cloudsecret):
self.pseudoserver=Server("",cloudsecret)
def __getitem__(self,key):
"""Get any Node by one of its sparse caps."""
return self.pseudoserver[key]
def attenuate(self,cap):
"""Get the attenuated/read-only sister sparse cap belonging with an unattenuated or
read/write sparse cap"""
        return self.pseudoserver[cap].attenuated()
def storage(self,cap):
"""Get the Storage entity belonging to the node designated by the sparse cap provided."""
return self.pseudoserver[cap].storage()
def randomsecret():
"""Helper function for generating a decent size secret.
This secret should be stored persistently with strict access rights allowing only the server
or file-system process access to the persistently stored secret."""
secret=""
for iteration in range (0,256):
secret += base64.b32encode(os.urandom(32))[:-4].decode("utf-8")
return secret.encode()
def randomrootcap():
"""Generate a random rootcap (and thus a new Rumpelstiltskin singly attenuated DAG)."""
return "rw-" + base64.b32encode(os.urandom(32))[:-4].decode("utf-8")
def pass2rootcap(passwd):
"""Create a rootcap for a new Rumpelstiltskin singly attenuated DAG using a password"""
binkey=hashlib.pbkdf2_hmac('sha256', passwd.encode(), b'<PASSWORD>', 131072)
return "rw-" + base64.b32encode(binkey)[:-4].decode("utf-8")
def create_server(secret,cloudsecret=""):
"""Create a server side or file-system side 'server' object for looking up and processing
Rumpelstiltskin singly attenuated DAG nodes."""
return Server(secret,cloudsecret)
def create_client(cloudsecret=""):
"""Create a (network or file-system) client side 'client' object for potentially doing some
client side operations in order to offload the server/file-system side with operations
that are possible | |
'class': a.__class__.__name__}
a_con = a.Consumer(conn.channel())
b_con = b.Consumer(conn.channel())
c_con = c.Consumer(conn.channel())
# when cast is invoked for round-robin:
# only one consumer for that actor class receives the message and
# messages are consumed by consumers for other actor classes
a.cast(method=data_with_args['method'], args=data_with_args['args'],
type=ACTOR_TYPE.RR)
a_msg = get_next_msg(a_con)
b_msg = get_next_msg(b_con)
self.assertTrue((a_msg or b_msg) and (not(a_msg and b_msg)))
self.assertIsNone(get_next_msg(c_con))
clean_up_consumers([a_con, b_con, c_con])
@with_in_memory_connection
def test_cast_round_robin_send_repeatedly(self, conn):
        # when cast is invoked many times,
# eventually all consumers should consume at least one message
a, b, c = RRActor(conn), RRActor(conn), A()
data_with_args = {'method': 'foo', 'args': {'foo': 'foo_arg'},
'class': a.__class__.__name__}
a_con = a.Consumer(conn.channel())
b_con = b.Consumer(conn.channel())
c_con = c.Consumer(conn.channel())
for i in range(1, 5):
a.cast(method=data_with_args['method'],
args=data_with_args['args'],
type=ACTOR_TYPE.RR)
self.assertNextMsgDataEqual(a_con, data_with_args)
self.assertNextMsgDataEqual(b_con, data_with_args)
self.assertIsNone(get_next_msg(c_con))
clean_up_consumers([a_con, b_con, c_con])
# -----------------------------------------------------------------
# Test functionality for correct dispatch of method calls
# --------------------------------------------------------------------------
def test_on_message_when_reply_to_is_set(self):
class Foo(Actor):
class state():
foo_called = False
def foo(self, bar):
self.foo_called = True
return (bar, ret_val)
args, ret_val = {'bar': 'foo_arg'}, 'foooo'
ticket = uuid()
body, message = get_test_message(
'foo', args, Foo.__class__.__name__, reply_to=[ticket])
a = Foo()
a.reply = Mock()
# when the property reply_to is set, reply is called
a._on_message(body, message)
self.assertTrue(a.state.foo_called)
        a.reply.assert_called_once()
def test_on_message_when_reply_to_not_set(self):
ret_val = 'fooo'
class Foo(Actor):
class state():
foo_called = False
def foo(self, bar):
self.foo_called = True
return (bar, ret_val)
# when the property reply_to is not set, reply is not called
body, message = get_test_message(
'foo', {'bar': 'foo_arg'}, Foo.__class__.__name__)
message.ack = Mock()
a = Foo()
a.reply = Mock()
result = a._on_message(body, message)
self.assertTrue(a.state.foo_called)
self.assertEquals(a.reply.call_count, 0)
# message should be acknowledged after the method is executed
message.ack.assert_called_once()
# no result should be returned
self.assertIsNone(result)
def test_on_message_invokes_on_dispatch_when_reply_to_not_set(self):
ret_val = 'fooo'
body, message = get_test_message('foo', {'bar': 'foo_arg'},
A.__class__.__name__)
a = A()
a.reply = Mock()
a._DISPATCH = Mock(return_value=ret_val)
# when reply_to is not set:
# dispatch result should be ignored
result = a._on_message(body, message)
        a._DISPATCH.assert_called_once_with(message, body)
self.assertIsNone(result)
self.assertEqual(a.reply.call_count, 0)
def test_on_message_invokes_on_dispatch_when_reply_to_set(self):
ret_val = 'fooo'
ticket = uuid()
body, message = get_test_message('foo', {'bar': 'foo_arg'},
A.__class__.__name__,
reply_to=ticket)
a = A()
a.reply = Mock()
a._DISPATCH = Mock(return_value=ret_val)
# when reply_to is set:
# dispatch result should be ignored
a._on_message(body, message)
a._DISPATCH.assert_called_once_with(body, ticket=ticket)
a.reply.assert_called_once_with(message, ret_val)
def test_on_message_when_no_method_is_passed(self):
args, ret_val = {'bar': 'foo_arg'}, 'fooo'
class Foo(Actor):
class state():
def foo(self, bar):
self.foo_called = True
return (bar, ret_val)
body, message = get_test_message('', {'bar': 'foo_arg'},
Foo.__class__.__name__)
message.ack = Mock()
a = Foo()
a.default_receive = Mock()
result = a._on_message(body, message)
a.default_receive.assert_called_once(args)
# message should be acknowledged even when the method does not exist
message.ack.assert_called_once_with()
self.assertIsNone(result)
def test_on_message_when_private_method_is_passed(self):
body, message = get_test_message('_foo', {},
A.__class__.__name__)
message.ack = Mock()
a = A()
a.state._foo = Mock()
a._on_message(body, message)
self.assertEqual(a.state._foo.call_count, 0)
# message should be acknowledged even when method is not invoked
message.ack.assert_called_once_with()
def test_on_message_when_unexisted_method_is_passed(self):
body, message = get_test_message('bar', {'bar': 'foo_arg'},
A.__class__.__name__)
message.ack = Mock()
a = A()
a.default_receive = Mock()
result = a._on_message(body, message)
# message should be acknowledged even when the method does not exist
message.ack.assert_called_once_with()
self.assertIsNone(result)
def test_on_message_delegated_to_agent(self):
body, message = get_test_message('bar', {'bar': 'foo_arg'},
A.__class__.__name__)
a = A()
a.agent = Mock()
a.on_message(body, message)
a.agent.process_message.assert_called_once_with(a, body, message)
def assert_on_message_exception_raise(self, exception_cls, ack_count):
body, message = get_test_message('bar', {'bar': 'foo_arg'},
A.__class__.__name__)
a = A()
message.ack = Mock()
a.handle_cast = Mock(side_effect=exception_cls('Boom'))
with self.assertRaises(exception_cls):
a._on_message(body, message)
self.assertEquals(message.ack.call_count, ack_count)
a.handle_cast.reset_mock()
message.ack.reset_mock()
message.ack = Mock()
a.handle_call = Mock(side_effect=exception_cls('Boom'))
body, message = get_test_message('bar', {'bar': 'foo_arg'},
A.__class__.__name__,
reply_to=[uuid])
with self.assertRaises(exception_cls):
a._on_message(body, message)
self.assertEquals(message.ack.call_count, ack_count)
def test_on_message_when_base_exception_occurs(self):
# Do not ack the message if an exceptional error occurs,
self.assert_on_message_exception_raise(Exception, 0)
# but do ack the message if BaseException
# (SystemExit or KeyboardInterrupt)
# is raised, as this is probably intended.
self.assert_on_message_exception_raise(BaseException, 1)
def test_dispatch_return_values(self):
"""In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
# when result is correct
ret_val = 'foooo'
a = A()
body, message = get_test_message('bar', {'bar': 'foo_arg'},
a.__class__.__name__)
expected_result = {'ok': ret_val}
a.state.bar = Mock(return_value=ret_val)
result = a._DISPATCH(body)
self.assertDictContainsSubset(expected_result, result)
self.assertNotIn('nok', result)
# when method called does not return a result
a.state.bar.reset_mock()
a.state.bar = Mock(return_value=None)
expected_result = {'ok': None}
result = a._DISPATCH(body)
self.assertDictContainsSubset(expected_result, result)
self.assertNotIn('nok', result)
# when method does not exist
body, message = get_test_message(
'foo', {'bar': 'foo_arg'}, a.__class__.__name__)
result = a._DISPATCH(body)
self.assertIn('nok', result)
self.assertIn("KeyError('foo',)", result['nok'])
# when calling a private method
body, message = get_test_message(
'_foo', {'bar': 'foo_arg'}, a.__class__.__name__)
a._foo = Mock()
result = a._DISPATCH(body)
self.assertIn('nok', result)
self.assertIn("KeyError('_foo',)", result['nok'])
# when calling a private method
body, message = get_test_message(
'__foo', {'bar': 'foo_arg'}, a.__class__.__name__)
a.__foo = Mock()
result = a._DISPATCH(body)
self.assertIn('nok', result)
self.assertIn("KeyError('__foo',)", result['nok'])
# when method called raises an exception
body, message = get_test_message('foo_with_exception',
{'bar': 'foo_arg'},
a.__class__.__name__)
a.foo_with_exception = Mock(side_effect=Exception('FooError'))
result = a._DISPATCH(body)
self.assertIn('nok', result)
self.assertIn("KeyError('foo_with_exception',)", result['nok'])
@with_in_memory_connection
def test_on_message_is_sending_to_reply_queue(self, conn):
ret_result = 'foooo'
class Foo(A):
class state:
def bar(self, my_bar):
return ret_result
a = Foo(conn)
ticket = uuid()
delivery_tag = uuid()
body, message = get_encoded_test_message('bar', {'my_bar': 'bar_arg'},
A.__class__.__name__,
reply_to=ticket,
delivery_tag=delivery_tag)
# Set up a reply queue to read from
# reply_q and reply_exchange should be set the sender
a.reply_exchange = a.reply_exchange.bind(a.connection.default_channel)
maybe_declare(a.reply_exchange)
reply_q = a.get_reply_queue(ticket)
reply_q(a.connection.default_channel).declare()
a._on_message(body, message)
a_con = Consumer(conn.channel(), reply_q)
self.assertNextMsgDataEqual(a_con, {'ok': ret_result})
@with_in_memory_connection
def test_reply_queue_is_declared_after_call(self, conn):
ticket = uuid()
with patch('cell.actors.uuid') as new_uuid:
new_uuid.return_value = ticket
a = A(conn)
reply_q = a.get_reply_queue(ticket)
a.get_reply_queue = Mock(return_value=reply_q)
with self.assertRaises(ChannelError):
reply_q(conn.channel()).queue_declare(passive=True)
a.call(method='foo', args={}, type=ACTOR_TYPE.DIRECT)
a.get_reply_queue.assert_called_once_with(ticket)
self.assertTrue(
reply_q(conn.channel()).queue_declare(passive=True))
@with_in_memory_connection
def test_reply_send_correct_msg_body_to_the_reply_queue(self, conn):
a = A(conn)
ticket = uuid()
delivery_tag = 2
body, message = get_encoded_test_message('bar', {'my_bar': 'bar_arg'},
a.__class__.__name__,
reply_to=ticket,
delivery_tag=delivery_tag)
# Set up a reply queue to read from
# reply_q and reply_exchange should be set the sender
a.reply_exchange.maybe_bind(a.connection.default_channel)
maybe_declare(a.reply_exchange)
reply_q = a.get_reply_queue(ticket)
reply_q(a.connection.default_channel).declare()
a.reply(message, body)
a_con = Consumer(conn.channel(), reply_q)
reply_msg = get_next_msg(a_con)
reply_body = reply_msg.decode()
self.assertEquals(reply_body, body)
# -----------------------------------------------------------------
# Test actor to actor binding functionality (add_binding, remove_binding)
# ----------------------------------------------------------------
def mock_exchange(self, actor, type):
exchange = actor.type_to_exchange[type]()
exchange.bind_to = Mock()
exchange.exchange_unbind = Mock()
exchange.declare = Mock()
actor.type_to_exchange[type] = Mock(return_value=exchange)
return exchange
def mock_queue(self, actor, type):
queue = actor.type_to_queue[type]()
queue.bind_to = Mock()
queue.unbind_from = Mock()
queue.declare = Mock()
actor.type_to_queue[type] = Mock(return_value=queue)
return queue
@with_in_memory_connection
def test_add_remove_binding_for_direct_type(self, conn):
# Add binding between the inbox queue
# of one actor to the outbox queue of another
a, b = A(conn), A(conn)
routing_key = 'foooo'
mock_entity_type = ACTOR_TYPE.DIRECT
inbox_queue = self.mock_queue(a, mock_entity_type)
source_ex = b.outbox
a._add_binding(source_ex.as_dict(), routing_key, mock_entity_type)
inbox_queue.bind_to.assert_called_with(
exchange=b.outbox, routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
inbox_queue.unbind_from.assert_called_with(
exchange=source_ex, routing_key=routing_key)
@with_in_memory_connection
def test_add_remove_binding_for_scatter_type(self, conn):
a, b = A(conn), A(conn)
routing_key, mock_entity_type = 'foooo', ACTOR_TYPE.SCATTER
dest_ex = self.mock_exchange(a, mock_entity_type)
source_ex = b.outbox
a._add_binding(source_ex.as_dict(),
routing_key=routing_key,
inbox_type=mock_entity_type)
dest_ex.bind_to.assert_called_with(exchange=source_ex,
routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
dest_ex.exchange_unbind.assert_called_with(
exchange=source_ex, routing_key=routing_key)
@with_in_memory_connection
def test_add_remove_binding_for_rr_type(self, conn):
a, b = A(conn), A(conn)
routing_key, mock_entity_type = 'foooo', ACTOR_TYPE.RR
dest_exchange = self.mock_exchange(a, mock_entity_type)
source_ex = b.outbox
a._add_binding(source_ex.as_dict(), routing_key, mock_entity_type)
dest_exchange.bind_to.assert_called_with(
exchange=source_ex, routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
dest_exchange.exchange_unbind.assert_called_with(
exchange=source_ex, routing_key=routing_key)
@with_in_memory_connection
def test_add_binding_when_actor_for_not_supported_type(self, conn):
a, b = A(conn), A(conn)
entity_type = 'test'
self.assertNotIn(entity_type, a.types)
with self.assertRaises(Exception):
a._add_binding(b.outbox.as_dict(),
routing_key=b.routing_key, inbox_type=entity_type)
@with_in_memory_connection
def test_add_remove_binding_when_routing_key_is_empty(self, conn):
a = A(conn)
routing_key, mock_entity_type = "", ACTOR_TYPE.SCATTER
source_ex = Exchange('bar.foo.bar', mock_entity_type)
exchange = self.mock_exchange(a, mock_entity_type)
a._add_binding(source_ex.as_dict(), routing_key, mock_entity_type)
exchange.bind_to.assert_called_with(exchange=source_ex,
routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
exchange.exchange_unbind.assert_called_with(exchange=source_ex,
routing_key=routing_key)
class As(Actor):
class state():
def foo(self, who=None):
pass
def meth(self):
pass
class test_ActorProxy(Case):
@with_in_memory_connection
def test_init(self, conn):
"""test that __init__ sets fields"""
id = uuid()
ag, res = Mock(), Mock()
# we need to have wait for result,
a1 = ActorProxy(qualname(A), id, connection=conn, agent=ag)
self.assertEqual(a1.id, id)
self.assertIsNone(a1.async_start_result)
self.assertIsInstance(a1._actor, A)
self.assertEqual(a1._actor.name, A().__class__.__name__)
self.assertEqual(a1._actor.agent, ag)
self.assertEqual(a1._actor.id, a1.id)
self.assertEqual(a1._actor.connection, conn)
a1 = ActorProxy(qualname(A), id, res, connection=conn, agent=ag)
self.assertEqual(a1.id, id)
self.assertEqual(a1.async_start_result, res)
self.assertEqual(a1._actor.id, a1.id)
self.assertIsInstance(a1._actor, A)
self.assertEqual(a1._actor.name, A().__class__.__name__)
self.assertEqual(a1._actor.agent, ag)
self.assertEqual(a1._actor.connection, conn)
def assert_actor_method_called(self, meth, func):
args = ['foo', {'who': 'the quick brown...'}]
kwargs = {'nowait': True}
func(*args, **kwargs)
meth.assert_called_once_with(*args, **kwargs)
args = ['bar', {'who': 'the quick brown...'}]
kwargs = {'nowait': True}
        # bar method is not supported so an error is thrown
with self.assertRaises(AttributeError):
func(*args, **kwargs)
with self.assertRaises(WrongNumberOfArguments):
func()
def assert_actor_method_called_with_par_foo(
| |
if len(drive) == 23:
# binary data from parsed lnk
self.drive = drive[1:3]
else:
# text representation
m = _DRIVE_PATTERN.match(drive.strip())
if m:
self.drive = m.groups()[0].upper() + ':'
self.drive = self.drive.encode()
else:
raise FormatException("This is not a valid drive: " + str(drive))
@property
def bytes(self):
drive = self.drive
padded_str = drive + b'\\' + b'\x00' * 19
return b'\x2F' + padded_str
# drive = self.drive
# if isinstance(drive, str):
# drive = drive.encode()
# return b'/' + drive + b'\\' + b'\x00' * 19
def __str__(self):
return "<DriveEntry: %s>" % self.drive
class PathSegmentEntry(object):
def __init__(self, bytes=None):
self.type = None
self.file_size = None
self.modified = None
self.short_name = None
self.created = None
self.accessed = None
self.full_name = None
if bytes is None:
return
buf = BytesIO(bytes)
self.type = _ENTRY_TYPES.get(read_short(buf), 'UNKNOWN')
short_name_is_unicode = self.type.endswith('(UNICODE)')
if self.type == 'ROOT_KNOWN_FOLDER':
self.full_name = '::' + guid_from_bytes(buf.read(16))
# then followed Beef0026 structure:
# short size
# short version
# int signature == 0xBEEF0026
# (16 bytes) created timestamp
# (16 bytes) modified timestamp
# (16 bytes) accessed timestamp
return
if self.type == 'KNOWN_FOLDER':
_ = read_short(buf) # extra block size
extra_signature = read_int(buf)
if extra_signature == 0x23FEBBEE:
_ = read_short(buf) # unknown
_ = read_short(buf) # guid len
# that format recognized by explorer
self.full_name = '::' + guid_from_bytes(buf.read(16))
return
self.file_size = read_int(buf)
self.modified = read_dos_datetime(buf)
unknown = read_short(buf) # FileAttributesL
if short_name_is_unicode:
self.short_name = read_cunicode(buf)
else:
self.short_name = read_cstring(buf, padding=True)
extra_size = read_short(buf)
extra_version = read_short(buf)
extra_signature = read_int(buf)
if extra_signature == 0xBEEF0004:
# indicator_1 = read_short(buf) # see below
# only_83 = read_short(buf) < 0x03
# unknown = read_short(buf) # 0x04
# self.is_unicode = read_short(buf) == 0xBeef
self.created = read_dos_datetime(buf) # 4 bytes
self.accessed = read_dos_datetime(buf) # 4 bytes
offset_unicode = read_short(buf) # offset from start of extra_size
# only_83_2 = offset_unicode >= indicator_1 or offset_unicode < 0x14
if extra_version >= 7:
offset_ansi = read_short(buf)
file_reference = read_double(buf)
unknown2 = read_double(buf)
long_string_size = 0
if extra_version >= 3:
long_string_size = read_short(buf)
if extra_version >= 9:
unknown4 = read_int(buf)
if extra_version >= 8:
unknown5 = read_int(buf)
if extra_version >= 3:
self.full_name = read_cunicode(buf)
if long_string_size > 0:
if extra_version >= 7:
self.localized_name = read_cunicode(buf)
else:
self.localized_name = read_cstring(buf)
version_offset = read_short(buf)
@classmethod
def create_for_path(cls, path):
entry = cls()
entry.type = os.path.isdir(path) and TYPE_FOLDER or TYPE_FILE
try:
st = os.stat(path)
entry.file_size = st.st_size
entry.modified = datetime.fromtimestamp(st.st_mtime)
entry.created = datetime.fromtimestamp(st.st_ctime)
entry.accessed = datetime.fromtimestamp(st.st_atime)
except FileNotFoundError:
now = datetime.now()
entry.file_size = 0
entry.modified = now
entry.created = now
entry.accessed = now
entry.short_name = ntpath.split(path)[1]
entry.full_name = entry.short_name
return entry
def _validate(self):
if self.type is None:
raise MissingInformationException("Type is missing, choose either TYPE_FOLDER or TYPE_FILE.")
if self.file_size is None:
if self.type.startswith('FOLDER') or self.type in ['KNOWN_FOLDER', 'ROOT_KNOWN_FOLDER']:
self.file_size = 0
else:
raise MissingInformationException("File size missing")
if self.created is None:
self.created = datetime.now()
if self.modified is None:
self.modified = datetime.now()
if self.accessed is None:
self.accessed = datetime.now()
# if self.modified is None or self.accessed is None or self.created is None:
# raise MissingInformationException("Date information missing")
if self.full_name is None:
raise MissingInformationException("A full name is missing")
if self.short_name is None:
self.short_name = self.full_name
@property
def bytes(self):
if self.full_name is None:
return
self._validate()
out = BytesIO()
entry_type = self.type
if entry_type == 'KNOWN_FOLDER':
write_short(_ENTRY_TYPE_IDS[entry_type], out)
write_short(0x1A, out) # size
write_int(0x23FEBBEE, out) # extra signature
write_short(0x00, out) # extra signature
write_short(0x10, out) # guid size
out.write(bytes_from_guid(self.full_name.strip(':')))
return out.getvalue()
if entry_type == 'ROOT_KNOWN_FOLDER':
write_short(_ENTRY_TYPE_IDS[entry_type], out)
out.write(bytes_from_guid(self.full_name.strip(':')))
write_short(0x26, out) # 0xBEEF0026 structure size
write_short(0x01, out) # version
write_int(0xBEEF0026, out) # extra signature
write_int(0x11, out) # some flag for containing datetime
write_double(0x00, out) # created datetime
write_double(0x00, out) # modified datetime
write_double(0x00, out) # accessed datetime
write_short(0x14, out) # unknown
return out.getvalue()
short_name_len = len(self.short_name) + 1
try:
self.short_name.encode("ascii")
short_name_is_unicode = False
short_name_len += short_name_len % 2 # padding
except (UnicodeEncodeError, UnicodeDecodeError):
short_name_is_unicode = True
short_name_len = short_name_len * 2
self.type += " (UNICODE)"
write_short(_ENTRY_TYPE_IDS[entry_type], out)
write_int(self.file_size, out)
write_dos_datetime(self.modified, out)
write_short(0x10, out)
if short_name_is_unicode:
write_cunicode(self.short_name, out)
else:
write_cstring(self.short_name, out, padding=True)
indicator = 24 + 2 * len(self.short_name)
write_short(indicator, out) # size
write_short(0x03, out) # version
write_short(0x04, out) # signature part1
write_short(0xBeef, out) # signature part2
write_dos_datetime(self.created, out)
write_dos_datetime(self.accessed, out)
offset_unicode = 0x14 # fixed data structure, always the same
write_short(offset_unicode, out)
offset_ansi = 0 # we always write unicode
write_short(offset_ansi, out) # long_string_size
write_cunicode(self.full_name, out)
offset_part2 = 0x0E + short_name_len
write_short(offset_part2, out)
return out.getvalue()
def __str__(self):
return "<PathSegmentEntry: %s>" % self.full_name
class UwpSubBlock:
block_names = {
0x11: 'PackageFamilyName',
# 0x0e: '',
# 0x19: '',
0x15: 'PackageFullName',
0x05: 'Target',
0x0f: 'Location',
0x20: 'RandomGuid',
0x0c: 'Square150x150Logo',
0x02: 'Square44x44Logo',
0x0d: 'Wide310x150Logo',
# 0x04: '',
# 0x05: '',
0x13: 'Square310x310Logo',
# 0x0e: '',
0x0b: 'DisplayName',
0x14: 'Square71x71Logo',
0x64: 'RandomByte',
0x0a: 'DisplayName',
# 0x07: '',
}
block_types = {
'string': [0x11, 0x15, 0x05, 0x0f, 0x0c, 0x02, 0x0d, 0x13, 0x0b, 0x14, 0x0a],
}
def __init__(self, bytes=None, type=None, value=None):
self._data = bytes or b''
self.type = type
self.value = value
self.name = None
if self.type is not None:
self.name = self.block_names.get(self.type, 'UNKNOWN')
if not bytes:
return
buf = BytesIO(bytes)
self.type = read_byte(buf)
self.name = self.block_names.get(self.type, 'UNKNOWN')
self.value = self._data[1:] # skip type
if self.type in self.block_types['string']:
unknown = read_int(buf)
probably_type = read_int(buf)
if probably_type == 0x1f:
string_len = read_int(buf)
self.value = read_cunicode(buf)
def __str__(self):
string = f'UwpSubBlock {self.name} ({hex(self.type)}): {self.value}'
return string.strip()
@property
def bytes(self):
out = BytesIO()
if self.value:
if isinstance(self.value, str):
string_len = len(self.value) + 1
write_byte(self.type, out)
write_int(0, out)
write_int(0x1f, out)
write_int(string_len, out)
write_cunicode(self.value, out)
if string_len % 2 == 1: # padding
write_short(0, out)
elif isinstance(self.value, bytes):
write_byte(self.type, out)
out.write(self.value)
result = out.getvalue()
return result
class UwpMainBlock:
magic = b'\x31\x53\x50\x53'
def __init__(self, bytes=None, guid: Optional[str] = None, blocks=None):
self._data = bytes or b''
self._blocks = blocks or []
self.guid: str = guid
if not bytes:
return
buf = BytesIO(bytes)
magic = buf.read(4)
self.guid = guid_from_bytes(buf.read(16))
# read sub blocks
while True:
sub_block_size = read_int(buf)
if not sub_block_size: # last size is zero
break
sub_block_data = buf.read(sub_block_size - 4) # includes block_size
self._blocks.append(UwpSubBlock(sub_block_data))
def __str__(self):
string = f'<UwpMainBlock> {self.guid}:\n'
for block in self._blocks:
string += f' {block}\n'
return string.strip()
@property
def bytes(self):
blocks_bytes = [block.bytes for block in self._blocks]
out = BytesIO()
out.write(self.magic)
out.write(bytes_from_guid(self.guid))
for block in blocks_bytes:
write_int(len(block) + 4, out)
out.write(block)
write_int(0, out)
result = out.getvalue()
return result
class UwpSegmentEntry:
magic = b'APPS'
header = b'\x08\x00\x03\x00\x00\x00\x00\x00\x00\x00'
def __init__(self, bytes=None):
self._blocks = []
self._data = bytes
if bytes is None:
return
buf = BytesIO(bytes)
unknown = read_short(buf)
size = read_short(buf)
magic = buf.read(4) # b'APPS'
blocks_size = read_short(buf)
unknown2 = buf.read(10)
# read main blocks
while True:
block_size = read_int(buf)
if not block_size: # last size is zero
break
block_data = buf.read(block_size - 4) # includes block_size
self._blocks.append(UwpMainBlock(block_data))
def __str__(self):
string = '<UwpSegmentEntry>:\n'
for block in self._blocks:
string += f' {block}\n'
return string.strip()
@property
def bytes(self):
blocks_bytes = [block.bytes for block in self._blocks]
blocks_size = sum([len(block) + 4 for block in blocks_bytes]) + 4 # with terminator
size = (
2 # size
+ len(self.magic)
+ 2 # second size
+ len(self.header)
+ blocks_size # blocks with terminator
)
out = BytesIO()
write_short(0, out)
write_short(size, out)
out.write(self.magic)
write_short(blocks_size, out)
out.write(self.header)
for block in blocks_bytes:
write_int(len(block) + 4, out)
out.write(block)
write_int(0, out) # empty block
write_short(0, out) # ??
result = out.getvalue()
return result
@classmethod
def create(cls, package_family_name, target, location=None, logo44x44=None):
segment = cls()
blocks = [
UwpSubBlock(type=0x11, value=package_family_name),
UwpSubBlock(type=0x0e, value=b'\x00\x00\x00\x00\x13\x00\x00\x00\x02\x00\x00\x00'),
UwpSubBlock(type=0x05, value=target),
]
if location:
blocks.append(UwpSubBlock(type=0x0f, value=location)) # need for relative icon path
main1 = UwpMainBlock(guid='{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}', blocks=blocks)
segment._blocks.append(main1)
if logo44x44:
main2 = UwpMainBlock(
guid='{86D40B4D-9069-443C-819A-2A54090DCCEC}',
blocks=[UwpSubBlock(type=0x02, value=logo44x44)]
)
segment._blocks.append(main2)
return segment
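# Hedged usage sketch: the package family name and target below are illustrative
# assumptions, not taken from this module; they show how the classmethod above can
# assemble the APPS segment of a UWP shortcut and serialize it back to bytes.
#
#   segment = UwpSegmentEntry.create(
#       package_family_name='Microsoft.WindowsCalculator_8wekyb3d8bbwe',
#       target='Microsoft.WindowsCalculator_8wekyb3d8bbwe!App')
#   raw = segment.bytes           # serialized APPS block
#   print(UwpSegmentEntry(raw))   # parses back through the byte-reading constructor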
class LinkTargetIDList(object):
def __init__(self, bytes=None):
self.items = []
if bytes is not None:
buf = BytesIO(bytes)
raw = []
entry_len = read_short(buf)
while entry_len > 0:
raw.append(buf.read(entry_len - 2)) # the length includes the size
entry_len = read_short(buf)
self._interpret(raw)
def _interpret(self, raw):
if | |
tr1 = comp1.getTimeRange()
tr2 = comp2.getTimeRange()
stats1 = tree.stats.get("Wind", tr1, al1, mergeMethod = "Average")
stats2 = tree.stats.get("Wind", tr2, al2, mergeMethod = "Average")
# If past the first 5 periods, return 1 (combine)
hours = self.hoursPastProductStart(tree, comp1)
if hours >= 5*12:
return 1
# check for none
if stats1 is None or stats2 is None:
return 0
mag1 = stats1[0]
mag2 = stats2[0]
dir1 = stats1[1]
dir2 = stats2[1]
# calculate the differences, mag and dir
magDiff = abs(mag1 - mag2)
dirDiff = abs(dir1 - dir2)
# account for the 360 to 0 problem
if dirDiff > 180:
dirDiff = abs(dirDiff - 360.0)
if magDiff <= magThreshold and dirDiff <= dirThreshold:
return 1
return 0
def hoursPastProductStart(self, tree, node):
# Compute the hours past the product start time (prodTR)
# that the current time range (curTR) starts.
# If the prodTR is not a multiple of 12, then it is either
# --an update and the first period is less than 12 hours, or
# --a pre-first period issuance.
        # In these cases, we return the hours past the product start
# as if the first period was a full 12-hour period.
# For example,
# A morning update issuance starting at 10 am would
# have an hoursPastProductStart for the first period
# of 4 hours.
# A pre-first period issuance starting at 4 am would
# have an hoursPastProductStart for the first period
# of -2 hours.
prodTR = tree.getTimeRange()
curTR = node.getTimeRange()
prodHours = prodTR.duration()/3600
prodMod = prodHours%12
if prodMod > 0:
try:
# check for 'pre-first period issuances'
period1Hours = self._issuanceInfo.period1TimeRange().duration()/3600
if period1Hours > 12:
adjustHours = prodMod
else:
adjustHours = -(12-prodMod)
except:
adjustHours = 0
else:
adjustHours = 0
prodStart = prodTR.startTime() + adjustHours*3600
return (curTR.startTime() - prodStart)/3600
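    # Worked example of the adjustment above (period lengths are assumed for
    # illustration): a morning update starting at 10 am with an 8-hour first period
    # gives prodMod == 8; since the first period is <= 12 hours,
    # adjustHours == -(12 - 8) == -4, prodStart shifts back to 6 am, and the first
    # period reports 10 am - 6 am == 4 hours past the product start.
    # A pre-first period issuance starting at 4 am with a 14-hour first period gives
    # prodMod == 2 and adjustHours == +2, so prodStart becomes 6 am and the first
    # period reports -2 hours, matching the comment block at the top of this method.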
def similarSky(self, tree, comp1, comp2):
# Returns true if sky stats are similar
# Necessary because of the override to sky_valueList above
al1 = comp1.getAreaLabel()
al2 = comp2.getAreaLabel()
tr1 = comp1.getTimeRange()
tr2 = comp2.getTimeRange()
return self.similarSkyLogic(tree, comp1, comp2, tr1, al1, tr2, al2)
def similarWx(self, tree, comp1, comp2):
# Returns true if wx stats are similar
al1 = comp1.getAreaLabel()
al2 = comp2.getAreaLabel()
tr1 = comp1.getTimeRange()
tr2 = comp2.getTimeRange()
return self.similarWxLogic(tree, comp1, comp2, tr1, al1, tr2, al2)
def similarPoP(self, tree, comp1, comp2):
# returns true if PoP stats are similar
stats1 = self.matchToWx(tree, comp1, "PoP")
stats2 = self.matchToWx(tree, comp2, "PoP")
if stats1 is None and stats2 is None:
return 1
# check for none
#if stats1 is None or stats2 is None:
# return 0
if stats1 == stats2:
return 1
if stats1 < self.pop_lower_threshold(tree, comp1) and \
stats2 < self.pop_lower_threshold(tree, comp2):
return 1
if stats1 > self.pop_upper_threshold(tree, comp1) and \
stats2 > self.pop_upper_threshold(tree, comp2):
return 1
return 0
## Submitted by <NAME> 3/05
## The problem with combining long time periods, is that the
## combined period is growing 12 hours at a time. If you get rid of the bleed
## over grids for MinT and MaxT (SampleAnalysis temporalCoverage_hours_dict),
## then you start returning None for either MaxT or MinT during these 12 hour periods.
## To combat this, I check the duration of tr1 and tr2.
## If it is 12 or less then I check to see if it is day or night.
## For MaxT, it will return a combine if the period is 12 hours or less and it is a
## nighttime period.
## For MinT, it will return a combine if the period is 12 hours
## or less and it is a daytime period. This allowed long periods to be grouped
## together without bleed over.
def similarMaxT(self, tree, comp1, comp2):
# returns true if temp stats are similar
# this number determines if components are close enough to combine
tempThreshold = 5 # degrees
al1 = comp1.getAreaLabel()
al2 = comp2.getAreaLabel()
tr1 = comp1.getTimeRange()
tr2 = comp2.getTimeRange()
hours = (tr2.endTime()-tr1.startTime())/3600
if hours <= 24:
return 1
if (tr1.duration()/3600) <= 12:
dayNight = self.getPeriod(tr1, 1)
if dayNight == self.NIGHTTIME():
return 1
if (tr2.duration()/3600) <= 12:
dayNight = self.getPeriod(tr2, 1)
if dayNight == self.NIGHTTIME():
return 1
stats1 = tree.stats.get("MaxT", tr1, al1, mergeMethod = "Average")
stats2 = tree.stats.get("MaxT", tr2, al2, mergeMethod = "Average")
# check for none
if stats1 is None or stats2 is None:
return 0
if abs(stats1 - stats2) < tempThreshold:
return 1
return 0
def similarMinT(self, tree, comp1, comp2):
# returns true if temp stats are similar
# this number determines if components are close enough to combine
tempThreshold = 5 # degrees
al1 = comp1.getAreaLabel()
al2 = comp2.getAreaLabel()
tr1 = comp1.getTimeRange()
tr2 = comp2.getTimeRange()
hours = (tr2.endTime()-tr1.startTime())/3600
if hours <= 24:
return 1
if (tr1.duration()/3600) <= 12:
dayNight = self.getPeriod(tr1, 1)
if dayNight == self.DAYTIME():
return 1
if (tr2.duration()/3600) <= 12:
dayNight = self.getPeriod(tr2, 1)
if dayNight == self.DAYTIME():
return 1
# check for none
stats1 = tree.stats.get("MinT", tr1, al1, mergeMethod = "Average")
stats2 = tree.stats.get("MinT", tr2, al2, mergeMethod = "Average")
if stats1 is None or stats2 is None:
return 0
if abs(stats1 - stats2) < tempThreshold:
return 1
return 0
def similarWaveHeight(self, tree, comp1, comp2):
# returns true if seas stats are similar
        # this number determines if components are close enough to combine
seaThreshold = 4 # feet
al1 = comp1.getAreaLabel()
al2 = comp2.getAreaLabel()
tr1 = comp1.getTimeRange()
tr2 = comp2.getTimeRange()
stats1 = tree.stats.get("WaveHeight", tr1, al1, mergeMethod ="Average")
stats2 = tree.stats.get("WaveHeight", tr2, al2, mergeMethod ="Average")
# check for none
        if stats1 is None or stats2 is None:
            return 0
if abs(stats1 - stats2) < seaThreshold:
return 1
return 0
def similarDiurnalSkyWx(self, tree, comp1, comp2):
return self.similar_diurnal(tree, comp1, comp2, ["Sky", "Wx"])
def similar_diurnal(self, tree, comp1, comp2, elementList):
# Returns true if stats for the given elements are similar
# in the night and morning AND the afternoon and evening.
# NOTE: the night and morning MAY be similar to the afternoon
# and evening, so word methods need to test for this case.
#
# Meant to handle the case of clouds and fog in the
# night and morning clearing in the afternoon and
# evening.
# Assumes comp2 is a 12-hour period.
#print "similar_diurnal"
al1 = comp1.getAreaLabel()
al2 = comp2.getAreaLabel()
comp1TR = comp1.getTimeRange()
comp2TR = comp2.getTimeRange()
# comp2 morning, comp2 afternoon OR
# comp2 evening, comp2 night
c2tr1, c2tr2 = self.divideRange(comp2TR,6)
comparisons = []
if comp1TR.duration() == 12*3600:
# Compare comp1 night to comp2 morning
# and comp1 evening to comp2 afternoon
# OR comp1 afternoon to comp2 evening
# and comp1 morning to comp2 night
c1tr1, c1tr2 = self.divideRange(comp1TR,6)
comparisons.append((c1tr1, c2tr2))
comparisons.append((c1tr2, c2tr1))
else:
# We have already combined at least once so
# comp1 is at least 24 hours. Use the most
# recent 24 hours for comparison.
# if comp2 is daytime:
# compare comp1 morning to comp2 morning
# and comp1 afternoon to comp2 afternoon
# else
# compare comp1 evening to comp2 evening
# and comp1 night to comp2 night
subRanges = self.divideRange(comp1TR, 6)
length = len(subRanges)-1
c1tr1 = subRanges[length-3]
c1tr2 = subRanges[length-2]
comparisons.append((c1tr1, c2tr1))
comparisons.append((c1tr2, c2tr2))
# Do comparisons
wordDict = {}
for element in elementList:
wordDict[element] = []
#print "\nComparisons"
for tr1, tr2 in comparisons:
for element in elementList:
#print "comparing", tr1, tr2
#print " ", element
                # dispatch to e.g. similarSkyLogic / similarWxLogic without exec
                flag = getattr(self, "similar" + element + "Logic")(
                    tree, comp1, comp2, tr1, al1, tr2, al2)
#print "flag", flag
if not flag:
#print "returning 0"
return 0
#print "returning 1"
return 1
def similarSkyLogic(self, tree, comp1, comp2, tr1, al1, tr2, al2):
stats1 = tree.stats.get("Sky", tr1, al1, mergeMethod ="Average")
stats2 = tree.stats.get("Sky", tr2, al2, mergeMethod ="Average")
# check for none
#print "stats1", stats1
#print "stats2", stats2
        if stats1 is None or stats2 is None:
            return 0
saveTR1 = comp1.timeRange
saveTR2 | |
the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessGroupIncludeOkta.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessGroupIncludeOkta.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
identity_provider_id: Optional[str] = None,
names: Optional[Sequence[str]] = None):
"""
:param Sequence[str] names: Friendly name of the Access Group.
"""
if identity_provider_id is not None:
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
if names is not None:
pulumi.set(__self__, "names", names)
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> Optional[str]:
return pulumi.get(self, "identity_provider_id")
@property
@pulumi.getter
def names(self) -> Optional[Sequence[str]]:
"""
Friendly name of the Access Group.
"""
return pulumi.get(self, "names")
@pulumi.output_type
class AccessGroupIncludeSaml(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "attributeName":
suggest = "attribute_name"
elif key == "attributeValue":
suggest = "attribute_value"
elif key == "identityProviderId":
suggest = "identity_provider_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccessGroupIncludeSaml. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessGroupIncludeSaml.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessGroupIncludeSaml.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
attribute_name: Optional[str] = None,
attribute_value: Optional[str] = None,
identity_provider_id: Optional[str] = None):
if attribute_name is not None:
pulumi.set(__self__, "attribute_name", attribute_name)
if attribute_value is not None:
pulumi.set(__self__, "attribute_value", attribute_value)
if identity_provider_id is not None:
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
@property
@pulumi.getter(name="attributeName")
def attribute_name(self) -> Optional[str]:
return pulumi.get(self, "attribute_name")
@property
@pulumi.getter(name="attributeValue")
def attribute_value(self) -> Optional[str]:
return pulumi.get(self, "attribute_value")
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> Optional[str]:
return pulumi.get(self, "identity_provider_id")
@pulumi.output_type
class AccessGroupRequire(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "anyValidServiceToken":
suggest = "any_valid_service_token"
elif key == "authMethod":
suggest = "auth_method"
elif key == "commonName":
suggest = "common_name"
elif key == "devicePostures":
suggest = "device_postures"
elif key == "emailDomains":
suggest = "email_domains"
elif key == "loginMethods":
suggest = "login_methods"
elif key == "serviceTokens":
suggest = "service_tokens"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccessGroupRequire. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessGroupRequire.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessGroupRequire.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
any_valid_service_token: Optional[bool] = None,
auth_method: Optional[str] = None,
azures: Optional[Sequence['outputs.AccessGroupRequireAzure']] = None,
certificate: Optional[bool] = None,
common_name: Optional[str] = None,
device_postures: Optional[Sequence[str]] = None,
email_domains: Optional[Sequence[str]] = None,
emails: Optional[Sequence[str]] = None,
everyone: Optional[bool] = None,
geos: Optional[Sequence[str]] = None,
githubs: Optional[Sequence['outputs.AccessGroupRequireGithub']] = None,
groups: Optional[Sequence[str]] = None,
gsuites: Optional[Sequence['outputs.AccessGroupRequireGsuite']] = None,
ips: Optional[Sequence[str]] = None,
login_methods: Optional[Sequence[str]] = None,
oktas: Optional[Sequence['outputs.AccessGroupRequireOkta']] = None,
samls: Optional[Sequence['outputs.AccessGroupRequireSaml']] = None,
service_tokens: Optional[Sequence[str]] = None):
if any_valid_service_token is not None:
pulumi.set(__self__, "any_valid_service_token", any_valid_service_token)
if auth_method is not None:
pulumi.set(__self__, "auth_method", auth_method)
if azures is not None:
pulumi.set(__self__, "azures", azures)
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
if common_name is not None:
pulumi.set(__self__, "common_name", common_name)
if device_postures is not None:
pulumi.set(__self__, "device_postures", device_postures)
if email_domains is not None:
pulumi.set(__self__, "email_domains", email_domains)
if emails is not None:
pulumi.set(__self__, "emails", emails)
if everyone is not None:
pulumi.set(__self__, "everyone", everyone)
if geos is not None:
pulumi.set(__self__, "geos", geos)
if githubs is not None:
pulumi.set(__self__, "githubs", githubs)
if groups is not None:
pulumi.set(__self__, "groups", groups)
if gsuites is not None:
pulumi.set(__self__, "gsuites", gsuites)
if ips is not None:
pulumi.set(__self__, "ips", ips)
if login_methods is not None:
pulumi.set(__self__, "login_methods", login_methods)
if oktas is not None:
pulumi.set(__self__, "oktas", oktas)
if samls is not None:
pulumi.set(__self__, "samls", samls)
if service_tokens is not None:
pulumi.set(__self__, "service_tokens", service_tokens)
@property
@pulumi.getter(name="anyValidServiceToken")
def any_valid_service_token(self) -> Optional[bool]:
return pulumi.get(self, "any_valid_service_token")
@property
@pulumi.getter(name="authMethod")
def auth_method(self) -> Optional[str]:
return pulumi.get(self, "auth_method")
@property
@pulumi.getter
def azures(self) -> Optional[Sequence['outputs.AccessGroupRequireAzure']]:
return pulumi.get(self, "azures")
@property
@pulumi.getter
def certificate(self) -> Optional[bool]:
return pulumi.get(self, "certificate")
@property
@pulumi.getter(name="commonName")
def common_name(self) -> Optional[str]:
return pulumi.get(self, "common_name")
@property
@pulumi.getter(name="devicePostures")
def device_postures(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "device_postures")
@property
@pulumi.getter(name="emailDomains")
def email_domains(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "email_domains")
@property
@pulumi.getter
def emails(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "emails")
@property
@pulumi.getter
def everyone(self) -> Optional[bool]:
return pulumi.get(self, "everyone")
@property
@pulumi.getter
def geos(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "geos")
@property
@pulumi.getter
def githubs(self) -> Optional[Sequence['outputs.AccessGroupRequireGithub']]:
return pulumi.get(self, "githubs")
@property
@pulumi.getter
def groups(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "groups")
@property
@pulumi.getter
def gsuites(self) -> Optional[Sequence['outputs.AccessGroupRequireGsuite']]:
return pulumi.get(self, "gsuites")
@property
@pulumi.getter
def ips(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "ips")
@property
@pulumi.getter(name="loginMethods")
def login_methods(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "login_methods")
@property
@pulumi.getter
def oktas(self) -> Optional[Sequence['outputs.AccessGroupRequireOkta']]:
return pulumi.get(self, "oktas")
@property
@pulumi.getter
def samls(self) -> Optional[Sequence['outputs.AccessGroupRequireSaml']]:
return pulumi.get(self, "samls")
@property
@pulumi.getter(name="serviceTokens")
def service_tokens(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "service_tokens")
@pulumi.output_type
class AccessGroupRequireAzure(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "identityProviderId":
suggest = "identity_provider_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccessGroupRequireAzure. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessGroupRequireAzure.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessGroupRequireAzure.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
identity_provider_id: Optional[str] = None,
ids: Optional[Sequence[str]] = None):
if identity_provider_id is not None:
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
if ids is not None:
pulumi.set(__self__, "ids", ids)
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> Optional[str]:
return pulumi.get(self, "identity_provider_id")
@property
@pulumi.getter
def ids(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "ids")
@pulumi.output_type
class AccessGroupRequireGithub(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "identityProviderId":
suggest = "identity_provider_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccessGroupRequireGithub. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessGroupRequireGithub.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessGroupRequireGithub.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
identity_provider_id: Optional[str] = None,
name: Optional[str] = None,
teams: Optional[Sequence[str]] = None):
"""
:param str name: Friendly name of the Access Group.
"""
if identity_provider_id is not None:
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
if name is not None:
pulumi.set(__self__, "name", name)
if teams is not None:
pulumi.set(__self__, "teams", teams)
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> Optional[str]:
return pulumi.get(self, "identity_provider_id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Friendly name of the Access Group.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def teams(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "teams")
@pulumi.output_type
class AccessGroupRequireGsuite(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "identityProviderId":
suggest = "identity_provider_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccessGroupRequireGsuite. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessGroupRequireGsuite.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessGroupRequireGsuite.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
emails: Optional[Sequence[str]] = None,
identity_provider_id: Optional[str] = None):
if emails is not None:
pulumi.set(__self__, "emails", emails)
if identity_provider_id is not None:
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
@property
@pulumi.getter
def emails(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "emails")
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> Optional[str]:
return pulumi.get(self, "identity_provider_id")
@pulumi.output_type
class AccessGroupRequireOkta(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "identityProviderId":
suggest = "identity_provider_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccessGroupRequireOkta. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessGroupRequireOkta.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessGroupRequireOkta.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
identity_provider_id: Optional[str] = None,
names: Optional[Sequence[str]] = None):
"""
:param Sequence[str] names: Friendly name of the Access Group.
"""
if identity_provider_id is not None:
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
if names is not None:
pulumi.set(__self__, "names", names)
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> Optional[str]:
return pulumi.get(self, "identity_provider_id")
@property
@pulumi.getter
def names(self) -> Optional[Sequence[str]]:
"""
Friendly name of the Access Group.
"""
return pulumi.get(self, "names")
@pulumi.output_type
class AccessGroupRequireSaml(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "attributeName":
suggest = "attribute_name"
elif key == "attributeValue":
suggest = "attribute_value"
elif key == "identityProviderId":
suggest = "identity_provider_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccessGroupRequireSaml. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessGroupRequireSaml.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessGroupRequireSaml.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
attribute_name: Optional[str] = None,
attribute_value: Optional[str] = None,
identity_provider_id: Optional[str] = None):
if attribute_name is not None:
pulumi.set(__self__, "attribute_name", attribute_name)
if attribute_value is not None:
pulumi.set(__self__, "attribute_value", attribute_value)
if identity_provider_id is not None:
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
@property
@pulumi.getter(name="attributeName")
def attribute_name(self) -> Optional[str]:
return pulumi.get(self, "attribute_name")
@property
@pulumi.getter(name="attributeValue")
def attribute_value(self) | |
"""
This file contains a function for reading in command-line arguments
so BIOM files or tab-delimited files can be read.
The BIOM format is a standardized biological format
that is commonly used to contain biological data.
Tab-delimited files should be supplied with the BIOM-format specified headers (# prefix).
The software can operate in two ways:
it can import all BIOM files in a folder,
or import separate BIOM files / tab-delimited files.
The file also defines a class for a Neo4j driver.
Given a running database, this driver can upload and delete experiments in the database.
"""
__author__ = '<NAME>'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
__license__ = 'Apache 2.0'
import os
import sys
import numpy as np
import pandas as pd
from biom import load_table
import zipfile
import yaml
import tempfile
import shutil
from pathlib import Path
from biom.parse import MetadataMap
import logging.handlers
from mako.scripts.utils import ParentDriver, _create_logger, \
_read_config, _get_path, _run_subbatch
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# handler to sys.stdout
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
# handler to file
# only handler with 'w' mode, rest is 'a'
# once this handler is started, the file writing is cleared
# other handlers append to the file
def start_biom(inputs):
"""
Takes all input and returns a dictionary of biom files.
If tab-delimited files are supplied, these are combined
into a biom file. These should be specified in the correct order.
This is mostly a utility wrapper, as all biom-related functions
are from biom-format.org.
:param inputs: Dictionary of arguments.
:return:
"""
# handler to file
# construct logger after filepath is provided
_create_logger(inputs['fp'])
if inputs['store_config']:
config = _read_config(inputs)
else:
config = inputs
try:
driver = Biom2Neo(uri=config['address'],
user=config['username'],
password=config['password'],
filepath=inputs['fp'],
encrypted=inputs['encryption'])
except KeyError:
logger.error("Login information not specified in arguments.", exc_info=True)
sys.exit()
check_arguments(inputs)
# Only process count files if present
if inputs['biom_file'] is not None:
try:
for x in inputs['biom_file']:
# first check if it is a file or path
logger.info('Working on ' + x + '...')
read_bioms(files=x, filepath=inputs['fp'], driver=driver, obs=inputs['obs'])
except Exception:
logger.error("Failed to import BIOM files.", exc_info=True)
if inputs['qza'] is not None:
try:
for x in inputs['qza']:
# first check if it is a file or path
logger.info('Working on ' + x + '...')
read_qiime2(files=x, filepath=inputs['fp'], driver=driver)
except Exception:
logger.error("Failed to import Qiime 2 artifact.", exc_info=True)
if inputs['count_table'] is not None:
try:
for i in range(len(inputs['count_table'])):
name, biomtab = read_tabs(inputs=inputs, i=i)
driver.convert_biom(biomfile=biomtab, exp_id=name, obs=inputs['obs'])
except Exception:
logger.warning("Failed to combine input files.", exc_info=True)
elif inputs['tax_table'] is not None:
try:
for x in inputs['tax_table']:
logger.info('Working on uploading separate taxonomy table ' + x + '...')
name, taxtab = read_taxonomy(filename=x, filepath=inputs['fp'])
driver.convert_taxonomy(taxonomy_table=taxtab, exp_id=name)
except Exception:
logger.warning("Failed to upload taxonomy table.", exc_info=True)
if inputs['delete']:
for name in inputs['delete']:
driver.delete_biom(name)
driver.close()
logger.info('Completed neo4biom operations! ')
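# Hedged usage sketch -- every path, address and credential below is an
# illustrative assumption, showing the kind of 'inputs' dictionary that
# start_biom() expects when importing a single BIOM file without a stored config:
#
#   inputs = {
#       'fp': '/tmp/mako_logs',
#       'store_config': False,
#       'address': 'bolt://localhost:7687',
#       'username': 'neo4j',
#       'password': 'test',
#       'encryption': False,
#       'obs': True,
#       'biom_file': ['otu_table.biom'],
#       'qza': None,
#       'count_table': None,
#       'tax_table': None,
#       'sample_meta': None,
#       'taxon_meta': None,
#       'delete': None,
#   }
#   start_biom(inputs)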
def check_arguments(inputs):
"""
Runs some initial checks before importing;
for example, whether each OTU table has a matching taxonomy table,
and if there are sample metadata files, whether each OTU table has one.
:param inputs: Arguments with files to import
    :return: None; the function exits via sys.exit() if a check fails
"""
if inputs['biom_file'] is not None:
logger.info('BIOM file(s) to process: ' + ", \n".join(inputs['biom_file']))
if inputs['qza'] is not None:
logger.info('Qiime 2 archive file(s) to process: ' + ", \n".join(inputs['qza']))
if inputs['count_table'] is not None:
logger.info('Tab-delimited OTU table(s) to process: \n' + ", \n".join(inputs['count_table']))
if inputs['tax_table'] is not None:
logger.info('Tab-delimited taxonomy table(s) to process: \n' + ", \n".join(inputs['tax_table']))
if inputs['sample_meta'] is not None:
        if len(inputs['count_table']) != len(inputs['sample_meta']):
logger.error("Add a sample data table for every OTU table!", exc_info=True)
sys.exit()
if inputs['taxon_meta'] is not None:
        if len(inputs['count_table']) != len(inputs['taxon_meta']):
logger.error("Add a metadata table for every OTU table!", exc_info=True)
sys.exit()
def read_bioms(files, filepath, driver, obs=True):
"""
Reads BIOM files from a list and calls the driver for each file.
4 ways of giving the filepaths are possible:
1. A complete filepath to the directory containing BIOMS
2. A complete filepath to the BIOM file(s)
    3. Filename of BIOM file(s) stored in the current working directory
4. Filename of BIOM file(s) stored in the filepath directory
The filename can also be a relative filepath.
:param files: List of BIOM filenames or file directories
:param filepath: Filepath where files are stored / written
:param obs: If false, counts aren't uploaded.
:param driver: Biom2Neo driver instance
:return:
"""
if os.path.isdir(files):
for y in os.listdir(files):
biomtab = load_table(files + '/' + y)
name = y.split(".")[0]
driver.convert_biom(biomfile=biomtab, exp_id=name)
else:
checked_path = _get_path(path=files, default=filepath)
if checked_path:
biomtab = load_table(checked_path)
name = files.split('/')[-1]
name = name.split('\\')[-1]
name = name.split(".")[0]
driver.convert_biom(biomfile=biomtab, exp_id=name)
else:
logger.error("Unable to read BIOM file, path is incorrect.")
sys.exit()
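# Hedged examples of the four path styles the docstring above describes
# (all paths are illustrative assumptions):
#   read_bioms(files='/data/bioms', filepath='/data', driver=driver)            # 1. directory of BIOM files
#   read_bioms(files='/data/bioms/run1.biom', filepath='/data', driver=driver)  # 2. full path to a file
#   read_bioms(files='run1.biom', filepath='/data', driver=driver)              # 3./4. bare filename resolved
#                                                                               #      against cwd or filepath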
def read_tabs(inputs, i):
"""
Reads tab-delimited files from lists of filenames.
These are then combined into a BIOM file.
The driver is then called to write the BIOM file to the database.
:param inputs:
:param i:
:param driver:
:return:
"""
input_fp = inputs['count_table'][i]
filepath = inputs['fp']
sample_metadata_fp = None
observation_metadata_fp = None
file_prefix = ''
checked_path = _get_path(path=input_fp, default=filepath)
if checked_path:
biomtab = load_table(checked_path)
file_prefix = ''
if os.path.isfile(os.getcwd() + '/' + input_fp):
file_prefix = os.getcwd() + '/'
elif os.path.isfile(filepath + '/' + input_fp):
file_prefix = filepath + '/'
else:
logger.warning("Failed to combine input files.", exc_info=True)
sys.exit()
name = input_fp.split('/')[-1]
name = name.split('\\')[-1]
name = name.split(".")[0]
# sample metadata is not mandatory, catches None
try:
sample_metadata_fp = file_prefix + inputs['sample_meta'][i]
    except (TypeError, KeyError):
pass
if sample_metadata_fp is not None:
sample_f = open(sample_metadata_fp, 'r')
sample_data = MetadataMap.from_file(sample_f)
sample_f.close()
biomtab.add_metadata(sample_data, axis='sample')
# taxonomy is recommended, many functions don't work without it
# still capture None
try:
observation_metadata_fp = file_prefix + inputs['tax_table'][i]
    except (TypeError, KeyError):
pass
if observation_metadata_fp is not None:
obs_f = open(observation_metadata_fp, 'r')
obs_data = MetadataMap.from_file(obs_f)
obs_f.close()
# for taxonomy collapsing,
# metadata variable needs to be a complete list
# not separate entries for each tax level
for b in list(obs_data):
tax = list()
for l in list(obs_data[b]):
tax.append(obs_data[b][l])
obs_data[b].pop(l, None)
obs_data[b]['taxonomy'] = tax
biomtab.add_metadata(obs_data, axis='observation')
# observation metadata is not mandatory, catches None
try:
observation_metadata_fp = file_prefix + inputs['taxon_meta'][i]
    except (TypeError, KeyError):
pass
if observation_metadata_fp is not None:
obs_f = open(observation_metadata_fp, 'r')
obs_data = MetadataMap.from_file(obs_f)
obs_f.close()
biomtab.add_metadata(obs_data, axis='observation')
return name, biomtab
def read_taxonomy(filename, filepath):
"""
Reads tab-delimited file representing a taxonomy table.
:param filename: Full or incomplete filepath to taxonomy
:param filepath: Extra filepath
:return:
"""
taxtab = None
checked_path = _get_path(path=filename, default=filepath)
if checked_path:
taxtab = pd.read_csv(checked_path, sep='\t', index_col=0)
else:
logger.warning("Failed to read taxonomy table.", exc_info=True)
sys.exit()
name = filename.split('/')[-1]
name = name.split('\\')[-1]
name = name.split(".")[0]
# sample metadata is not mandatory, catches None
return name, taxtab
def read_qiime2(files, filepath, driver):
"""
Reads a qza Qiime2 artifact and writes this to the Neo4j database.
The type information is used to create a new node label in the Neo4j database.
The uuid is used to create an Experiment node that links the artifact data.
If the artifact is an OTU table,
the import proceeds as if it was a BIOM file.
If the artifact is a taxonomy table,
the import proceeds as if it was a tab-delimited taxonomy file.
To avoid installing all of Qiime 2 as a dependency,
mako contains some utility functions that handle the unzipping
and reading of the Artifact files.
:param files: List of BIOM filenames or file directories
:param filepath: Filepath where files are stored / written
:param driver: Biom2Neo driver instance
:return:
"""
if os.path.isdir(files):
for y in os.listdir(files):
filepath = files + '/' + y
_upload_qiime2(filepath, driver)
else:
checked_path = _get_path(path=files, default=filepath)
if checked_path:
_upload_qiime2(checked_path, driver)
else:
logger.error("Unable to read qza file, path is incorrect.")
sys.exit()
def _upload_qiime2(filepath, driver):
artifact, file = _load_qiime2(filepath)
if artifact['type'] == 'FeatureTable[Frequency]':
name = artifact['uuid']
driver.convert_biom(biomfile=file, exp_id=name)
driver.query("MATCH (n:Experiment {name: '" + name +
"'}) SET n.type = '" + artifact['type'] +
"' SET n.format = '" + artifact['format'] +
"' RETURN n.format")
elif artifact['type'] == 'FeatureData[Taxonomy]':
name = artifact['uuid']
driver.convert_taxonomy(file, name)
def _load_qiime2(filepath):
"""
Loads a Qiime2 Artifact object and
returns this object as a tuple of metadata and BIOM table.
:param filepath: Complete filepath to Qiime2 object
:return:
"""
filepath = Path(filepath)
| |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for Markov Chain Monte Carlo (MCMC) sampling.
@@effective_sample_size
@@potential_scale_reduction
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import stats
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import nest_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'effective_sample_size',
'potential_scale_reduction',
]
def effective_sample_size(states,
filter_threshold=0.,
filter_beyond_lag=None,
filter_beyond_positive_pairs=False,
cross_chain_dims=None,
validate_args=False,
name=None):
"""Estimate a lower bound on effective sample size for each independent chain.
Roughly speaking, "effective sample size" (ESS) is the size of an iid sample
with the same variance as `state`.
More precisely, given a stationary sequence of possibly correlated random
variables `X_1, X_2, ..., X_N`, identically distributed, ESS is the
number such that
```
Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }.
```
If the sequence is uncorrelated, `ESS = N`. If the sequence is positively
auto-correlated, `ESS` will be less than `N`. If there are negative
correlations, then `ESS` can exceed `N`.
Some math shows that, with `R_k` the auto-correlation sequence,
`R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have
```
ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ]
```
This function estimates the above by first estimating the auto-correlation.
Since `R_k` must be estimated using only `N - k` samples, it becomes
progressively noisier for larger `k`. For this reason, the summation over
`R_k` should be truncated at some number `filter_beyond_lag < N`. This
function provides two methods to perform this truncation.
* `filter_threshold` -- since many MCMC methods generate chains where `R_k >
0`, a reasonable criterion is to truncate at the first index where the
estimated auto-correlation becomes negative. This method does not estimate
the `ESS` of super-efficient chains (where `ESS > N`) correctly.
* `filter_beyond_positive_pairs` -- reversible MCMC chains produce
an auto-correlation sequence with the property that pairwise sums of the
elements of that sequence are positive [Geyer][1], i.e.
`R_{2k} + R_{2k + 1} > 0` for `k in {0, ..., N/2}`. Deviations are only
possible due to noise. This method truncates the auto-correlation sequence
where the pairwise sums become non-positive.
The arguments `filter_beyond_lag`, `filter_threshold` and
`filter_beyond_positive_pairs` are filters intended to remove noisy tail terms
from `R_k`. You can combine `filter_beyond_lag` with `filter_threshold` or
  `filter_beyond_positive_pairs`. E.g., combining `filter_beyond_lag` and
`filter_beyond_positive_pairs` means that terms are removed if they were to be
filtered under the `filter_beyond_lag` OR `filter_beyond_positive_pairs`
criteria.
This function can also compute cross-chain ESS following
[Vehtari et al. (2019)][2] by specifying the `cross_chain_dims` argument.
Cross-chain ESS takes into account the cross-chain variance to reduce the ESS
in cases where the chains are not mixing well. In general, this will be a
smaller number than computing the ESS for individual chains and then summing
them. In an extreme case where the chains have fallen into K non-mixing modes,
this function will return ESS ~ K. Even when chains are mixing well it is
  still preferable to compute cross-chain ESS via this method because it will
reduce the noise in the estimate of `R_k`, reducing the need for truncation.
Args:
states: `Tensor` or Python structure of `Tensor` objects. Dimension zero
should index identically distributed states.
filter_threshold: `Tensor` or Python structure of `Tensor` objects. Must
broadcast with `state`. The sequence of auto-correlations is truncated
after the first appearance of a term less than `filter_threshold`.
Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`,
setting to any number less than `-1` has the same effect. Ignored if
`filter_beyond_positive_pairs` is `True`.
filter_beyond_lag: `Tensor` or Python structure of `Tensor` objects. Must
be `int`-like and scalar valued. The sequence of auto-correlations is
truncated to this length. Setting to `None` means we do not filter based
on the size of lags.
filter_beyond_positive_pairs: Python boolean. If `True`, only consider the
initial auto-correlation sequence where the pairwise sums are positive.
cross_chain_dims: An integer `Tensor` or a structure of integer `Tensors`
corresponding to each state component. If a list of `states` is provided,
then this argument should also be a list of the same length. Which
dimensions of `states` to treat as independent chains that ESS will be
summed over. If `None`, no summation is performed. Note this requires at
least 2 chains.
validate_args: Whether to add runtime checks of argument validity. If False,
and arguments are incorrect, correct behavior is not guaranteed.
name: `String` name to prepend to created ops.
Returns:
ess: `Tensor` structure parallel to `states`. The effective sample size of
each component of `states`. If `cross_chain_dims` is None, the shape will
be `states.shape[1:]`. Otherwise, the shape is `tf.reduce_mean(states,
cross_chain_dims).shape[1:]`.
Raises:
ValueError: If `states` and `filter_threshold` or `states` and
`filter_beyond_lag` are both structures of different shapes.
ValueError: If `cross_chain_dims` is not `None` and there are less than 2
chains.
#### Examples
We use ESS to estimate standard error.
```
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 1000 states from one chain.
states = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=tf.constant([0., 0.]),
trace_fn=None,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
states.shape
==> (1000, 2)
ess = effective_sample_size(states, filter_beyond_positive_pairs=True)
==> Shape (2,) Tensor
  mean, variance = tf.nn.moments(states, axes=[0])
standard_error = tf.sqrt(variance / ess)
```
#### References
[1]: <NAME>, Practical Markov chain Monte Carlo (with discussion).
Statistical Science, 7:473-511, 1992.
[2]: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Rank-normalization, folding, and localization: An improved R-hat
for assessing convergence of MCMC, 2019. Retrieved from
http://arxiv.org/abs/1903.08008
"""
if cross_chain_dims is None:
cross_chain_dims = nest_util.broadcast_structure(states, None)
filter_beyond_lag = nest_util.broadcast_structure(states, filter_beyond_lag)
filter_threshold = nest_util.broadcast_structure(states, filter_threshold)
filter_beyond_positive_pairs = nest_util.broadcast_structure(
states, filter_beyond_positive_pairs)
# Process items, one at a time.
def single_state(*args):
return _effective_sample_size_single_state(
*args, validate_args=validate_args)
with tf.name_scope('effective_sample_size' if name is None else name):
return nest.map_structure_up_to(
states,
single_state,
states, filter_beyond_lag, filter_threshold,
filter_beyond_positive_pairs, cross_chain_dims)
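# Hedged sketch of the cross-chain variant described in the docstring (shapes are
# assumed): for `states` of shape [num_results, num_chains, event_size], e.g. from
# running several chains in parallel, ESS can be pooled across chains with
#
#   ess = effective_sample_size(states, cross_chain_dims=1,
#                               filter_beyond_positive_pairs=True)
#   # ess has shape [event_size]: one pooled estimate per state component.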
def _effective_sample_size_single_state(states, filter_beyond_lag,
filter_threshold,
filter_beyond_positive_pairs,
cross_chain_dims,
validate_args):
"""ESS computation for one single Tensor argument."""
with tf.name_scope('effective_sample_size_single_state'):
states = tf.convert_to_tensor(states, name='states')
dt = states.dtype
# filter_beyond_lag == None ==> auto_corr is the full sequence.
auto_cov = stats.auto_correlation(
states, axis=0, max_lags=filter_beyond_lag, normalize=False)
n = _axis_size(states, axis=0)
if cross_chain_dims is not None:
num_chains = _axis_size(states, cross_chain_dims)
num_chains_ = tf.get_static_value(num_chains)
assertions = []
msg = ('When `cross_chain_dims` is not `None`, there must be > 1 chain '
'in `states`.')
if num_chains_ is not None:
if num_chains_ < 2:
raise ValueError(msg)
elif validate_args:
assertions.append(
assert_util.assert_greater(num_chains, 1., message=msg))
with tf.control_dependencies(assertions):
# We're computing the R[k] from equation 10 of Vehtari et al.
# (2019):
#
# R[k] := 1 - (W - 1/C * Sum_{c=1}^C s_c**2 R[k, c]) / (var^+),
#
# where:
# C := number of chains
# N := length of chains
# x_hat[c] := 1 / N Sum_{n=1}^N x[n, c], chain mean.
# x_hat := 1 / C Sum_{c=1}^C x_hat[c], overall mean.
# W := 1/C Sum_{c=1}^C s_c**2, within-chain variance.
# B := N / (C - 1) Sum_{c=1}^C (x_hat[c] - x_hat)**2, between chain
# variance.
# s_c**2 := 1 / (N - 1) Sum_{n=1}^N (x[n, c] - x_hat[c])**2, chain
# variance
# R[k, m] := auto_corr[k, m, ...], auto-correlation indexed by chain.
# var^+ := (N - 1) / N * W + B / N
cross_chain_dims = ps.non_negative_axis(
cross_chain_dims, ps.rank(states))
# B / N
between_chain_variance_div_n = _reduce_variance(
tf.reduce_mean(states, axis=0),
biased=False, # This makes the denominator be C - 1.
| |
# lib/rtorrent/__init__.py (GitHub repo with 100-1000 stars)
# Copyright (c) 2013 <NAME>, <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import urllib
import os.path
import time
import xmlrpclib
from rtorrent.common import find_torrent, \
is_valid_port, convert_version_tuple_to_str
from rtorrent.lib.torrentparser import TorrentParser
from rtorrent.lib.xmlrpc.http import HTTPServerProxy
from rtorrent.lib.xmlrpc.scgi import SCGIServerProxy
from rtorrent.rpc import Method
from rtorrent.lib.xmlrpc.basic_auth import BasicAuthTransport
from rtorrent.torrent import Torrent
from rtorrent.group import Group
import rtorrent.rpc # @UnresolvedImport
__version__ = "0.2.9"
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__license__ = "MIT"
MIN_RTORRENT_VERSION = (0, 8, 1)
MIN_RTORRENT_VERSION_STR = convert_version_tuple_to_str(MIN_RTORRENT_VERSION)
class RTorrent:
""" Create a new rTorrent connection """
rpc_prefix = None
    def __init__(self, uri, username=None, password=None,
verify=False, sp=None, sp_kwargs=None):
self.uri = uri # : From X{__init__(self, url)}
self.username = username
self.password = password
self.schema = urllib.splittype(uri)[0]
if sp:
self.sp = sp
elif self.schema in ['http', 'https']:
self.sp = HTTPServerProxy
elif self.schema == 'scgi':
self.sp = SCGIServerProxy
else:
raise NotImplementedError()
self.sp_kwargs = sp_kwargs or {}
self.torrents = [] # : List of L{Torrent} instances
self._rpc_methods = [] # : List of rTorrent RPC methods
self._torrent_cache = []
self._client_version_tuple = ()
if verify is True:
self._verify_conn()
def _get_conn(self):
"""Get ServerProxy instance"""
if self.username is not None and self.password is not None:
if self.schema == 'scgi':
raise NotImplementedError()
return self.sp(
self.uri,
transport=BasicAuthTransport(self.username, self.password),
**self.sp_kwargs
)
return self.sp(self.uri, **self.sp_kwargs)
def _verify_conn(self):
# check for rpc methods that should be available
assert "system.client_version" in self._get_rpc_methods(), "Required RPC method not available."
assert "system.library_version" in self._get_rpc_methods(), "Required RPC method not available."
# minimum rTorrent version check
assert self._meets_version_requirement() is True,\
"Error: Minimum rTorrent version required is {0}".format(
MIN_RTORRENT_VERSION_STR)
def _meets_version_requirement(self):
return self._get_client_version_tuple() >= MIN_RTORRENT_VERSION
def _get_client_version_tuple(self):
conn = self._get_conn()
if not self._client_version_tuple:
if not hasattr(self, "client_version"):
setattr(self, "client_version",
conn.system.client_version())
rtver = getattr(self, "client_version")
self._client_version_tuple = tuple([int(i) for i in
rtver.split(".")])
return self._client_version_tuple
def _update_rpc_methods(self):
self._rpc_methods = self._get_conn().system.listMethods()
return self._rpc_methods
def _get_rpc_methods(self):
""" Get list of raw RPC commands
@return: raw RPC commands
@rtype: list
"""
return(self._rpc_methods or self._update_rpc_methods())
def get_torrents(self, view="main"):
"""Get list of all torrents in specified view
@return: list of L{Torrent} instances
@rtype: list
@todo: add validity check for specified view
"""
self.torrents = []
methods = rtorrent.torrent.methods
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self)]
m = rtorrent.rpc.Multicall(self)
m.add("d.multicall", view, "d.get_hash=",
*[method.rpc_call + "=" for method in retriever_methods])
results = m.call()[0] # only sent one call, only need first result
for result in results:
results_dict = {}
# build results_dict
for m, r in zip(retriever_methods, result[1:]): # result[0] is the info_hash
results_dict[m.varname] = rtorrent.rpc.process_result(m, r)
self.torrents.append(
Torrent(self, info_hash=result[0], **results_dict)
)
self._manage_torrent_cache()
return(self.torrents)
def _manage_torrent_cache(self):
"""Carry tracker/peer/file lists over to new torrent list"""
for torrent in self._torrent_cache:
new_torrent = rtorrent.common.find_torrent(torrent.info_hash,
self.torrents)
if new_torrent is not None:
new_torrent.files = torrent.files
new_torrent.peers = torrent.peers
new_torrent.trackers = torrent.trackers
self._torrent_cache = self.torrents
def _get_load_function(self, file_type, start, verbose):
"""Determine correct "load torrent" RPC method"""
func_name = None
if file_type == "url":
# url strings can be input directly
if start and verbose:
func_name = "load_start_verbose"
elif start:
func_name = "load_start"
elif verbose:
func_name = "load_verbose"
else:
func_name = "load"
elif file_type in ["file", "raw"]:
if start and verbose:
func_name = "load_raw_start_verbose"
elif start:
func_name = "load_raw_start"
elif verbose:
func_name = "load_raw_verbose"
else:
func_name = "load_raw"
return(func_name)
def load_torrent(self, torrent, start=False, verbose=False, verify_load=True):
"""
Loads torrent into rTorrent (with various enhancements)
@param torrent: can be a url, a path to a local file, or the raw data
of a torrent file
@type torrent: str
@param start: start torrent when loaded
@type start: bool
@param verbose: print error messages to rTorrent log
@type verbose: bool
@param verify_load: verify that torrent was added to rTorrent successfully
@type verify_load: bool
@return: Depends on verify_load:
- if verify_load is True, (and the torrent was
loaded successfully), it'll return a L{Torrent} instance
- if verify_load is False, it'll return None
@rtype: L{Torrent} instance or None
@raise AssertionError: If the torrent wasn't successfully added to rTorrent
- Check L{TorrentParser} for the AssertionError's
it raises
@note: Because this function includes url verification (if a url was input)
as well as verification as to whether the torrent was successfully added,
this function doesn't execute instantaneously. If that's what you're
looking for, use load_torrent_simple() instead.
"""
p = self._get_conn()
tp = TorrentParser(torrent)
torrent = xmlrpclib.Binary(tp._raw_torrent)
info_hash = tp.info_hash
func_name = self._get_load_function("raw", start, verbose)
# load torrent
getattr(p, func_name)(torrent)
if verify_load:
MAX_RETRIES = 3
i = 0
while i < MAX_RETRIES:
self.get_torrents()
if info_hash in [t.info_hash for t in self.torrents]:
break
# was still getting AssertionErrors, delay should help
time.sleep(1)
i += 1
assert info_hash in [t.info_hash for t in self.torrents],\
"Adding torrent was unsuccessful."
return(find_torrent(info_hash, self.torrents))
def load_torrent_simple(self, torrent, file_type,
start=False, verbose=False):
"""Loads torrent into rTorrent
@param torrent: can be a url, a path to a local file, or the raw data
of a torrent file
@type torrent: str
@param file_type: valid options: "url", "file", or "raw"
@type file_type: str
@param start: start torrent when loaded
@type start: bool
@param verbose: print error messages to rTorrent log
@type verbose: bool
@return: None
@raise AssertionError: if incorrect file_type is specified
@note: This function was written for speed, it includes no enhancements.
If you input a url, it won't check if it's valid. You also can't get
verification that the torrent was successfully added to rTorrent.
Use load_torrent() if you would like these features.
"""
p = self._get_conn()
assert file_type in ["raw", "file", "url"], \
"Invalid file_type, options are: 'url', 'file', 'raw'."
func_name = self._get_load_function(file_type, start, verbose)
if file_type == "file":
# since we have to assume we're connected to a remote rTorrent
# client, we have to read the file and send it to rT as raw
assert os.path.isfile(torrent), \
"Invalid path: \"{0}\"".format(torrent)
torrent = open(torrent, "rb").read()
if file_type in ["raw", "file"]:
finput = xmlrpclib.Binary(torrent)
elif file_type == "url":
finput = torrent
getattr(p, func_name)(finput)
def get_views(self):
p = self._get_conn()
return p.view_list()
def create_group(self, name, persistent=True, view=None):
p = self._get_conn()
if persistent is True:
p.group.insert_persistent_view('', name)
else:
assert view is not None, "view parameter required on non-persistent groups"
p.group.insert('', name, view)
self._update_rpc_methods()
def get_group(self, name):
assert name is not None, "group name required"
group = Group(self, name)
group.update()
return group
def set_dht_port(self, port):
"""Set DHT port
@param port: port
@type port: int
@raise AssertionError: if invalid port is given
"""
assert is_valid_port(port), "Valid port range is 0-65535"
        self.dht_port = self._get_conn().set_dht_port(port)
def enable_check_hash(self):
"""Alias for set_check_hash(True)"""
self.set_check_hash(True)
def disable_check_hash(self):
"""Alias for set_check_hash(False)"""
self.set_check_hash(False)
def find_torrent(self, info_hash):
"""Frontend for rtorrent.common.find_torrent"""
return(rtorrent.common.find_torrent(info_hash, self.get_torrents()))
def poll(self):
""" poll rTorrent to get latest torrent/peer/tracker/file information
@note: This essentially refreshes every aspect of the rTorrent
connection, so it can be very slow if working with a remote
connection that has a lot of torrents loaded.
@return: None
"""
self.update()
torrents = self.get_torrents()
for t in torrents:
t.poll()
def update(self):
"""Refresh rTorrent client info
@note: All fields are stored as attributes to self.
@return: None
"""
multicall = rtorrent.rpc.Multicall(self)
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self)]
for method in retriever_methods:
multicall.add(method)
multicall.call()
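# Hedged usage sketch (the URL and torrent path are illustrative assumptions only):
#
#   client = RTorrent('http://localhost/RPC2', verify=True)
#   torrent = client.load_torrent('/tmp/example.torrent', start=True)
#   for t in client.get_torrents():
#       print(t.info_hash)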
def _build_class_methods(class_obj):
    # multicall add