code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k items) | extract_api (string, 75 to 3.25M chars) |
---|---|---|
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph
class LegalDoc:
def __init__(self, path):
self.path = path
styles = getSampleStyleSheet()
self._styleN = styles["Normal"]
self._styleH1 = styles["Heading1"]
self._styleH2 = styles["Heading2"]
self.page = 0
doc = BaseDocTemplate(self.path, pagesize=A4)
frame = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height - 2 * cm, id="normal")
template = PageTemplate(id="legal_doc", frames=frame, onPage=self.header_footer)
doc.addPageTemplates([template])
text = []
for i in range(111):
text.append(Paragraph("This is line %d." % i, self._styleN))
doc.build(text)
def header_footer(self, canvas, doc):
self.page += 1
canvas.saveState()
P = Paragraph("This is a multi-line header. It goes on every page. " * 2, self._styleN)
w, h = P.wrap(doc.width, doc.topMargin)
P.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin - h)
canvas.restoreState()
canvas.saveState()
P = Paragraph("This is a multi-line footer:%s. It goes on every page. " % self.page, self._styleN)
w, h = P.wrap(doc.width, doc.bottomMargin)
P.drawOn(canvas, doc.leftMargin, h)
canvas.restoreState()
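# Usage sketch (hypothetical output path, not part of the original sample): building the
# document writes a PDF whose header is repeated on every page and whose footer carries a
# per-page counter, both drawn by the PageTemplate onPage callback above.
#
#   LegalDoc("legal_doc.pdf")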
|
[
"reportlab.platypus.PageTemplate",
"reportlab.platypus.Paragraph",
"reportlab.lib.styles.getSampleStyleSheet",
"reportlab.platypus.BaseDocTemplate",
"reportlab.platypus.Frame"
] |
[((298, 319), 'reportlab.lib.styles.getSampleStyleSheet', 'getSampleStyleSheet', ([], {}), '()\n', (317, 319), False, 'from reportlab.lib.styles import getSampleStyleSheet\n'), ((483, 522), 'reportlab.platypus.BaseDocTemplate', 'BaseDocTemplate', (['self.path'], {'pagesize': 'A4'}), '(self.path, pagesize=A4)\n', (498, 522), False, 'from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph\n'), ((539, 628), 'reportlab.platypus.Frame', 'Frame', (['doc.leftMargin', 'doc.bottomMargin', 'doc.width', '(doc.height - 2 * cm)'], {'id': '"""normal"""'}), "(doc.leftMargin, doc.bottomMargin, doc.width, doc.height - 2 * cm, id=\n 'normal')\n", (544, 628), False, 'from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph\n'), ((643, 712), 'reportlab.platypus.PageTemplate', 'PageTemplate', ([], {'id': '"""legal_doc"""', 'frames': 'frame', 'onPage': 'self.header_footer'}), "(id='legal_doc', frames=frame, onPage=self.header_footer)\n", (655, 712), False, 'from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph\n'), ((1006, 1095), 'reportlab.platypus.Paragraph', 'Paragraph', (["('This is a multi-line header. It goes on every page. ' * 2)", 'self._styleN'], {}), "('This is a multi-line header. It goes on every page. ' * 2,\n self._styleN)\n", (1015, 1095), False, 'from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph\n'), ((1283, 1383), 'reportlab.platypus.Paragraph', 'Paragraph', (["('This is a multi-line footer:%s. It goes on every page. ' % self.page)", 'self._styleN'], {}), "('This is a multi-line footer:%s. It goes on every page. ' %\n self.page, self._styleN)\n", (1292, 1383), False, 'from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph\n'), ((826, 873), 'reportlab.platypus.Paragraph', 'Paragraph', (["('This is line %d.' % i)", 'self._styleN'], {}), "('This is line %d.' % i, self._styleN)\n", (835, 873), False, 'from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph\n')]
|
from datetime import datetime
import uuid
from sqlalchemy.exc import IntegrityError
from dataservice.api.study.models import Study
from dataservice.api.participant.models import Participant
from dataservice.api.outcome.models import Outcome
from dataservice.extensions import db
from tests.utils import FlaskTestCase
class ModelTest(FlaskTestCase):
"""
Test Outcome database model
"""
def test_create(self):
"""
Test create outcome
"""
# Create study
study = Study(external_id='phs001')
# Create and save participant
participant_id = 'Test subject 0'
p = Participant(external_id=participant_id, is_proband=True,
study=study)
db.session.add(p)
db.session.commit()
# Create outcomes
data = {
'external_id': 'test_0',
'vital_status': 'Alive',
'disease_related': False,
'age_at_event_days': 120,
'participant_id': p.kf_id
}
dt = datetime.now()
o1 = Outcome(**data)
db.session.add(o1)
data['vital_status'] = 'Deceased'
data['disease_related'] = 'True'
o2 = Outcome(**data)
db.session.add(o2)
db.session.commit()
self.assertEqual(Outcome.query.count(), 2)
new_outcome = Outcome.query.all()[1]
self.assertGreater(new_outcome.created_at, dt)
self.assertGreater(new_outcome.modified_at, dt)
self.assertIs(type(uuid.UUID(new_outcome.uuid)), uuid.UUID)
self.assertEqual(new_outcome.vital_status, data['vital_status'])
self.assertEqual(new_outcome.disease_related,
data['disease_related'])
def test_create_via_participant(self):
"""
Test create outcomes via creation of participant
"""
outcomes, p = self._create_outcomes()
oc = ['Deceased', 'Alive']
# Check outcomes were created
self.assertEqual(Outcome.query.count(), 2)
        # Check Participant has the outcomes
for o in Participant.query.first().outcomes:
self.assertIn(o.vital_status, oc)
# Outcomes have the participant
p = Participant.query.first()
for o in Outcome.query.all():
self.assertEqual(o.participant_id, p.kf_id)
def test_find_outcome(self):
"""
Test find one outcome
"""
outcomes, p = self._create_outcomes()
# Find outcome
oc = ['Deceased', 'Alive']
o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
self.assertEqual(o.vital_status, oc[0])
def test_update_outcome(self):
"""
Test update outcome
"""
outcomes, p = self._create_outcomes()
# Update and save
oc = ['Deceased', 'Alive']
o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
o.outcome = oc[1]
db.session.commit()
# Check updated values
o = Outcome.query.filter_by(vital_status=oc[1]).one_or_none()
self.assertIsNot(o, None)
def test_delete_outcome(self):
"""
Test delete outcome
"""
outcomes, p = self._create_outcomes()
# Choose one and delete it
oc = ['Deceased', 'Alive']
o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
db.session.delete(o)
db.session.commit()
o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
self.assertIs(o, None)
outcomes = [_o for _o in p.outcomes]
self.assertNotIn(o, outcomes)
def test_delete_outcome_via_participant(self):
"""
Test delete related outcomes via deletion of participant
"""
outcomes, p = self._create_outcomes()
# Delete participant
db.session.delete(p)
db.session.commit()
# Check that outcomes have been deleted
oc = ['Deceased', 'Alive']
o1 = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
o2 = Outcome.query.filter_by(vital_status=oc[1]).one_or_none()
self.assertIs(o1, None)
self.assertIs(o2, None)
def test_not_null_constraint(self):
"""
        Test that an outcome cannot be created without required
parameters such as participant_id
"""
# Create outcome
data = {
'vital_status': 'Alive',
# non-existent required param: participant_id
}
o = Outcome(**data)
# Add to db
self.assertRaises(IntegrityError, db.session.add(o))
def test_foreign_key_constraint(self):
"""
        Test that an outcome cannot be created without an existing
        reference Participant. This checks the foreign key constraint
"""
# Create outcome
data = {
'vital_status': 'Alive',
            'participant_id': ''  # blank foreign key
}
o = Outcome(**data)
# Add to db
self.assertRaises(IntegrityError, db.session.add(o))
def _create_outcomes(self):
"""
Create outcome and required entities
"""
# Create study
study = Study(external_id='phs001')
# Create two outcomes
oc = ['Deceased', 'Alive']
o1 = Outcome(vital_status=oc[0])
o2 = Outcome(vital_status=oc[1])
p = Participant(external_id='p1', is_proband=True, study=study)
# Add to participant and save
p.outcomes.extend([o1, o2])
db.session.add(p)
db.session.commit()
return [o1, o2], p
|
[
"dataservice.api.outcome.models.Outcome",
"dataservice.api.participant.models.Participant",
"uuid.UUID",
"dataservice.api.outcome.models.Outcome.query.filter_by",
"dataservice.extensions.db.session.commit",
"dataservice.extensions.db.session.delete",
"dataservice.extensions.db.session.add",
"datetime.datetime.now",
"dataservice.api.outcome.models.Outcome.query.all",
"dataservice.api.outcome.models.Outcome.query.count",
"dataservice.api.study.models.Study",
"dataservice.api.participant.models.Participant.query.first"
] |
[((520, 547), 'dataservice.api.study.models.Study', 'Study', ([], {'external_id': '"""phs001"""'}), "(external_id='phs001')\n", (525, 547), False, 'from dataservice.api.study.models import Study\n'), ((640, 709), 'dataservice.api.participant.models.Participant', 'Participant', ([], {'external_id': 'participant_id', 'is_proband': '(True)', 'study': 'study'}), '(external_id=participant_id, is_proband=True, study=study)\n', (651, 709), False, 'from dataservice.api.participant.models import Participant\n'), ((742, 759), 'dataservice.extensions.db.session.add', 'db.session.add', (['p'], {}), '(p)\n', (756, 759), False, 'from dataservice.extensions import db\n'), ((768, 787), 'dataservice.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (785, 787), False, 'from dataservice.extensions import db\n'), ((1043, 1057), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1055, 1057), False, 'from datetime import datetime\n'), ((1071, 1086), 'dataservice.api.outcome.models.Outcome', 'Outcome', ([], {}), '(**data)\n', (1078, 1086), False, 'from dataservice.api.outcome.models import Outcome\n'), ((1095, 1113), 'dataservice.extensions.db.session.add', 'db.session.add', (['o1'], {}), '(o1)\n', (1109, 1113), False, 'from dataservice.extensions import db\n'), ((1210, 1225), 'dataservice.api.outcome.models.Outcome', 'Outcome', ([], {}), '(**data)\n', (1217, 1225), False, 'from dataservice.api.outcome.models import Outcome\n'), ((1234, 1252), 'dataservice.extensions.db.session.add', 'db.session.add', (['o2'], {}), '(o2)\n', (1248, 1252), False, 'from dataservice.extensions import db\n'), ((1261, 1280), 'dataservice.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1278, 1280), False, 'from dataservice.extensions import db\n'), ((2228, 2253), 'dataservice.api.participant.models.Participant.query.first', 'Participant.query.first', ([], {}), '()\n', (2251, 2253), False, 'from dataservice.api.participant.models import Participant\n'), ((2271, 2290), 'dataservice.api.outcome.models.Outcome.query.all', 'Outcome.query.all', ([], {}), '()\n', (2288, 2290), False, 'from dataservice.api.outcome.models import Outcome\n'), ((2959, 2978), 'dataservice.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2976, 2978), False, 'from dataservice.extensions import db\n'), ((3398, 3418), 'dataservice.extensions.db.session.delete', 'db.session.delete', (['o'], {}), '(o)\n', (3415, 3418), False, 'from dataservice.extensions import db\n'), ((3427, 3446), 'dataservice.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3444, 3446), False, 'from dataservice.extensions import db\n'), ((3857, 3877), 'dataservice.extensions.db.session.delete', 'db.session.delete', (['p'], {}), '(p)\n', (3874, 3877), False, 'from dataservice.extensions import db\n'), ((3886, 3905), 'dataservice.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3903, 3905), False, 'from dataservice.extensions import db\n'), ((4525, 4540), 'dataservice.api.outcome.models.Outcome', 'Outcome', ([], {}), '(**data)\n', (4532, 4540), False, 'from dataservice.api.outcome.models import Outcome\n'), ((4984, 4999), 'dataservice.api.outcome.models.Outcome', 'Outcome', ([], {}), '(**data)\n', (4991, 4999), False, 'from dataservice.api.outcome.models import Outcome\n'), ((5223, 5250), 'dataservice.api.study.models.Study', 'Study', ([], {'external_id': '"""phs001"""'}), "(external_id='phs001')\n", (5228, 5250), False, 'from dataservice.api.study.models import Study\n'), ((5329, 
5356), 'dataservice.api.outcome.models.Outcome', 'Outcome', ([], {'vital_status': 'oc[0]'}), '(vital_status=oc[0])\n', (5336, 5356), False, 'from dataservice.api.outcome.models import Outcome\n'), ((5370, 5397), 'dataservice.api.outcome.models.Outcome', 'Outcome', ([], {'vital_status': 'oc[1]'}), '(vital_status=oc[1])\n', (5377, 5397), False, 'from dataservice.api.outcome.models import Outcome\n'), ((5410, 5469), 'dataservice.api.participant.models.Participant', 'Participant', ([], {'external_id': '"""p1"""', 'is_proband': '(True)', 'study': 'study'}), "(external_id='p1', is_proband=True, study=study)\n", (5421, 5469), False, 'from dataservice.api.participant.models import Participant\n'), ((5553, 5570), 'dataservice.extensions.db.session.add', 'db.session.add', (['p'], {}), '(p)\n', (5567, 5570), False, 'from dataservice.extensions import db\n'), ((5579, 5598), 'dataservice.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5596, 5598), False, 'from dataservice.extensions import db\n'), ((1307, 1328), 'dataservice.api.outcome.models.Outcome.query.count', 'Outcome.query.count', ([], {}), '()\n', (1326, 1328), False, 'from dataservice.api.outcome.models import Outcome\n'), ((1355, 1374), 'dataservice.api.outcome.models.Outcome.query.all', 'Outcome.query.all', ([], {}), '()\n', (1372, 1374), False, 'from dataservice.api.outcome.models import Outcome\n'), ((2005, 2026), 'dataservice.api.outcome.models.Outcome.query.count', 'Outcome.query.count', ([], {}), '()\n', (2024, 2026), False, 'from dataservice.api.outcome.models import Outcome\n'), ((2093, 2118), 'dataservice.api.participant.models.Participant.query.first', 'Participant.query.first', ([], {}), '()\n', (2116, 2118), False, 'from dataservice.api.participant.models import Participant\n'), ((4604, 4621), 'dataservice.extensions.db.session.add', 'db.session.add', (['o'], {}), '(o)\n', (4618, 4621), False, 'from dataservice.extensions import db\n'), ((5063, 5080), 'dataservice.extensions.db.session.add', 'db.session.add', (['o'], {}), '(o)\n', (5077, 5080), False, 'from dataservice.extensions import db\n'), ((1516, 1543), 'uuid.UUID', 'uuid.UUID', (['new_outcome.uuid'], {}), '(new_outcome.uuid)\n', (1525, 1543), False, 'import uuid\n'), ((2553, 2596), 'dataservice.api.outcome.models.Outcome.query.filter_by', 'Outcome.query.filter_by', ([], {'vital_status': 'oc[0]'}), '(vital_status=oc[0])\n', (2576, 2596), False, 'from dataservice.api.outcome.models import Outcome\n'), ((2867, 2910), 'dataservice.api.outcome.models.Outcome.query.filter_by', 'Outcome.query.filter_by', ([], {'vital_status': 'oc[0]'}), '(vital_status=oc[0])\n', (2890, 2910), False, 'from dataservice.api.outcome.models import Outcome\n'), ((3023, 3066), 'dataservice.api.outcome.models.Outcome.query.filter_by', 'Outcome.query.filter_by', ([], {'vital_status': 'oc[1]'}), '(vital_status=oc[1])\n', (3046, 3066), False, 'from dataservice.api.outcome.models import Outcome\n'), ((3332, 3375), 'dataservice.api.outcome.models.Outcome.query.filter_by', 'Outcome.query.filter_by', ([], {'vital_status': 'oc[0]'}), '(vital_status=oc[0])\n', (3355, 3375), False, 'from dataservice.api.outcome.models import Outcome\n'), ((3460, 3503), 'dataservice.api.outcome.models.Outcome.query.filter_by', 'Outcome.query.filter_by', ([], {'vital_status': 'oc[0]'}), '(vital_status=oc[0])\n', (3483, 3503), False, 'from dataservice.api.outcome.models import Outcome\n'), ((4003, 4046), 'dataservice.api.outcome.models.Outcome.query.filter_by', 'Outcome.query.filter_by', ([], {'vital_status': 
'oc[0]'}), '(vital_status=oc[0])\n', (4026, 4046), False, 'from dataservice.api.outcome.models import Outcome\n'), ((4074, 4117), 'dataservice.api.outcome.models.Outcome.query.filter_by', 'Outcome.query.filter_by', ([], {'vital_status': 'oc[1]'}), '(vital_status=oc[1])\n', (4097, 4117), False, 'from dataservice.api.outcome.models import Outcome\n')]
|
import logging
from werkzeug.utils import cached_property
from wtforms import FormField, Form, StringField
logger = logging.getLogger(__name__)
def get_form_class(validators):
class YearMonthDateForm(Form):
year = StringField(validators=validators)
month = StringField()
@cached_property
def data(self):
data = super().data
try:
return "{year:04d}-{month:02d}".format(
year=int(data["year"]), month=int(data["month"])
)
except (TypeError, ValueError):
return None
return YearMonthDateForm
class MonthYearDateField(FormField):
def __init__(self, validators, **kwargs):
form_class = get_form_class(validators)
super().__init__(form_class, **kwargs)
def process(self, formdata, data=None):
if data is not None:
substrings = data.split("-")
data = {"year": substrings[0], "month": substrings[1]}
super().process(formdata, data)
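# Usage sketch (hypothetical enclosing form and field name, not from the original source):
#
#   class ReferencePeriodForm(Form):
#       period = MonthYearDateField(validators=[])
#
# The composite field's `data` property renders the sub-fields as "YYYY-MM"
# (e.g. year=2021, month=3 gives "2021-03"), and `process` splits an incoming
# "YYYY-MM" string back into its year and month parts.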
|
[
"logging.getLogger",
"wtforms.StringField"
] |
[((118, 145), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (135, 145), False, 'import logging\n'), ((230, 264), 'wtforms.StringField', 'StringField', ([], {'validators': 'validators'}), '(validators=validators)\n', (241, 264), False, 'from wtforms import FormField, Form, StringField\n'), ((281, 294), 'wtforms.StringField', 'StringField', ([], {}), '()\n', (292, 294), False, 'from wtforms import FormField, Form, StringField\n')]
|
from abc import abstractmethod, ABC
from datetime import datetime, timezone
from typing import Any, List, Tuple, Dict
from blurr.core.base import BaseSchema
from blurr.core.store_key import Key, KeyType
class StoreSchema(BaseSchema):
pass
class Store(ABC):
""" Base Store that allows for data to be persisted during / after transformation """
@abstractmethod
def get_all(self, identity: str) -> Dict[Key, Any]:
"""
Gets all the items for an identity
"""
raise NotImplementedError()
@abstractmethod
def get(self, key: Key) -> Any:
"""
Gets an item by key
"""
raise NotImplementedError()
def get_range(self,
base_key: Key,
start_time: datetime,
end_time: datetime = None,
count: int = 0) -> List[Tuple[Key, Any]]:
"""
Returns the list of items from the store based on the given time range or count.
:param base_key: Items which don't start with the base_key are filtered out.
        :param start_time: Start time for the range query
        :param end_time: End time of the range query. If None, count is used.
:param count: The number of items to be returned. Used if end_time is not specified.
"""
if end_time and count:
raise ValueError('Only one of `end` or `count` can be set')
if count:
end_time = datetime.min.replace(
tzinfo=timezone.utc) if count < 0 else datetime.max.replace(tzinfo=timezone.utc)
if end_time < start_time:
start_time, end_time = end_time, start_time
if base_key.key_type == KeyType.TIMESTAMP:
start_key = Key(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], start_time)
end_key = Key(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], end_time)
return self._get_range_timestamp_key(start_key, end_key, count)
else:
return self._get_range_dimension_key(base_key, start_time, end_time, count)
@abstractmethod
def _get_range_timestamp_key(self, start: Key, end: Key,
count: int = 0) -> List[Tuple[Key, Any]]:
"""
Returns the list of items from the store based on the given time range or count.
This is used when the key being used is a TIMESTAMP key.
"""
raise NotImplementedError()
def get_time_range(self, identity, group, start_time, end_time) -> List[Tuple[Key, Any]]:
raise NotImplementedError()
def get_count_range(self, identity, group, time, count):
raise NotImplementedError()
@abstractmethod
def _get_range_dimension_key(self,
base_key: Key,
start_time: datetime,
end_time: datetime,
count: int = 0) -> List[Tuple[Key, Any]]:
"""
Returns the list of items from the store based on the given time range or count.
This is used when the key being used is a DIMENSION key.
"""
raise NotImplementedError()
@staticmethod
def _restrict_items_to_count(items: List[Tuple[Key, Any]], count: int) -> List[Tuple[Key, Any]]:
"""
        Restricts the list to at most abs(count) items when len(items) is larger than
        abs(count). This function assumes that items is sorted by time.
        :param items: The items to restrict.
        :param count: The number of items to return. A negative count keeps items from the end.
"""
if abs(count) > len(items):
count = Store._sign(count) * len(items)
if count < 0:
return items[count:]
else:
return items[:count]
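    # Example of the clamping and sign handling above: for five time-sorted items,
    # count = 2 keeps the first two, count = -2 keeps the last two, and any count
    # with abs(count) > len(items) is clamped to len(items) with its sign preserved.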
@abstractmethod
def save(self, key: Key, item: Any) -> None:
"""
Saves an item to store
"""
raise NotImplementedError()
@abstractmethod
def delete(self, key: Key) -> None:
"""
Deletes an item from the store by key
"""
raise NotImplementedError()
@abstractmethod
def finalize(self) -> None:
"""
Finalizes the store by flushing all remaining data to persistence
"""
raise NotImplementedError()
@staticmethod
def _sign(x: int) -> int:
return (1, -1)[x < 0]
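# Usage sketch (assumes a concrete Store subclass plus a previously saved base key;
# `store`, `base_key`, `now`, `t0` and `t1` are hypothetical):
#
#   # Last five items before `now` (a negative count queries backwards in time
#   # from start_time):
#   items = store.get_range(base_key, start_time=now, count=-5)
#
#   # All items between two instants; end_time and count are mutually exclusive:
#   items = store.get_range(base_key, start_time=t0, end_time=t1)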
|
[
"datetime.datetime.max.replace",
"datetime.datetime.min.replace",
"blurr.core.store_key.Key"
] |
[((1744, 1817), 'blurr.core.store_key.Key', 'Key', (['KeyType.TIMESTAMP', 'base_key.identity', 'base_key.group', '[]', 'start_time'], {}), '(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], start_time)\n', (1747, 1817), False, 'from blurr.core.store_key import Key, KeyType\n'), ((1840, 1911), 'blurr.core.store_key.Key', 'Key', (['KeyType.TIMESTAMP', 'base_key.identity', 'base_key.group', '[]', 'end_time'], {}), '(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], end_time)\n', (1843, 1911), False, 'from blurr.core.store_key import Key, KeyType\n'), ((1458, 1499), 'datetime.datetime.min.replace', 'datetime.min.replace', ([], {'tzinfo': 'timezone.utc'}), '(tzinfo=timezone.utc)\n', (1478, 1499), False, 'from datetime import datetime, timezone\n'), ((1535, 1576), 'datetime.datetime.max.replace', 'datetime.max.replace', ([], {'tzinfo': 'timezone.utc'}), '(tzinfo=timezone.utc)\n', (1555, 1576), False, 'from datetime import datetime, timezone\n')]
|
import time, copy
import asyncio
class TempRefManager:
def __init__(self):
self.refs = []
self.running = False
def add_ref(self, ref, lifetime, on_shutdown):
expiry_time = time.time() + lifetime
self.refs.append((ref, expiry_time, on_shutdown))
def purge_all(self):
"""Purges all refs, regardless of expiry time
Only call this when Seamless is shutting down"""
while len(self.refs):
ref, _, on_shutdown = self.refs.pop(0)
if not on_shutdown:
continue
try:
ref()
except:
pass
def purge(self):
"""Purges expired refs"""
t = time.time()
for item in copy.copy(self.refs):
ref, expiry_time, _ = item
if expiry_time < t:
self.refs.remove(item)
ref()
async def loop(self):
if self.running:
return
self.running = True
while 1:
try:
self.purge()
except Exception:
import traceback
traceback.print_exc()
await asyncio.sleep(0.05)
self.running = False
temprefmanager = TempRefManager()
coro = temprefmanager.loop()
import asyncio
task = asyncio.ensure_future(coro)
import atexit
atexit.register(lambda *args, **kwargs: task.cancel())
|
[
"asyncio.ensure_future",
"asyncio.sleep",
"copy.copy",
"traceback.print_exc",
"time.time"
] |
[((1313, 1340), 'asyncio.ensure_future', 'asyncio.ensure_future', (['coro'], {}), '(coro)\n', (1334, 1340), False, 'import asyncio\n'), ((710, 721), 'time.time', 'time.time', ([], {}), '()\n', (719, 721), False, 'import time, copy\n'), ((742, 762), 'copy.copy', 'copy.copy', (['self.refs'], {}), '(self.refs)\n', (751, 762), False, 'import time, copy\n'), ((206, 217), 'time.time', 'time.time', ([], {}), '()\n', (215, 217), False, 'import time, copy\n'), ((1177, 1196), 'asyncio.sleep', 'asyncio.sleep', (['(0.05)'], {}), '(0.05)\n', (1190, 1196), False, 'import asyncio\n'), ((1137, 1158), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1156, 1158), False, 'import traceback\n')]
|
import yaml
import forest
from forest import main
def test_earth_networks_loader_given_pattern():
loader = forest.Loader.from_pattern("Label", "EarthNetworks*.txt", "earth_networks")
assert isinstance(loader, forest.earth_networks.Loader)
def test_build_loader_given_files():
"""replicate main.py as close as possible"""
files = ["file_20190101T0000Z.nc"]
args = main.parse_args.parse_args(files)
config = forest.config.from_files(args.files, args.file_type)
group = config.file_groups[0]
loader = forest.Loader.group_args(group, args)
assert isinstance(loader, forest.data.DBLoader)
assert loader.locator.paths == files
def test_build_loader_given_database(tmpdir):
"""replicate main.py as close as possible"""
database_file = str(tmpdir / "database.db")
config_file = str(tmpdir / "config.yml")
settings = {
"files": [
{
"label": "UM",
"pattern": "*.nc",
"locator": "database"
}
]
}
with open(config_file, "w") as stream:
yaml.dump(settings, stream)
args = main.parse_args.parse_args([
"--database", database_file,
"--config-file", config_file])
config = forest.config.load_config(args.config_file)
group = config.file_groups[0]
database = forest.db.Database.connect(database_file)
loader = forest.Loader.group_args(group, args, database=database)
database.close()
assert hasattr(loader.locator, "connection")
assert loader.locator.directory is None
def test_build_loader_given_database_and_directory(tmpdir):
database_file = str(tmpdir / "database.db")
config_file = str(tmpdir / "config.yml")
args = main.parse_args.parse_args([
"--database", database_file,
"--config-file", config_file])
label = "UM"
pattern = "*.nc"
directory = "/some/dir"
group = forest.config.FileGroup(
label,
pattern,
directory=directory,
locator="database")
database = forest.db.Database.connect(database_file)
loader = forest.Loader.group_args(group, args, database=database)
database.close()
assert hasattr(loader.locator, "connection")
assert loader.locator.directory == directory
def test_build_loader_given_config_file_pattern(tmpdir):
config_file = str(tmpdir / "config.yml")
path = str(tmpdir / "file_20190101T0000Z.nc")
with open(path, "w"):
pass
args = main.parse_args.parse_args([
"--config-file", config_file])
label = "UM"
pattern = "*.nc"
directory = str(tmpdir)
group = forest.config.FileGroup(
label,
pattern,
directory=directory,
locator="file_system")
loader = forest.Loader.group_args(group, args)
assert loader.locator.paths == [path]
def test_build_loader_given_eida50_file_type():
label = "EIDA50"
pattern = "eida50*.nc"
file_type = "eida50"
loader = forest.Loader.from_pattern(label, pattern, file_type)
assert isinstance(loader, forest.satellite.EIDA50)
assert isinstance(loader.locator, forest.satellite.Locator)
def test_build_loader_given_rdt_file_type():
loader = forest.Loader.from_pattern(
"Label", "*.json", "rdt")
assert isinstance(loader, forest.rdt.Loader)
assert isinstance(loader.locator, forest.rdt.Locator)
def test_replace_dir_given_args_dir_only():
check_replace_dir("args/dir", None, "args/dir")
def test_replace_dir_given_group_dir_only():
check_replace_dir(None, "group/dir", "group/dir")
def test_replace_dir_given_relative_group_dir_appends_to_args_dir():
check_replace_dir("args/dir", "leaf", "args/dir/leaf")
def test_replace_dir_given_absolute_group_dir_overrides_rel_args_dir():
check_replace_dir("args/relative", "/group/absolute", "/group/absolute")
def test_replace_dir_given_absolute_group_dir_overrides_abs_args_dir():
check_replace_dir("/args/absolute", "/group/absolute", "/group/absolute")
def check_replace_dir(args_dir, group_dir, expected):
actual = forest.Loader.replace_dir(args_dir, group_dir)
assert actual == expected
def test_full_pattern_given_name_only():
check_full_pattern("file.nc", None, None, "file.nc")
def test_full_pattern_given_relative_prefix_dir():
check_full_pattern("file.nc", None, "prefix", "prefix/file.nc")
def test_full_pattern_given_relative_leaf_and_prefix_dir():
check_full_pattern("file.nc", "leaf", "prefix", "prefix/leaf/file.nc")
def test_full_pattern_given_absolute_leaf_ignores_relative_prefix():
check_full_pattern("file.nc", "/leaf", "prefix", "/leaf/file.nc")
def test_full_pattern_given_absolute_leaf_ignores_absolute_prefix():
check_full_pattern("file.nc", "/leaf", "/prefix", "/leaf/file.nc")
def check_full_pattern(name, leaf, prefix, expected):
actual = forest.Loader.full_pattern(name, leaf, prefix)
assert actual == expected
|
[
"forest.Loader.from_pattern",
"forest.Loader.group_args",
"forest.config.load_config",
"forest.config.FileGroup",
"yaml.dump",
"forest.Loader.full_pattern",
"forest.main.parse_args.parse_args",
"forest.db.Database.connect",
"forest.config.from_files",
"forest.Loader.replace_dir"
] |
[((113, 188), 'forest.Loader.from_pattern', 'forest.Loader.from_pattern', (['"""Label"""', '"""EarthNetworks*.txt"""', '"""earth_networks"""'], {}), "('Label', 'EarthNetworks*.txt', 'earth_networks')\n", (139, 188), False, 'import forest\n'), ((387, 420), 'forest.main.parse_args.parse_args', 'main.parse_args.parse_args', (['files'], {}), '(files)\n', (413, 420), False, 'from forest import main\n'), ((434, 486), 'forest.config.from_files', 'forest.config.from_files', (['args.files', 'args.file_type'], {}), '(args.files, args.file_type)\n', (458, 486), False, 'import forest\n'), ((534, 571), 'forest.Loader.group_args', 'forest.Loader.group_args', (['group', 'args'], {}), '(group, args)\n', (558, 571), False, 'import forest\n'), ((1131, 1222), 'forest.main.parse_args.parse_args', 'main.parse_args.parse_args', (["['--database', database_file, '--config-file', config_file]"], {}), "(['--database', database_file, '--config-file',\n config_file])\n", (1157, 1222), False, 'from forest import main\n'), ((1249, 1292), 'forest.config.load_config', 'forest.config.load_config', (['args.config_file'], {}), '(args.config_file)\n', (1274, 1292), False, 'import forest\n'), ((1342, 1383), 'forest.db.Database.connect', 'forest.db.Database.connect', (['database_file'], {}), '(database_file)\n', (1368, 1383), False, 'import forest\n'), ((1397, 1453), 'forest.Loader.group_args', 'forest.Loader.group_args', (['group', 'args'], {'database': 'database'}), '(group, args, database=database)\n', (1421, 1453), False, 'import forest\n'), ((1734, 1825), 'forest.main.parse_args.parse_args', 'main.parse_args.parse_args', (["['--database', database_file, '--config-file', config_file]"], {}), "(['--database', database_file, '--config-file',\n config_file])\n", (1760, 1825), False, 'from forest import main\n'), ((1917, 2002), 'forest.config.FileGroup', 'forest.config.FileGroup', (['label', 'pattern'], {'directory': 'directory', 'locator': '"""database"""'}), "(label, pattern, directory=directory, locator='database'\n )\n", (1940, 2002), False, 'import forest\n'), ((2062, 2103), 'forest.db.Database.connect', 'forest.db.Database.connect', (['database_file'], {}), '(database_file)\n', (2088, 2103), False, 'import forest\n'), ((2117, 2173), 'forest.Loader.group_args', 'forest.Loader.group_args', (['group', 'args'], {'database': 'database'}), '(group, args, database=database)\n', (2141, 2173), False, 'import forest\n'), ((2497, 2555), 'forest.main.parse_args.parse_args', 'main.parse_args.parse_args', (["['--config-file', config_file]"], {}), "(['--config-file', config_file])\n", (2523, 2555), False, 'from forest import main\n'), ((2643, 2731), 'forest.config.FileGroup', 'forest.config.FileGroup', (['label', 'pattern'], {'directory': 'directory', 'locator': '"""file_system"""'}), "(label, pattern, directory=directory, locator=\n 'file_system')\n", (2666, 2731), False, 'import forest\n'), ((2789, 2826), 'forest.Loader.group_args', 'forest.Loader.group_args', (['group', 'args'], {}), '(group, args)\n', (2813, 2826), False, 'import forest\n'), ((3005, 3058), 'forest.Loader.from_pattern', 'forest.Loader.from_pattern', (['label', 'pattern', 'file_type'], {}), '(label, pattern, file_type)\n', (3031, 3058), False, 'import forest\n'), ((3238, 3290), 'forest.Loader.from_pattern', 'forest.Loader.from_pattern', (['"""Label"""', '"""*.json"""', '"""rdt"""'], {}), "('Label', '*.json', 'rdt')\n", (3264, 3290), False, 'import forest\n'), ((4112, 4158), 'forest.Loader.replace_dir', 'forest.Loader.replace_dir', (['args_dir', 'group_dir'], {}), 
'(args_dir, group_dir)\n', (4137, 4158), False, 'import forest\n'), ((4899, 4945), 'forest.Loader.full_pattern', 'forest.Loader.full_pattern', (['name', 'leaf', 'prefix'], {}), '(name, leaf, prefix)\n', (4925, 4945), False, 'import forest\n'), ((1091, 1118), 'yaml.dump', 'yaml.dump', (['settings', 'stream'], {}), '(settings, stream)\n', (1100, 1118), False, 'import yaml\n')]
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Rule(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, rule_id: int=None, rule_name: str=None, description: str=None, priority: int=None): # noqa: E501
"""Rule - a model defined in Swagger
:param rule_id: The rule_id of this Rule. # noqa: E501
:type rule_id: int
:param rule_name: The rule_name of this Rule. # noqa: E501
:type rule_name: str
:param description: The description of this Rule. # noqa: E501
:type description: str
:param priority: The priority of this Rule. # noqa: E501
:type priority: int
"""
self.swagger_types = {
'rule_id': int,
'rule_name': str,
'description': str,
'priority': int
}
self.attribute_map = {
'rule_id': 'ruleID',
'rule_name': 'ruleName',
'description': 'description',
'priority': 'priority'
}
self._rule_id = rule_id
self._rule_name = rule_name
self._description = description
self._priority = priority
@classmethod
def from_dict(cls, dikt) -> 'Rule':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The rule of this Rule. # noqa: E501
:rtype: Rule
"""
return util.deserialize_model(dikt, cls)
@property
def rule_id(self) -> int:
"""Gets the rule_id of this Rule.
Unique ID of the rule # noqa: E501
:return: The rule_id of this Rule.
:rtype: int
"""
return self._rule_id
@rule_id.setter
def rule_id(self, rule_id: int):
"""Sets the rule_id of this Rule.
Unique ID of the rule # noqa: E501
:param rule_id: The rule_id of this Rule.
:type rule_id: int
"""
self._rule_id = rule_id
@property
def rule_name(self) -> str:
"""Gets the rule_name of this Rule.
name of rule # noqa: E501
:return: The rule_name of this Rule.
:rtype: str
"""
return self._rule_name
@rule_name.setter
def rule_name(self, rule_name: str):
"""Sets the rule_name of this Rule.
name of rule # noqa: E501
:param rule_name: The rule_name of this Rule.
:type rule_name: str
"""
if rule_name is None:
raise ValueError("Invalid value for `rule_name`, must not be `None`") # noqa: E501
self._rule_name = rule_name
@property
def description(self) -> str:
"""Gets the description of this Rule.
description of rule # noqa: E501
:return: The description of this Rule.
:rtype: str
"""
return self._description
@description.setter
def description(self, description: str):
"""Sets the description of this Rule.
description of rule # noqa: E501
:param description: The description of this Rule.
:type description: str
"""
if description is None:
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def priority(self) -> int:
"""Gets the priority of this Rule.
        the current value it has towards the dataset, used to get more of a certain rule  # noqa: E501
:return: The priority of this Rule.
:rtype: int
"""
return self._priority
@priority.setter
def priority(self, priority: int):
"""Sets the priority of this Rule.
        the current value it has towards the dataset, used to get more of a certain rule  # noqa: E501
:param priority: The priority of this Rule.
:type priority: int
"""
if priority is None:
raise ValueError("Invalid value for `priority`, must not be `None`") # noqa: E501
self._priority = priority
|
[
"swagger_server.util.deserialize_model"
] |
[((1688, 1721), 'swagger_server.util.deserialize_model', 'util.deserialize_model', (['dikt', 'cls'], {}), '(dikt, cls)\n', (1710, 1721), False, 'from swagger_server import util\n')]
|
from leetcode_tester import Tester
from typing import Optional, List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
r = 0
for i in range(1, len(prices)):
if prices[i] > prices[i - 1]:
r += prices[i] - prices[i - 1]
return r
if __name__ == '__main__':
solution = Solution()
test = Tester(solution.maxProfit)
test.addTest(
[7, 1, 5, 3, 6, 4], 7
)
test.addTest(
[1, 2, 3, 4, 5], 4
)
test.addTest(
[7, 6, 4, 3, 1], 0
)
test.doTest()
|
[
"leetcode_tester.Tester"
] |
[((365, 391), 'leetcode_tester.Tester', 'Tester', (['solution.maxProfit'], {}), '(solution.maxProfit)\n', (371, 391), False, 'from leetcode_tester import Tester\n')]
|
import torch
import json
import os
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
data_folder = "./dataset/images"
press_times = json.load(open("./dataset/dataset.json"))
image_roots = [os.path.join(data_folder,image_file) \
for image_file in os.listdir(data_folder)]
class JumpDataset(Dataset):
def __init__(self,transform = None):
self.image_roots = image_roots
self.press_times = press_times
self.transform = transform
def __len__(self):
return len(self.image_roots)
def __getitem__(self,idx):
image_root = self.image_roots[idx]
image_name = image_root.split("/")[-1]
image = Image.open(image_root)
image = image.convert('RGB')
image = image.resize((224,224), resample=Image.LANCZOS)
#image = np.array(image, dtype=np.float32)
if self.transform is not None:
image = self.transform(image)
press_time = self.press_times[image_name]
return image,press_time
def jump_data_loader():
normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
transform = transforms.Compose([transforms.ToTensor(),normalize])
dataset = JumpDataset(transform=transform)
return DataLoader(dataset,batch_size = 32,shuffle = True)
|
[
"os.listdir",
"PIL.Image.open",
"os.path.join",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.ToTensor"
] |
[((274, 311), 'os.path.join', 'os.path.join', (['data_folder', 'image_file'], {}), '(data_folder, image_file)\n', (286, 311), False, 'import os\n'), ((1136, 1228), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.92206, 0.92206, 0.92206]', 'std': '[0.08426, 0.08426, 0.08426]'}), '(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, \n 0.08426, 0.08426])\n', (1156, 1228), True, 'import torchvision.transforms as transforms\n'), ((1353, 1401), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(32)', 'shuffle': '(True)'}), '(dataset, batch_size=32, shuffle=True)\n', (1363, 1401), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((347, 370), 'os.listdir', 'os.listdir', (['data_folder'], {}), '(data_folder)\n', (357, 370), False, 'import os\n'), ((756, 778), 'PIL.Image.open', 'Image.open', (['image_root'], {}), '(image_root)\n', (766, 778), False, 'from PIL import Image\n'), ((1260, 1281), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1279, 1281), True, 'import torchvision.transforms as transforms\n')]
|
import json
from grafana_backup.dashboardApi import create_snapshot
def main(args, settings, file_path):
grafana_url = settings.get('GRAFANA_URL')
http_post_headers = settings.get('HTTP_POST_HEADERS')
verify_ssl = settings.get('VERIFY_SSL')
client_cert = settings.get('CLIENT_CERT')
debug = settings.get('DEBUG')
with open(file_path, 'r') as f:
data = f.read()
snapshot = json.loads(data)
try:
snapshot['name'] = snapshot['dashboard']['title']
except KeyError:
snapshot['name'] = "Untitled Snapshot"
(status, content) = create_snapshot(json.dumps(snapshot), grafana_url, http_post_headers, verify_ssl, client_cert, debug)
if status == 200:
print("create snapshot: {0}, status: {1}, msg: {2}".format(snapshot['name'], status, content))
else:
print("creating snapshot {0} failed with status {1}".format(snapshot['name'], status))
|
[
"json.loads",
"json.dumps"
] |
[((412, 428), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (422, 428), False, 'import json\n'), ((605, 625), 'json.dumps', 'json.dumps', (['snapshot'], {}), '(snapshot)\n', (615, 625), False, 'import json\n')]
|
""" Test case for Keras """
from perceptron.zoo.ssd_300.keras_ssd300 import SSD300
from perceptron.models.detection.keras_ssd300 import KerasSSD300Model
from perceptron.utils.image import load_image
from perceptron.benchmarks.brightness import BrightnessMetric
from perceptron.utils.criteria.detection import TargetClassMiss
from perceptron.utils.tools import bcolors
from perceptron.utils.tools import plot_image_objectdetection
# instantiate the model from keras applications
ssd300 = SSD300()
# initialize the KerasResNet50RetinaNetModel
kmodel = KerasSSD300Model(ssd300, bounds=(0, 255))
# get source image and label
# the model expects values in [0, 1], and channels_last
image = load_image(shape=(300, 300), bounds=(0, 255), fname='car.png')
metric = BrightnessMetric(kmodel, criterion=TargetClassMiss(7))
print(bcolors.BOLD + 'Process start' + bcolors.ENDC)
adversary = metric(image, unpack=False)
print(bcolors.BOLD + 'Process finished' + bcolors.ENDC)
if adversary.image is None:
print(bcolors.WARNING + 'Warning: Cannot find an adversary!' + bcolors.ENDC)
exit(-1)
################### print summary info #####################################
keywords = ['Keras', 'SSD300', 'TargetClassMiss', 'BrightnessMetric']
print(bcolors.HEADER + bcolors.UNDERLINE + 'Summary:' + bcolors.ENDC)
print('Configuration:' + bcolors.CYAN + ' --framework %s '
'--model %s --criterion %s '
'--metric %s' % tuple(keywords) + bcolors.ENDC)
print('Minimum perturbation required: %s' % bcolors.BLUE
+ str(adversary.distance) + bcolors.ENDC)
print('\n')
# print the original image and the adversary
plot_image_objectdetection(adversary, kmodel, bounds=(0, 255), title=", ".join(keywords), figname='examples/images/%s.png' % '_'.join(keywords))
|
[
"perceptron.models.detection.keras_ssd300.KerasSSD300Model",
"perceptron.utils.image.load_image",
"perceptron.utils.criteria.detection.TargetClassMiss",
"perceptron.zoo.ssd_300.keras_ssd300.SSD300"
] |
[((502, 510), 'perceptron.zoo.ssd_300.keras_ssd300.SSD300', 'SSD300', ([], {}), '()\n', (508, 510), False, 'from perceptron.zoo.ssd_300.keras_ssd300 import SSD300\n'), ((567, 608), 'perceptron.models.detection.keras_ssd300.KerasSSD300Model', 'KerasSSD300Model', (['ssd300'], {'bounds': '(0, 255)'}), '(ssd300, bounds=(0, 255))\n', (583, 608), False, 'from perceptron.models.detection.keras_ssd300 import KerasSSD300Model\n'), ((707, 769), 'perceptron.utils.image.load_image', 'load_image', ([], {'shape': '(300, 300)', 'bounds': '(0, 255)', 'fname': '"""car.png"""'}), "(shape=(300, 300), bounds=(0, 255), fname='car.png')\n", (717, 769), False, 'from perceptron.utils.image import load_image\n'), ((817, 835), 'perceptron.utils.criteria.detection.TargetClassMiss', 'TargetClassMiss', (['(7)'], {}), '(7)\n', (832, 835), False, 'from perceptron.utils.criteria.detection import TargetClassMiss\n')]
|
"""
Routines for the analysis of proton radiographs. These routines can be broadly
classified as either creating synthetic radiographs from prescribed fields or
methods of 'inverting' experimentally created radiographs to reconstruct the
original fields (under some set of assumptions).
"""
__all__ = [
"SyntheticProtonRadiograph",
]
import astropy.constants as const
import astropy.units as u
import numpy as np
import sys
import warnings
from tqdm import tqdm
from plasmapy import particles
from plasmapy.formulary.mathematics import rot_a_to_b
from plasmapy.particles import Particle
from plasmapy.plasma.grids import AbstractGrid
from plasmapy.simulation.particle_integrators import boris_push
def _coerce_to_cartesian_si(pos):
"""
    Takes a tuple of `astropy.units.Quantity` values representing a position
in space in either Cartesian, cylindrical, or spherical coordinates, and
returns a numpy array representing the same point in Cartesian
coordinates and units of meters.
"""
# Auto-detect geometry based on units
geo_units = [x.unit for x in pos]
if geo_units[2].is_equivalent(u.rad):
geometry = "spherical"
elif geo_units[1].is_equivalent(u.rad):
geometry = "cylindrical"
else:
geometry = "cartesian"
# Convert geometrical inputs between coordinates systems
pos_out = np.zeros(3)
if geometry == "cartesian":
x, y, z = pos
pos_out[0] = x.to(u.m).value
pos_out[1] = y.to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "cylindrical":
r, t, z = pos
r = r.to(u.m)
t = t.to(u.rad).value
z = z.to(u.m)
pos_out[0] = (r * np.cos(t)).to(u.m).value
pos_out[1] = (r * np.sin(t)).to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "spherical":
r, t, p = pos
r = r.to(u.m)
t = t.to(u.rad).value
p = p.to(u.rad).value
pos_out[0] = (r * np.sin(t) * np.cos(p)).to(u.m).value
pos_out[1] = (r * np.sin(t) * np.sin(p)).to(u.m).value
pos_out[2] = (r * np.cos(t)).to(u.m).value
return pos_out
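# Example (consistent with the unit-based auto-detection above): the spherical input
#   (1 * u.m, np.pi / 2 * u.rad, 0 * u.rad)
# is recognised as spherical by the radian units of its third component and is
# returned as approximately [1.0, 0.0, 0.0] (Cartesian, in meters).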
class SyntheticProtonRadiograph:
r"""
Represents a charged particle radiography experiment with simulated or
calculated E and B fields given at positions defined by a grid of spatial
coordinates. The particle source and detector plane are defined by vectors
from the origin of the grid.
Parameters
----------
grid : `~plasmapy.plasma.grids.AbstractGrid` or subclass thereof
A Grid object containing the required quantities [E_x, E_y, E_z, B_x, B_y, B_z].
If any of these quantities are missing, a warning will be given and that
quantity will be assumed to be zero everywhere.
source : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the location
of the particle source. This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
detector : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center
of the detector plane. The vector from the source point to this
point defines the normal vector of the detector plane. This vector
can also be specified in cartesian, cylindrical, or spherical
coordinates (see the `source` keyword).
detector_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the detector plane. By default, the horizontal axis in the
detector plane is defined to be perpendicular to both the
source-to-detector vector and the z-axis (unless the source-to-detector axis
is parallel to the z axis, in which case the horizontal axis is the x-axis).
The detector vertical axis is then defined
to be orthogonal to both the source-to-detector vector and the
detector horizontal axis.
verbose : bool, optional
If true, updates on the status of the program will be printed
into the standard output while running.
"""
def __init__(
self,
grid: AbstractGrid,
source: u.m,
detector: u.m,
detector_hdir=None,
verbose=True,
):
# self.grid is the grid object
self.grid = grid
# self.grid_arr is the grid positions in si units. This is created here
        # so that it isn't continuously called later
self.grid_arr = grid.grid.to(u.m).value
self.verbose = verbose
# A list of wire meshes added to the grid with add_wire_mesh
# Particles that would hit these meshes will be removed at runtime
# by _apply_wire_mesh
self.mesh_list = []
# ************************************************************************
# Setup the source and detector geometries
# ************************************************************************
self.source = _coerce_to_cartesian_si(source)
self.detector = _coerce_to_cartesian_si(detector)
self._log(f"Source: {self.source} m")
self._log(f"Detector: {self.detector} m")
# Calculate normal vectors (facing towards the grid origin) for both
# the source and detector planes
self.src_n = -self.source / np.linalg.norm(self.source)
self.det_n = -self.detector / np.linalg.norm(self.detector)
# Vector directly from source to detector
self.src_det = self.detector - self.source
# Magnification
self.mag = 1 + np.linalg.norm(self.detector) / np.linalg.norm(self.source)
self._log(f"Magnification: {self.mag}")
# Check that source-detector vector actually passes through the grid
if not self.grid.vector_intersects(self.source * u.m, self.detector * u.m):
raise ValueError(
"The vector between the source and the detector "
"does not intersect the grid provided!"
)
# Determine the angle above which particles will not hit the grid
# these particles can be ignored until the end of the simulation,
# then immediately advanced to the detector grid with their original
# velocities
self.max_theta_hit_grid = self._max_theta_hit_grid()
# ************************************************************************
# Define the detector plane
# ************************************************************************
# Load or calculate the detector hdir
if detector_hdir is not None:
self.det_hdir = detector_hdir / np.linalg.norm(detector_hdir)
else:
self.det_hdir = self._default_detector_hdir()
# Calculate the detector vdir
ny = np.cross(self.det_hdir, self.det_n)
self.det_vdir = -ny / np.linalg.norm(ny)
# ************************************************************************
# Validate the E and B fields
# ************************************************************************
req_quantities = ["E_x", "E_y", "E_z", "B_x", "B_y", "B_z"]
self.grid.require_quantities(req_quantities, replace_with_zeros=True)
for rq in req_quantities:
# Check that there are no infinite values
if not np.isfinite(self.grid[rq].value).all():
raise ValueError(
f"Input arrays must be finite: {rq} contains "
"either NaN or infinite values."
)
# Check that the max values on the edges of the arrays are
# small relative to the maximum values on that grid
#
# Array must be dimensionless to re-assemble it into an array
# of max values like this
arr = np.abs(self.grid[rq]).value
edge_max = np.max(
np.array(
[
np.max(arr[0, :, :]),
np.max(arr[-1, :, :]),
np.max(arr[:, 0, :]),
np.max(arr[:, -1, :]),
np.max(arr[:, :, 0]),
np.max(arr[:, :, -1]),
]
)
)
if edge_max > 1e-3 * np.max(arr):
unit = grid.recognized_quantities[rq].unit
warnings.warn(
"Fields should go to zero at edges of grid to avoid "
f"non-physical effects, but a value of {edge_max:.2E} {unit} was "
f"found on the edge of the {rq} array. Consider applying a "
"envelope function to force the fields at the edge to go to "
"zero.",
RuntimeWarning,
)
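    # Construction sketch (hypothetical grid and geometry, not from the original source):
    #
    #   grid = ...  # an AbstractGrid subclass carrying E_x, E_y, E_z, B_x, B_y, B_z
    #   sim = SyntheticProtonRadiograph(
    #       grid,
    #       source=(1 * u.mm, np.pi / 2 * u.rad, np.pi * u.rad),   # spherical, on -x
    #       detector=(10 * u.mm, np.pi / 2 * u.rad, 0 * u.rad),    # spherical, on +x
    #       verbose=False,
    #   )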
def _default_detector_hdir(self):
"""
Calculates the default horizontal unit vector for the detector plane
(see __init__ description for details)
"""
# Create unit vectors that define the detector plane
# Define plane horizontal axis
if np.allclose(np.abs(self.det_n), np.array([0, 0, 1])):
nx = np.array([1, 0, 0])
else:
nx = np.cross(np.array([0, 0, 1]), self.det_n)
nx = nx / np.linalg.norm(nx)
return nx
def _max_theta_hit_grid(self):
r"""
Using the grid and the source position, compute the maximum particle
theta that will impact the grid. This value can be used to determine
which particles are worth tracking.
"""
ind = 0
theta = np.zeros([8])
for x in [0, -1]:
for y in [0, -1]:
for z in [0, -1]:
# Source to grid corner vector
vec = self.grid_arr[x, y, z, :] - self.source
# Calculate angle between vec and the source-to-detector
# axis, which is the central axis of the particle beam
theta[ind] = np.arccos(
np.dot(vec, self.src_det)
/ np.linalg.norm(vec)
/ np.linalg.norm(self.src_det)
)
ind += 1
return np.max(theta)
def _log(self, msg):
if self.verbose:
print(msg)
# Define some constants so they don't get constantly re-evaluated
_c = const.c.si.value
# *************************************************************************
# Create mesh
# *************************************************************************
def add_wire_mesh(
self, location, extent, nwires, wire_diameter, mesh_hdir=None, mesh_vdir=None
):
"""
Add a wire mesh grid between the particle source and the object grid
that blocks particles whose paths intersect the wires.
Parameters
----------
location : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center of the
mesh grid. This location must be between the source and the
object grid.
This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
extent : Tuple of 1 or 2 `~astropy.units.Quantity`
The size of the mesh grid (in the mesh plane). If one value
is provided, the mesh is circular and the value provided is
interpreted as the diameter. If two values are provided, the
            mesh is rectangular and the values are interpreted as the
width and height respectively.
nwires : Tuple of 1 or 2 ints, or a single int
The number of wires in the horizontal and vertical directions. If
only one value is provided, the number in the two directions is
assumed to be equal. Note that a wire will cross the center of the
mesh only when nwires is odd.
wire_diameter : `~astropy.units.Quantity`
The diameter of the wires.
mesh_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the mesh plane. Modifying this vector can rotate the
mesh in the plane or tilt the mesh plane relative to the
source-detector axis. By default, `mesh_hdir` is set equal to
`detector_hdir` (see `detector_hdir` keyword in `__init__`).
mesh_vdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the vertical
direction on the mesh plane. Modifying this vector can tilt the
mesh relative to the source-detector axis. By default, `mesh_vdir`
is defined to be perpendicular to `mesh_hdir` and the detector
plane normal (such that the mesh is parallel to the detector plane).
Raises
------
ValueError
Raises a ValueError if the provided mesh location is not
between the source and the object grid.
"""
location = _coerce_to_cartesian_si(location)
wire_radius = wire_diameter.si.value / 2
if not isinstance(extent, tuple):
extent = (extent,)
if len(extent) == 1:
radius = 0.5 * extent[0].si.value
width = extent[0].si.value
height = extent[0].si.value
elif len(extent) == 2:
radius = None
width = extent[0].si.value
height = extent[1].si.value
else:
raise ValueError(
"extent must be a tuple of 1 or 2 elements, but "
f"{len(extent)} elements were provided."
)
if not isinstance(nwires, tuple):
nwires = (nwires,)
if len(nwires) != 2:
nwires = (nwires[0], nwires[0])
# If no hdir/vdir is specified, calculate a default value
# If one is specified, make sure it is normalized
if mesh_hdir is None:
# Re-calculate the default here, in case the user
# specified a different det_hdir
mesh_hdir = self._default_detector_hdir()
else:
mesh_hdir = mesh_hdir / np.linalg.norm(mesh_hdir)
if mesh_vdir is None:
mesh_vdir = np.cross(mesh_hdir, self.det_n)
mesh_vdir = -mesh_vdir / np.linalg.norm(mesh_vdir)
else:
mesh_vdir = mesh_vdir / np.linalg.norm(mesh_vdir)
# Raise exception if mesh is AFTER the field grid
if np.linalg.norm(location - self.source) > np.linalg.norm(self.source):
raise ValueError(
f"The specified mesh location, {location},"
"is not between the source and the origin."
)
mesh_entry = {
"location": location,
"wire_radius": wire_radius,
"radius": radius,
"width": width,
"height": height,
"nwires": nwires,
"mesh_hdir": mesh_hdir,
"mesh_vdir": mesh_vdir,
}
self.mesh_list.append(mesh_entry)
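    # Usage sketch (hypothetical values, not from the original source): a 1 cm square
    # mesh of 21 x 21 wires, 20 um in diameter, placed between the source and the grid:
    #
    #   sim.add_wire_mesh(
    #       location=(0.5 * u.mm, np.pi / 2 * u.rad, np.pi * u.rad),  # spherical
    #       extent=(1 * u.cm, 1 * u.cm),
    #       nwires=(21, 21),
    #       wire_diameter=20 * u.um,
    #   )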
def _apply_wire_mesh(
self,
location=None,
wire_radius=None,
radius=None,
width=None,
height=None,
nwires=None,
mesh_hdir=None,
mesh_vdir=None,
):
"""
Apply wire meshes that were added to self.mesh_list
"""
x = self._coast_to_plane(location, mesh_hdir, mesh_vdir)
# Particle positions in 2D on the mesh plane
xloc = np.dot(x - location, mesh_hdir)
yloc = np.dot(x - location, mesh_vdir)
# Create an array in which True indicates that a particle has hit a wire
# and False indicates that it has not
hit = np.zeros(self.nparticles, dtype=bool)
# Mark particles that overlap vertical or horizontal position with a wire
h_centers = np.linspace(-width / 2, width / 2, num=nwires[0])
for c in h_centers:
hit |= np.isclose(xloc, c, atol=wire_radius)
v_centers = np.linspace(-height / 2, height / 2, num=nwires[1])
for c in v_centers:
hit |= np.isclose(yloc, c, atol=wire_radius)
# Put back any particles that are outside the mesh boundaries
# First handle the case where the mesh is rectangular
if radius is None:
# Replace particles outside the x-boundary
hit[
np.logical_or(
xloc > np.max(h_centers) + wire_radius,
xloc < np.min(h_centers) - wire_radius,
)
] = False
# Replace particles outside the y-boundary
hit[
np.logical_or(
yloc > np.max(v_centers) + wire_radius,
yloc < np.min(v_centers) - wire_radius,
)
] = False
# Handle the case where the mesh is circular
else:
loc_rad = np.sqrt(xloc ** 2 + yloc ** 2)
hit[loc_rad > radius] = False
# In the case of a circular mesh, also create a round wire along the
# outside edge
hit[np.isclose(loc_rad, radius, atol=wire_radius)] = True
# Identify the particles that have hit something, then remove them from
# all of the arrays
keep_these_particles = ~hit
number_kept_particles = keep_these_particles.sum()
nremoved = self.nparticles - number_kept_particles
if self.nparticles - nremoved <= 0:
raise ValueError(
"The specified mesh is blocking all of the particles. "
f"The wire diameter ({2*wire_radius}) may be too large."
)
self.x = self.x[keep_these_particles, :]
self.v = self.v[keep_these_particles, :]
self.theta = self.theta[
keep_these_particles
] # Important to apply here to get correct grid_ind
self.nparticles = number_kept_particles
# *************************************************************************
# Particle creation methods
# *************************************************************************
def _angles_monte_carlo(self):
"""
Generates angles for each particle randomly such that the flux
per solid angle is uniform.
"""
# Create a probability vector for the theta distribution
# Theta must follow a sine distribution in order for the particle
# flux per solid angle to be uniform.
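# (The solid-angle element is dOmega = sin(theta) dtheta dphi, so weighting
# theta by sin(theta) while drawing phi uniformly samples directions
# uniformly per unit solid angle.)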
arg = np.linspace(0, self.max_theta, num=int(1e5))
prob = np.sin(arg)
prob *= 1 / np.sum(prob)
# Randomly choose theta's weighted with the sine probabilities
theta = np.random.choice(arg, size=self.nparticles, replace=True, p=prob)
# Also generate a uniform phi distribution
phi = np.random.uniform(high=2 * np.pi, size=self.nparticles)
return theta, phi
def _angles_uniform(self):
"""
Generates angles for each particle such that their velocities are
uniformly distributed on a grid in theta and phi. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
"""
# Calculate the approximate square root
n_per = np.floor(np.sqrt(self.nparticles)).astype(np.int32)
# Set new nparticles to be a perfect square
self.nparticles = n_per ** 2
# Create an imaginary grid positioned 1 unit from the source
# and spanning max_theta at the corners
extent = np.sin(self.max_theta) / np.sqrt(2)
arr = np.linspace(-extent, extent, num=n_per)
harr, varr = np.meshgrid(arr, arr, indexing="ij")
# calculate the angles from the source for each point in
# the grid.
theta = np.arctan(np.sqrt(harr ** 2 + varr ** 2))
phi = np.arctan2(varr, harr)
return theta.flatten(), phi.flatten()
@particles.particle_input
def create_particles(
self,
nparticles,
particle_energy,
max_theta=None,
particle: Particle = Particle("p+"),
distribution="monte-carlo",
):
r"""
Generates the angular distributions about the Z-axis, then
rotates those distributions to align with the source-to-detector axis.
By default, particles are generated at angles up to nearly pi/2 from the
source-detector axis. However, if the detector is far from the source, many
of these particles will never be observed. The max_theta keyword allows these
extraneous particles to be neglected, focusing computational resources on the
particles that will actually hit the detector.
Parameters
----------
nparticles : integer
The number of particles to include in the simulation. The default
is 1e5.
particle_energy : `~astropy.units.Quantity`
The energy of the particle, in units convertible to eV.
All particles are given the same energy.
max_theta : `~astropy.units.Quantity`, optional
The largest velocity vector angle (measured from the
source-to-detector axis) for which particles should be generated.
Decreasing this angle can eliminate particles that would never
reach the detector region of interest. If no value is given, a
guess will be made based on the size of the grid.
Units must be convertible to radians.
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
distribution: str
A keyword which determines how particles will be distributed
in velocity space. Options are:
- 'monte-carlo': velocities will be chosen randomly,
such that the flux per solid angle is uniform.
- 'uniform': velocities will be distributed such that,
left unperturbed, they will form a uniform pattern
on the detection plane. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
Simulations run in the `uniform` mode will imprint a grid pattern
on the image, but will well-sample the field grid with a
smaller number of particles. The default is `monte-carlo`.
"""
self._log("Creating Particles")
# Load inputs
self.nparticles = int(nparticles)
self.particle_energy = particle_energy.to(u.eV).value
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
# If max_theta is not specified, make a guess based on the grid size
if max_theta is None:
self.max_theta = np.clip(
1.5 * self.max_theta_hit_grid, 0.01, 0.99 * np.pi / 2
)
else:
self.max_theta = max_theta.to(u.rad).value
# Calculate the velocity corresponding to the particle energy
ER = self.particle_energy * 1.6e-19 / (self.m * self._c ** 2)
v0 = self._c * np.sqrt(1 - 1 / (ER + 1) ** 2)
if distribution == "monte-carlo":
theta, phi = self._angles_monte_carlo()
elif distribution == "uniform":
theta, phi = self._angles_uniform()
# Temporarily save theta to later determine which particles
# should be tracked
self.theta = theta
# Construct the velocity distribution around the z-axis
self.v = np.zeros([self.nparticles, 3])
self.v[:, 0] = v0 * np.sin(theta) * np.cos(phi)
self.v[:, 1] = v0 * np.sin(theta) * np.sin(phi)
self.v[:, 2] = v0 * np.cos(theta)
# Calculate the rotation matrix that rotates the z-axis
# onto the source-detector axis
a = np.array([0, 0, 1])
b = self.detector - self.source
rot = rot_a_to_b(a, b)
# Apply rotation matrix to calculated velocity distribution
self.v = np.matmul(self.v, rot)
# Place particles at the source
self.x = np.tile(self.source, (self.nparticles, 1))
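# Illustrative usage sketch (commented out; `sim`, the energy and the angle
# below are assumptions, not part of this module):
#   sim.create_particles(1e5, 14 * u.MeV,
#                        max_theta=np.pi / 6 * u.rad,
#                        particle=Particle("p+"),
#                        distribution="monte-carlo")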
@particles.particle_input
def load_particles(
self, x, v, particle: Particle = Particle("p+"),
):
r"""
Load arrays of particle positions and velocities.
Parameters
----------
x : `~astropy.units.Quantity`, shape (N,3)
Positions for N particles
v: `~astropy.units.Quantity`, shape (N,3)
Velocities for N particles
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
"""
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
if x.shape[0] != v.shape[0]:
raise ValueError(
"Provided x and v arrays have inconsistent numbers "
" of particles "
f"({x.shape[0]} and {v.shape[0]} respectively)."
)
else:
self.nparticles = x.shape[0]
self.x = x.to(u.m).value
self.v = v.to(u.m / u.s).value
self.theta = np.arccos(
np.inner(self.v, self.src_n) / np.linalg.norm(self.v, axis=-1)
)
n_wrong_way = np.sum(np.where(self.theta > np.pi / 2, 1, 0))
if n_wrong_way > 1:
warnings.warn(
f"{100*n_wrong_way/self.nparticles:.2f}% of particles "
"initialized are heading away from the grid. Check the orientation "
" of the provided velocity vectors.",
RuntimeWarning,
)
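# Illustrative usage sketch (commented out; `sim` and the values are
# assumptions): positions and velocities are (N, 3) Quantity arrays, e.g.
#   x = np.zeros((1000, 3)) * u.m
#   v = np.zeros((1000, 3)) * u.m / u.s
#   v[:, 2] = 1e7 * u.m / u.s   # send particles along +z
#   sim.load_particles(x, v, particle="p+")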
# *************************************************************************
# Run/push loop methods
# *************************************************************************
def _adaptive_dt(self, Ex, Ey, Ez, Bx, By, Bz):
r"""
Calculate the appropriate dt based on a number of considerations
including the local grid resolution (ds) and the gyroperiod of the
particles in the current fields.
"""
# If dt was explicitly set, skip the rest of this function
if self.dt.size == 1:
return self.dt
# Compute the timestep indicated by the grid resolution
ds = self.grid.grid_resolution.to(u.m).value
gridstep = 0.5 * (np.min(ds) / self.vmax)
# If not, compute a number of possible timesteps
# Compute the cyclotron gyroperiod
Bmag = np.max(np.sqrt(Bx ** 2 + By ** 2 + Bz ** 2)).to(u.T).value
# Compute the gyroperiod
if Bmag == 0:
gyroperiod = np.inf
else:
gyroperiod = 2 * np.pi * self.m / (self.q * np.max(Bmag))
# TODO: introduce a minimum timestep based on electric fields too!
# Create an array of all the possible time steps we computed
candidates = np.array([gyroperiod / 12, gridstep])
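# (gyroperiod / 12 gives roughly twelve pushes per gyro-orbit; gridstep
# limits the fastest particle to about half a grid cell per step)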
# Enforce limits on dt
candidates = np.clip(candidates, self.dt[0], self.dt[1])
# dt is the min of the remaining candidates
return np.min(candidates)
def _coast_to_grid(self):
r"""
Coasts all particles to the timestep when the first particle should
be entering the grid. Doing this in one step (rather than pushing
the particles through zero fields) saves computation time.
"""
# Distance from the source to the nearest gridpoint
dist = np.min(np.linalg.norm(self.grid_arr - self.source, axis=3))
# Find the particle with the highest speed towards the grid
vmax = np.max(np.dot(self.v, self.src_n))
# Time for fastest possible particle to reach the grid.
t = dist / vmax
# Coast the particles to the advanced position
self.x = self.x + self.v * t
def _coast_to_plane(self, center, hdir, vdir, x=None):
"""
Calculates the positions where the current trajectories of each
particle impact a plane, described by the plane's center and
horizontal and vertical unit vectors.
Returns an [nparticles, 3] array of the particle positions in the plane
By default this function does not alter self.x. The optional keyword
x can be used to pass in an output array that will be used to hold
the positions in the plane. This can be used to directly update self.x
as follows:
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x = self.x)
"""
normal = np.cross(hdir, vdir)
# Calculate the time required to evolve each particle into the
# plane
t = np.inner(center[np.newaxis, :] - self.x, normal) / np.inner(self.v, normal)
# Calculate particle positions in the plane
if x is None:
# If no output array is provided, preallocate
x = np.empty_like(self.x)
x[...] = self.x + self.v * t[:, np.newaxis]
# Check that all points are now in the plane
# (Eq. of a plane is nhat*x + d = 0)
plane_eq = np.dot(x - center, normal)
assert np.allclose(plane_eq, 0, atol=1e-6)
return x
def _remove_deflected_particles(self):
r"""
Removes any particles that have been deflected away from the detector
plane (eg. those that will never hit the grid)
"""
dist_remaining = np.dot(self.x, self.det_n) + np.linalg.norm(self.detector)
v_towards_det = np.dot(self.v, -self.det_n)
# If particles have not yet reached the detector plane and are moving
# away from it, they will never reach the detector.
# So, we can remove them from the arrays
# Find the indices of all particles that we should keep:
# i.e. those still moving towards the detector.
ind = np.logical_not((v_towards_det < 0) & (dist_remaining > 0)).nonzero()[0]
# Drop the other particles
self.x = self.x[ind, :]
self.v = self.v[ind, :]
self.v_init = self.v_init[ind, :]
self.nparticles_grid = self.x.shape[0]
# Store the number of particles deflected
self.fract_deflected = (self.nparticles - ind.size) / self.nparticles
# Warn the user if a large number of particles are being deflected
if self.fract_deflected > 0.05:
warnings.warn(
f"{100*self.fract_deflected:.1f}% particles have been "
"deflected away from the detector plane. The fields "
"provided may be too high to successfully radiograph "
"with this particle energy.",
RuntimeWarning,
)
def _push(self):
r"""
Advance particles using an implementation of the time-centered
Boris algorithm
"""
# Get a list of positions (input for interpolator)
pos = self.x[self.grid_ind, :] * u.m
# Update the list of particles on and off the grid
self.on_grid = self.grid.on_grid(pos)
# entered_grid is zero at the end if a particle has never
# entered the grid
self.entered_grid += self.on_grid
# Estimate the E and B fields for each particle
# Note that this interpolation step is BY FAR the slowest part of the push
# loop. Any speed improvements will have to come from here.
if self.field_weighting == "volume averaged":
Ex, Ey, Ez, Bx, By, Bz = self.grid.volume_averaged_interpolator(
pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
)
elif self.field_weighting == "nearest neighbor":
Ex, Ey, Ez, Bx, By, Bz = self.grid.nearest_neighbor_interpolator(
pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
)
# Create arrays of E and B as required by push algorithm
E = np.array(
[Ex.to(u.V / u.m).value, Ey.to(u.V / u.m).value, Ez.to(u.V / u.m).value]
)
E = np.moveaxis(E, 0, -1)
B = np.array([Bx.to(u.T).value, By.to(u.T).value, Bz.to(u.T).value])
B = np.moveaxis(B, 0, -1)
# Calculate the adaptive timestep from the fields currently experienced
# by the particles
# If user sets dt explicitly, that's handled in _adaptive_dt
dt = self._adaptive_dt(Ex, Ey, Ez, Bx, By, Bz)
# TODO: Test v/c and implement relativistic Boris push when required
# vc = np.max(v)/_c
x = self.x[self.grid_ind, :]
v = self.v[self.grid_ind, :]
boris_push(x, v, B, E, self.q, self.m, dt)
self.x[self.grid_ind, :] = x
self.v[self.grid_ind, :] = v
def _stop_condition(self):
r"""
The stop condition is that most of the particles have entered the grid
and almost all have now left it.
"""
# Count the number of particles who have entered, which is the
# number of non-zero entries in entered_grid
self.num_entered = np.nonzero(self.entered_grid)[0].size
# How many of the particles have entered the grid
self.fract_entered = np.sum(self.num_entered) / self.nparticles_grid
# Of the particles that have entered the grid, how many are currently
# on the grid?
# if/else avoids dividing by zero
if np.sum(self.num_entered) > 0:
still_on = np.sum(self.on_grid) / np.sum(self.num_entered)
else:
still_on = 0.0
if self.fract_entered > 0.1 and still_on < 0.001:
# Warn user if < 10% of the particles ended up on the grid
if self.num_entered < 0.1 * self.nparticles:
warnings.warn(
f"Only {100*self.num_entered/self.nparticles:.2f}% of "
"particles entered the field grid: consider "
"decreasing the max_theta to increase this "
"number.",
RuntimeWarning,
)
return True
else:
return False
def run(
self, dt=None, field_weighting="volume averaged",
):
r"""
Runs a particle-tracing simulation.
Timesteps are adaptively calculated based on the
local grid resolution of the particles and the electric and magnetic
fields they are experiencing. After all particles
have left the grid, they are advanced to the
detector plane where they can be used to construct a synthetic
diagnostic image.
Parameters
----------
dt : `~astropy.units.Quantity`, optional
An explicitly set timestep in units convertible to seconds.
Setting this optional keyword overrules the adaptive time step
capability and forces the use of this timestep throughout. If a tuple
of timesteps is provided, the adaptive timestep will be clamped
between the first and second values.
field_weighting : str
String that selects the field weighting algorithm used to determine
what fields are felt by the particles. Options are:
* 'nearest neighbor': Particles are assigned the fields on
the grid vertex closest to them.
* 'volume averaged' : The fields experienced by a particle are a
volume-average of the eight grid points surrounding them.
The default is 'volume averaged'.
Returns
-------
None.
"""
# Load and validate inputs
field_weightings = ["volume averaged", "nearest neighbor"]
if field_weighting in field_weightings:
self.field_weighting = field_weighting
else:
raise ValueError(
f"{field_weighting} is not a valid option for "
"field_weighting. Valid choices are "
f"{field_weightings}."
)
if dt is None:
# Set dt as an infinite range by default (auto dt with no restrictions)
self.dt = np.array([0.0, np.inf]) * u.s
else:
self.dt = dt
self.dt = (self.dt).to(u.s).value
# Check to make sure particles have already been generated
if not hasattr(self, "x"):
raise ValueError(
"Either the create_particles or load_particles method must be "
"called before running the particle tracing algorithm."
)
# If meshes have been added, apply them now
for mesh in self.mesh_list:
self._apply_wire_mesh(**mesh)
# Store a copy of the initial velocity distribution in memory
# This will be used later to calculate the maximum deflection
self.v_init = np.copy(self.v)
# Calculate the maximum velocity
# Used for determining the grid crossing maximum timestep
self.vmax = np.max(np.linalg.norm(self.v, axis=-1))
# Determine which particles should be tracked
# This array holds the indices of all particles that WILL hit the grid
# Only these particles will actually be pushed through the fields
self.grid_ind = np.where(self.theta < self.max_theta_hit_grid)[0]
self.nparticles_grid = len(self.grid_ind)
self.fract_tracked = self.nparticles_grid / self.nparticles
# Create flags for tracking particles during the simulation
# on_grid -> zero if the particle is off grid, one if it is on the grid
self.on_grid = np.zeros([self.nparticles_grid])
# Entered grid -> non-zero if particle EVER entered the grid
self.entered_grid = np.zeros([self.nparticles_grid])
# Generate a null distribution of points (the result in the absence of
# any fields) for statistical comparison
self.x0 = self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir)
# Advance the particles to the near the start of the grid
self._coast_to_grid()
# Initialize a "progress bar" (really more of a meter)
# Setting sys.stdout lets this play nicely with regular print()
pbar = tqdm(
initial=0,
total=self.nparticles_grid + 1,
disable=not self.verbose,
desc="Particles on grid",
unit="particles",
bar_format="{l_bar}{bar}{n:.1e}/{total:.1e} {unit}",
file=sys.stdout,
)
# Push the particles until the stop condition is satisfied
# (no more particles on the simulation grid)
while not self._stop_condition():
n_on_grid = np.sum(self.on_grid)
pbar.n = n_on_grid
pbar.last_print_n = n_on_grid
pbar.update()
self._push()
pbar.close()
# Remove particles that will never reach the detector
self._remove_deflected_particles()
# Advance the particles to the image plane
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x=self.x)
# Log a summary of the run
self._log("Run completed")
self._log("Fraction of particles tracked: " f"{self.fract_tracked*100:.1f}%")
self._log(
"Fraction of tracked particles that entered the grid: "
f"{self.fract_entered*100:.1f}%"
)
self._log(
"Fraction of tracked particles deflected away from the "
"detector plane: "
f"{self.fract_deflected*100}%"
)
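# Illustrative usage sketch (commented out; `sim` and the timestep range are
# assumptions): a Quantity pair clamps the adaptive timestep, a scalar fixes it.
#   sim.run(dt=np.array([1e-12, 1e-10]) * u.s,
#           field_weighting="volume averaged")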
@property
def max_deflection(self):
"""
The maximum deflection experienced by one of the particles, determined
by comparing their initial and final velocity vectors.
This value can be used to determine the charged particle radiography regime
using the dimensionless number defined by Kugland et al. 2012
Returns
-------
max_deflection : float
The maximum deflection in radians
"""
# Normalize the initial and final velocities
v_norm = self.v / np.linalg.norm(self.v, axis=1, keepdims=True)
v_init_norm = self.v_init / np.linalg.norm(self.v_init, axis=1, keepdims=True)
# Compute the dot product
proj = np.sum(v_norm * v_init_norm, axis=1)
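# (both vectors are unit length, so proj is the cosine of each particle's
# deflection angle)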
# In case of numerical errors, make sure the output is within the domain of
# arccos
proj = np.where(proj > 1, 1, proj)
max_deflection = np.max(np.arccos(proj))
return max_deflection * u.rad
# *************************************************************************
# Synthetic diagnostic methods (creating output)
# *************************************************************************
def synthetic_radiograph(
self, size=None, bins=[200, 200], ignore_grid=False, optical_density=False
):
r"""
Calculate a "synthetic radiograph" (particle count histogram in the
image plane).
Parameters
----------
size : `~astropy.units.Quantity`, shape (2,2)
The size of the detector array, specified as the minimum
and maximum values included in both the horizontal and vertical
directions in the detector plane coordinates. Shape is
[[hmin,hmax], [vmin, vmax]]. Units must be convertible to meters.
bins : array of integers, shape (2)
The number of bins in each direction in the format [hbins, vbins].
The default is [200,200].
ignore_grid: bool
If True, returns the intensity in the image plane in the absence
of simulated fields.
optical_density: bool
If True, return the optical density rather than the intensity
.. math::
OD = -log_{10}(Intensity/I_0)
where I_0 is the intensity on the detector plane in the absence of
simulated fields. Default is False.
Returns
-------
hax : `~astropy.units.Quantity` array shape (hbins,)
The horizontal axis of the synthetic radiograph in meters.
vax : `~astropy.units.Quantity` array shape (vbins, )
The vertical axis of the synthetic radiograph in meters.
intensity : ndarray, shape (hbins, vbins)
The number of particles counted in each bin of the histogram.
"""
# Note that, at the end of the simulation, all particles were moved
# into the image plane.
# If ignore_grid is True, use the predicted positions in the absence of
# simulated fields
if ignore_grid:
x = self.x0
else:
x = self.x
# Determine locations of points in the detector plane using unit
# vectors
xloc = np.dot(x - self.detector, self.det_hdir)
yloc = np.dot(x - self.detector, self.det_vdir)
if size is None:
# If a detector size is not given, choose lengths based on the
# dimensions of the grid
w = self.mag * np.max(
[
np.max(np.abs(self.grid.pts0.to(u.m).value)),
np.max(np.abs(self.grid.pts1.to(u.m).value)),
np.max(np.abs(self.grid.pts2.to(u.m).value)),
]
)
# The factor of 5 here is somewhat arbitrary: we just want a
# region a few times bigger than the image of the grid on the
# detector, since particles could be deflected out
size = 5 * np.array([[-w, w], [-w, w]]) * u.m
# Generate the histogram
intensity, h, v = np.histogram2d(
xloc, yloc, range=size.to(u.m).value, bins=bins
)
# h, v are the bin edges: compute the centers to produce arrays
# of the right length (then trim off the extra point)
h = ((h + np.roll(h, -1)) / 2)[0:-1]
v = ((v + np.roll(v, -1)) / 2)[0:-1]
# Throw a warning if < 50% of the particles are included on the
# histogram
percentage = np.sum(intensity) / self.nparticles
if percentage < 0.5:
warnings.warn(
f"Only {percentage:.2%} of the particles are shown "
"on this synthetic radiograph. Consider increasing "
"the size to include more.",
RuntimeWarning,
)
if optical_density:
# Generate the null radiograph
x, y, I0 = self.synthetic_radiograph(size=size, bins=bins, ignore_grid=True)
# Calculate I0 as the mean of the non-zero values in the null
# histogram. Zeros are just outside of the illuminated area.
I0 = np.mean(I0[I0 != 0])
# Overwrite any zeros in intensity to avoid log10(0)
intensity[intensity == 0] = 1
# Calculate the optical_density
intensity = -np.log10(intensity / I0)
return h * u.m, v * u.m, intensity
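# Illustrative usage sketch (commented out; `sim` and the detector size are
# assumptions):
#   size = np.array([[-1, 1], [-1, 1]]) * u.cm
#   hax, vax, intensity = sim.synthetic_radiograph(size=size, bins=[400, 400])
#   _, _, od = sim.synthetic_radiograph(size=size, bins=[400, 400],
#                                       optical_density=True)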
|
[
"numpy.clip",
"numpy.log10",
"numpy.sqrt",
"numpy.arccos",
"plasmapy.simulation.particle_integrators.boris_push",
"numpy.logical_not",
"numpy.array",
"numpy.arctan2",
"numpy.isfinite",
"numpy.linalg.norm",
"numpy.sin",
"numpy.moveaxis",
"numpy.mean",
"numpy.cross",
"numpy.where",
"numpy.max",
"plasmapy.particles.Particle",
"numpy.dot",
"numpy.linspace",
"numpy.matmul",
"numpy.min",
"warnings.warn",
"numpy.meshgrid",
"numpy.tile",
"numpy.abs",
"numpy.allclose",
"numpy.random.choice",
"numpy.inner",
"numpy.cos",
"numpy.nonzero",
"numpy.copy",
"numpy.isclose",
"numpy.roll",
"plasmapy.formulary.mathematics.rot_a_to_b",
"tqdm.tqdm",
"numpy.sum",
"numpy.zeros",
"numpy.empty_like",
"numpy.random.uniform"
] |
[((1362, 1373), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1370, 1373), True, 'import numpy as np\n'), ((7185, 7220), 'numpy.cross', 'np.cross', (['self.det_hdir', 'self.det_n'], {}), '(self.det_hdir, self.det_n)\n', (7193, 7220), True, 'import numpy as np\n'), ((10017, 10030), 'numpy.zeros', 'np.zeros', (['[8]'], {}), '([8])\n', (10025, 10030), True, 'import numpy as np\n'), ((10652, 10665), 'numpy.max', 'np.max', (['theta'], {}), '(theta)\n', (10658, 10665), True, 'import numpy as np\n'), ((16375, 16406), 'numpy.dot', 'np.dot', (['(x - location)', 'mesh_hdir'], {}), '(x - location, mesh_hdir)\n', (16381, 16406), True, 'import numpy as np\n'), ((16422, 16453), 'numpy.dot', 'np.dot', (['(x - location)', 'mesh_vdir'], {}), '(x - location, mesh_vdir)\n', (16428, 16453), True, 'import numpy as np\n'), ((16596, 16633), 'numpy.zeros', 'np.zeros', (['self.nparticles'], {'dtype': 'bool'}), '(self.nparticles, dtype=bool)\n', (16604, 16633), True, 'import numpy as np\n'), ((16737, 16786), 'numpy.linspace', 'np.linspace', (['(-width / 2)', '(width / 2)'], {'num': 'nwires[0]'}), '(-width / 2, width / 2, num=nwires[0])\n', (16748, 16786), True, 'import numpy as np\n'), ((16893, 16944), 'numpy.linspace', 'np.linspace', (['(-height / 2)', '(height / 2)'], {'num': 'nwires[1]'}), '(-height / 2, height / 2, num=nwires[1])\n', (16904, 16944), True, 'import numpy as np\n'), ((19447, 19458), 'numpy.sin', 'np.sin', (['arg'], {}), '(arg)\n', (19453, 19458), True, 'import numpy as np\n'), ((19580, 19645), 'numpy.random.choice', 'np.random.choice', (['arg'], {'size': 'self.nparticles', 'replace': '(True)', 'p': 'prob'}), '(arg, size=self.nparticles, replace=True, p=prob)\n', (19596, 19645), True, 'import numpy as np\n'), ((19712, 19767), 'numpy.random.uniform', 'np.random.uniform', ([], {'high': '(2 * np.pi)', 'size': 'self.nparticles'}), '(high=2 * np.pi, size=self.nparticles)\n', (19729, 19767), True, 'import numpy as np\n'), ((20567, 20606), 'numpy.linspace', 'np.linspace', (['(-extent)', 'extent'], {'num': 'n_per'}), '(-extent, extent, num=n_per)\n', (20578, 20606), True, 'import numpy as np\n'), ((20628, 20664), 'numpy.meshgrid', 'np.meshgrid', (['arr', 'arr'], {'indexing': '"""ij"""'}), "(arr, arr, indexing='ij')\n", (20639, 20664), True, 'import numpy as np\n'), ((20823, 20845), 'numpy.arctan2', 'np.arctan2', (['varr', 'harr'], {}), '(varr, harr)\n', (20833, 20845), True, 'import numpy as np\n'), ((21062, 21076), 'plasmapy.particles.Particle', 'Particle', (['"""p+"""'], {}), "('p+')\n", (21070, 21076), False, 'from plasmapy.particles import Particle\n'), ((24716, 24746), 'numpy.zeros', 'np.zeros', (['[self.nparticles, 3]'], {}), '([self.nparticles, 3])\n', (24724, 24746), True, 'import numpy as np\n'), ((25018, 25037), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (25026, 25037), True, 'import numpy as np\n'), ((25092, 25108), 'plasmapy.formulary.mathematics.rot_a_to_b', 'rot_a_to_b', (['a', 'b'], {}), '(a, b)\n', (25102, 25108), False, 'from plasmapy.formulary.mathematics import rot_a_to_b\n'), ((25195, 25217), 'numpy.matmul', 'np.matmul', (['self.v', 'rot'], {}), '(self.v, rot)\n', (25204, 25217), True, 'import numpy as np\n'), ((25276, 25318), 'numpy.tile', 'np.tile', (['self.source', '(self.nparticles, 1)'], {}), '(self.source, (self.nparticles, 1))\n', (25283, 25318), True, 'import numpy as np\n'), ((25415, 25429), 'plasmapy.particles.Particle', 'Particle', (['"""p+"""'], {}), "('p+')\n", (25423, 25429), False, 'from plasmapy.particles import Particle\n'), ((28862, 28899), 
'numpy.array', 'np.array', (['[gyroperiod / 12, gridstep]'], {}), '([gyroperiod / 12, gridstep])\n', (28870, 28899), True, 'import numpy as np\n'), ((28953, 28996), 'numpy.clip', 'np.clip', (['candidates', 'self.dt[0]', 'self.dt[1]'], {}), '(candidates, self.dt[0], self.dt[1])\n', (28960, 28996), True, 'import numpy as np\n'), ((29065, 29083), 'numpy.min', 'np.min', (['candidates'], {}), '(candidates)\n', (29071, 29083), True, 'import numpy as np\n'), ((30503, 30523), 'numpy.cross', 'np.cross', (['hdir', 'vdir'], {}), '(hdir, vdir)\n', (30511, 30523), True, 'import numpy as np\n'), ((31042, 31068), 'numpy.dot', 'np.dot', (['(x - center)', 'normal'], {}), '(x - center, normal)\n', (31048, 31068), True, 'import numpy as np\n'), ((31084, 31120), 'numpy.allclose', 'np.allclose', (['plane_eq', '(0)'], {'atol': '(1e-06)'}), '(plane_eq, 0, atol=1e-06)\n', (31095, 31120), True, 'import numpy as np\n'), ((31449, 31476), 'numpy.dot', 'np.dot', (['self.v', '(-self.det_n)'], {}), '(self.v, -self.det_n)\n', (31455, 31476), True, 'import numpy as np\n'), ((33983, 34004), 'numpy.moveaxis', 'np.moveaxis', (['E', '(0)', '(-1)'], {}), '(E, 0, -1)\n', (33994, 34004), True, 'import numpy as np\n'), ((34094, 34115), 'numpy.moveaxis', 'np.moveaxis', (['B', '(0)', '(-1)'], {}), '(B, 0, -1)\n', (34105, 34115), True, 'import numpy as np\n'), ((34537, 34579), 'plasmapy.simulation.particle_integrators.boris_push', 'boris_push', (['x', 'v', 'B', 'E', 'self.q', 'self.m', 'dt'], {}), '(x, v, B, E, self.q, self.m, dt)\n', (34547, 34579), False, 'from plasmapy.simulation.particle_integrators import boris_push\n'), ((38754, 38769), 'numpy.copy', 'np.copy', (['self.v'], {}), '(self.v)\n', (38761, 38769), True, 'import numpy as np\n'), ((39492, 39524), 'numpy.zeros', 'np.zeros', (['[self.nparticles_grid]'], {}), '([self.nparticles_grid])\n', (39500, 39524), True, 'import numpy as np\n'), ((39622, 39654), 'numpy.zeros', 'np.zeros', (['[self.nparticles_grid]'], {}), '([self.nparticles_grid])\n', (39630, 39654), True, 'import numpy as np\n'), ((40116, 40312), 'tqdm.tqdm', 'tqdm', ([], {'initial': '(0)', 'total': '(self.nparticles_grid + 1)', 'disable': '(not self.verbose)', 'desc': '"""Particles on grid"""', 'unit': '"""particles"""', 'bar_format': '"""{l_bar}{bar}{n:.1e}/{total:.1e} {unit}"""', 'file': 'sys.stdout'}), "(initial=0, total=self.nparticles_grid + 1, disable=not self.verbose,\n desc='Particles on grid', unit='particles', bar_format=\n '{l_bar}{bar}{n:.1e}/{total:.1e} {unit}', file=sys.stdout)\n", (40120, 40312), False, 'from tqdm import tqdm\n'), ((42209, 42245), 'numpy.sum', 'np.sum', (['(v_norm * v_init_norm)'], {'axis': '(1)'}), '(v_norm * v_init_norm, axis=1)\n', (42215, 42245), True, 'import numpy as np\n'), ((42362, 42389), 'numpy.where', 'np.where', (['(proj > 1)', '(1)', 'proj'], {}), '(proj > 1, 1, proj)\n', (42370, 42389), True, 'import numpy as np\n'), ((44751, 44791), 'numpy.dot', 'np.dot', (['(x - self.detector)', 'self.det_hdir'], {}), '(x - self.detector, self.det_hdir)\n', (44757, 44791), True, 'import numpy as np\n'), ((44807, 44847), 'numpy.dot', 'np.dot', (['(x - self.detector)', 'self.det_vdir'], {}), '(x - self.detector, self.det_vdir)\n', (44813, 44847), True, 'import numpy as np\n'), ((5709, 5736), 'numpy.linalg.norm', 'np.linalg.norm', (['self.source'], {}), '(self.source)\n', (5723, 5736), True, 'import numpy as np\n'), ((5775, 5804), 'numpy.linalg.norm', 'np.linalg.norm', (['self.detector'], {}), '(self.detector)\n', (5789, 5804), True, 'import numpy as np\n'), ((7251, 7269), 
'numpy.linalg.norm', 'np.linalg.norm', (['ny'], {}), '(ny)\n', (7265, 7269), True, 'import numpy as np\n'), ((9519, 9537), 'numpy.abs', 'np.abs', (['self.det_n'], {}), '(self.det_n)\n', (9525, 9537), True, 'import numpy as np\n'), ((9539, 9558), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9547, 9558), True, 'import numpy as np\n'), ((9578, 9597), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (9586, 9597), True, 'import numpy as np\n'), ((9689, 9707), 'numpy.linalg.norm', 'np.linalg.norm', (['nx'], {}), '(nx)\n', (9703, 9707), True, 'import numpy as np\n'), ((15113, 15144), 'numpy.cross', 'np.cross', (['mesh_hdir', 'self.det_n'], {}), '(mesh_hdir, self.det_n)\n', (15121, 15144), True, 'import numpy as np\n'), ((15354, 15392), 'numpy.linalg.norm', 'np.linalg.norm', (['(location - self.source)'], {}), '(location - self.source)\n', (15368, 15392), True, 'import numpy as np\n'), ((15395, 15422), 'numpy.linalg.norm', 'np.linalg.norm', (['self.source'], {}), '(self.source)\n', (15409, 15422), True, 'import numpy as np\n'), ((16834, 16871), 'numpy.isclose', 'np.isclose', (['xloc', 'c'], {'atol': 'wire_radius'}), '(xloc, c, atol=wire_radius)\n', (16844, 16871), True, 'import numpy as np\n'), ((16992, 17029), 'numpy.isclose', 'np.isclose', (['yloc', 'c'], {'atol': 'wire_radius'}), '(yloc, c, atol=wire_radius)\n', (17002, 17029), True, 'import numpy as np\n'), ((17805, 17835), 'numpy.sqrt', 'np.sqrt', (['(xloc ** 2 + yloc ** 2)'], {}), '(xloc ** 2 + yloc ** 2)\n', (17812, 17835), True, 'import numpy as np\n'), ((19479, 19491), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (19485, 19491), True, 'import numpy as np\n'), ((20517, 20539), 'numpy.sin', 'np.sin', (['self.max_theta'], {}), '(self.max_theta)\n', (20523, 20539), True, 'import numpy as np\n'), ((20542, 20552), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20549, 20552), True, 'import numpy as np\n'), ((20777, 20807), 'numpy.sqrt', 'np.sqrt', (['(harr ** 2 + varr ** 2)'], {}), '(harr ** 2 + varr ** 2)\n', (20784, 20807), True, 'import numpy as np\n'), ((23970, 24032), 'numpy.clip', 'np.clip', (['(1.5 * self.max_theta_hit_grid)', '(0.01)', '(0.99 * np.pi / 2)'], {}), '(1.5 * self.max_theta_hit_grid, 0.01, 0.99 * np.pi / 2)\n', (23977, 24032), True, 'import numpy as np\n'), ((24296, 24326), 'numpy.sqrt', 'np.sqrt', (['(1 - 1 / (ER + 1) ** 2)'], {}), '(1 - 1 / (ER + 1) ** 2)\n', (24303, 24326), True, 'import numpy as np\n'), ((24791, 24802), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (24797, 24802), True, 'import numpy as np\n'), ((24847, 24858), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (24853, 24858), True, 'import numpy as np\n'), ((24887, 24900), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (24893, 24900), True, 'import numpy as np\n'), ((27248, 27286), 'numpy.where', 'np.where', (['(self.theta > np.pi / 2)', '(1)', '(0)'], {}), '(self.theta > np.pi / 2, 1, 0)\n', (27256, 27286), True, 'import numpy as np\n'), ((27328, 27528), 'warnings.warn', 'warnings.warn', (['f"""{100 * n_wrong_way / self.nparticles:.2f}% of particles initialized are heading away from the grid. Check the orientation of the provided velocity vectors."""', 'RuntimeWarning'], {}), "(\n f'{100 * n_wrong_way / self.nparticles:.2f}% of particles initialized are heading away from the grid. 
Check the orientation of the provided velocity vectors.'\n , RuntimeWarning)\n", (27341, 27528), False, 'import warnings\n'), ((29442, 29493), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.grid_arr - self.source)'], {'axis': '(3)'}), '(self.grid_arr - self.source, axis=3)\n', (29456, 29493), True, 'import numpy as np\n'), ((29586, 29612), 'numpy.dot', 'np.dot', (['self.v', 'self.src_n'], {}), '(self.v, self.src_n)\n', (29592, 29612), True, 'import numpy as np\n'), ((30624, 30672), 'numpy.inner', 'np.inner', (['(center[np.newaxis, :] - self.x)', 'normal'], {}), '(center[np.newaxis, :] - self.x, normal)\n', (30632, 30672), True, 'import numpy as np\n'), ((30675, 30699), 'numpy.inner', 'np.inner', (['self.v', 'normal'], {}), '(self.v, normal)\n', (30683, 30699), True, 'import numpy as np\n'), ((30849, 30870), 'numpy.empty_like', 'np.empty_like', (['self.x'], {}), '(self.x)\n', (30862, 30870), True, 'import numpy as np\n'), ((31365, 31391), 'numpy.dot', 'np.dot', (['self.x', 'self.det_n'], {}), '(self.x, self.det_n)\n', (31371, 31391), True, 'import numpy as np\n'), ((31394, 31423), 'numpy.linalg.norm', 'np.linalg.norm', (['self.detector'], {}), '(self.detector)\n', (31408, 31423), True, 'import numpy as np\n'), ((32319, 32546), 'warnings.warn', 'warnings.warn', (['f"""{100 * self.fract_deflected:.1f}% particles have been deflected away from the detector plane. The fields provided may be too high to successfully radiograph with this particle energy."""', 'RuntimeWarning'], {}), "(\n f'{100 * self.fract_deflected:.1f}% particles have been deflected away from the detector plane. The fields provided may be too high to successfully radiograph with this particle energy.'\n , RuntimeWarning)\n", (32332, 32546), False, 'import warnings\n'), ((35108, 35132), 'numpy.sum', 'np.sum', (['self.num_entered'], {}), '(self.num_entered)\n', (35114, 35132), True, 'import numpy as np\n'), ((35311, 35335), 'numpy.sum', 'np.sum', (['self.num_entered'], {}), '(self.num_entered)\n', (35317, 35335), True, 'import numpy as np\n'), ((38905, 38936), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v'], {'axis': '(-1)'}), '(self.v, axis=-1)\n', (38919, 38936), True, 'import numpy as np\n'), ((39170, 39216), 'numpy.where', 'np.where', (['(self.theta < self.max_theta_hit_grid)'], {}), '(self.theta < self.max_theta_hit_grid)\n', (39178, 39216), True, 'import numpy as np\n'), ((40586, 40606), 'numpy.sum', 'np.sum', (['self.on_grid'], {}), '(self.on_grid)\n', (40592, 40606), True, 'import numpy as np\n'), ((42027, 42072), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.v, axis=1, keepdims=True)\n', (42041, 42072), True, 'import numpy as np\n'), ((42109, 42159), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v_init'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.v_init, axis=1, keepdims=True)\n', (42123, 42159), True, 'import numpy as np\n'), ((42422, 42437), 'numpy.arccos', 'np.arccos', (['proj'], {}), '(proj)\n', (42431, 42437), True, 'import numpy as np\n'), ((46023, 46040), 'numpy.sum', 'np.sum', (['intensity'], {}), '(intensity)\n', (46029, 46040), True, 'import numpy as np\n'), ((46100, 46268), 'warnings.warn', 'warnings.warn', (['f"""Only {percentage:.2%} of the particles are shown on this synthetic radiograph. Consider increasing the size to include more."""', 'RuntimeWarning'], {}), "(\n f'Only {percentage:.2%} of the particles are shown on this synthetic radiograph. 
Consider increasing the size to include more.'\n , RuntimeWarning)\n", (46113, 46268), False, 'import warnings\n'), ((46669, 46689), 'numpy.mean', 'np.mean', (['I0[I0 != 0]'], {}), '(I0[I0 != 0])\n', (46676, 46689), True, 'import numpy as np\n'), ((5955, 5984), 'numpy.linalg.norm', 'np.linalg.norm', (['self.detector'], {}), '(self.detector)\n', (5969, 5984), True, 'import numpy as np\n'), ((5987, 6014), 'numpy.linalg.norm', 'np.linalg.norm', (['self.source'], {}), '(self.source)\n', (6001, 6014), True, 'import numpy as np\n'), ((7031, 7060), 'numpy.linalg.norm', 'np.linalg.norm', (['detector_hdir'], {}), '(detector_hdir)\n', (7045, 7060), True, 'import numpy as np\n'), ((8224, 8245), 'numpy.abs', 'np.abs', (['self.grid[rq]'], {}), '(self.grid[rq])\n', (8230, 8245), True, 'import numpy as np\n'), ((8786, 9065), 'warnings.warn', 'warnings.warn', (['f"""Fields should go to zero at edges of grid to avoid non-physical effects, but a value of {edge_max:.2E} {unit} was found on the edge of the {rq} array. Consider applying a envelope function to force the fields at the edge to go to zero."""', 'RuntimeWarning'], {}), "(\n f'Fields should go to zero at edges of grid to avoid non-physical effects, but a value of {edge_max:.2E} {unit} was found on the edge of the {rq} array. Consider applying a envelope function to force the fields at the edge to go to zero.'\n , RuntimeWarning)\n", (8799, 9065), False, 'import warnings\n'), ((9638, 9657), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9646, 9657), True, 'import numpy as np\n'), ((15032, 15057), 'numpy.linalg.norm', 'np.linalg.norm', (['mesh_hdir'], {}), '(mesh_hdir)\n', (15046, 15057), True, 'import numpy as np\n'), ((15182, 15207), 'numpy.linalg.norm', 'np.linalg.norm', (['mesh_vdir'], {}), '(mesh_vdir)\n', (15196, 15207), True, 'import numpy as np\n'), ((15258, 15283), 'numpy.linalg.norm', 'np.linalg.norm', (['mesh_vdir'], {}), '(mesh_vdir)\n', (15272, 15283), True, 'import numpy as np\n'), ((18003, 18048), 'numpy.isclose', 'np.isclose', (['loc_rad', 'radius'], {'atol': 'wire_radius'}), '(loc_rad, radius, atol=wire_radius)\n', (18013, 18048), True, 'import numpy as np\n'), ((24775, 24788), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (24781, 24788), True, 'import numpy as np\n'), ((24831, 24844), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (24837, 24844), True, 'import numpy as np\n'), ((27145, 27173), 'numpy.inner', 'np.inner', (['self.v', 'self.src_n'], {}), '(self.v, self.src_n)\n', (27153, 27173), True, 'import numpy as np\n'), ((27176, 27207), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v'], {'axis': '(-1)'}), '(self.v, axis=-1)\n', (27190, 27207), True, 'import numpy as np\n'), ((28324, 28334), 'numpy.min', 'np.min', (['ds'], {}), '(ds)\n', (28330, 28334), True, 'import numpy as np\n'), ((34982, 35011), 'numpy.nonzero', 'np.nonzero', (['self.entered_grid'], {}), '(self.entered_grid)\n', (34992, 35011), True, 'import numpy as np\n'), ((35364, 35384), 'numpy.sum', 'np.sum', (['self.on_grid'], {}), '(self.on_grid)\n', (35370, 35384), True, 'import numpy as np\n'), ((35387, 35411), 'numpy.sum', 'np.sum', (['self.num_entered'], {}), '(self.num_entered)\n', (35393, 35411), True, 'import numpy as np\n'), ((35656, 35848), 'warnings.warn', 'warnings.warn', (['f"""Only {100 * self.num_entered / self.nparticles:.2f}% of particles entered the field grid: consider decreasing the max_theta to increase this number."""', 'RuntimeWarning'], {}), "(\n f'Only {100 * self.num_entered / self.nparticles:.2f}% of particles 
entered the field grid: consider decreasing the max_theta to increase this number.'\n , RuntimeWarning)\n", (35669, 35848), False, 'import warnings\n'), ((38050, 38073), 'numpy.array', 'np.array', (['[0.0, np.inf]'], {}), '([0.0, np.inf])\n', (38058, 38073), True, 'import numpy as np\n'), ((46868, 46892), 'numpy.log10', 'np.log10', (['(intensity / I0)'], {}), '(intensity / I0)\n', (46876, 46892), True, 'import numpy as np\n'), ((8698, 8709), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (8704, 8709), True, 'import numpy as np\n'), ((20249, 20273), 'numpy.sqrt', 'np.sqrt', (['self.nparticles'], {}), '(self.nparticles)\n', (20256, 20273), True, 'import numpy as np\n'), ((28681, 28693), 'numpy.max', 'np.max', (['Bmag'], {}), '(Bmag)\n', (28687, 28693), True, 'import numpy as np\n'), ((31801, 31859), 'numpy.logical_not', 'np.logical_not', (['((v_towards_det < 0) & (dist_remaining > 0))'], {}), '((v_towards_det < 0) & (dist_remaining > 0))\n', (31815, 31859), True, 'import numpy as np\n'), ((45503, 45531), 'numpy.array', 'np.array', (['[[-w, w], [-w, w]]'], {}), '([[-w, w], [-w, w]])\n', (45511, 45531), True, 'import numpy as np\n'), ((45837, 45851), 'numpy.roll', 'np.roll', (['h', '(-1)'], {}), '(h, -1)\n', (45844, 45851), True, 'import numpy as np\n'), ((45882, 45896), 'numpy.roll', 'np.roll', (['v', '(-1)'], {}), '(v, -1)\n', (45889, 45896), True, 'import numpy as np\n'), ((7732, 7764), 'numpy.isfinite', 'np.isfinite', (['self.grid[rq].value'], {}), '(self.grid[rq].value)\n', (7743, 7764), True, 'import numpy as np\n'), ((8355, 8375), 'numpy.max', 'np.max', (['arr[0, :, :]'], {}), '(arr[0, :, :])\n', (8361, 8375), True, 'import numpy as np\n'), ((8401, 8422), 'numpy.max', 'np.max', (['arr[-1, :, :]'], {}), '(arr[-1, :, :])\n', (8407, 8422), True, 'import numpy as np\n'), ((8448, 8468), 'numpy.max', 'np.max', (['arr[:, 0, :]'], {}), '(arr[:, 0, :])\n', (8454, 8468), True, 'import numpy as np\n'), ((8494, 8515), 'numpy.max', 'np.max', (['arr[:, -1, :]'], {}), '(arr[:, -1, :])\n', (8500, 8515), True, 'import numpy as np\n'), ((8541, 8561), 'numpy.max', 'np.max', (['arr[:, :, 0]'], {}), '(arr[:, :, 0])\n', (8547, 8561), True, 'import numpy as np\n'), ((8587, 8608), 'numpy.max', 'np.max', (['arr[:, :, -1]'], {}), '(arr[:, :, -1])\n', (8593, 8608), True, 'import numpy as np\n'), ((28471, 28507), 'numpy.sqrt', 'np.sqrt', (['(Bx ** 2 + By ** 2 + Bz ** 2)'], {}), '(Bx ** 2 + By ** 2 + Bz ** 2)\n', (28478, 28507), True, 'import numpy as np\n'), ((1698, 1707), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1704, 1707), True, 'import numpy as np\n'), ((1749, 1758), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1755, 1758), True, 'import numpy as np\n'), ((10557, 10585), 'numpy.linalg.norm', 'np.linalg.norm', (['self.src_det'], {}), '(self.src_det)\n', (10571, 10585), True, 'import numpy as np\n'), ((17320, 17337), 'numpy.max', 'np.max', (['h_centers'], {}), '(h_centers)\n', (17326, 17337), True, 'import numpy as np\n'), ((17380, 17397), 'numpy.min', 'np.min', (['h_centers'], {}), '(h_centers)\n', (17386, 17397), True, 'import numpy as np\n'), ((17583, 17600), 'numpy.max', 'np.max', (['v_centers'], {}), '(v_centers)\n', (17589, 17600), True, 'import numpy as np\n'), ((17643, 17660), 'numpy.min', 'np.min', (['v_centers'], {}), '(v_centers)\n', (17649, 17660), True, 'import numpy as np\n'), ((1989, 1998), 'numpy.cos', 'np.cos', (['p'], {}), '(p)\n', (1995, 1998), True, 'import numpy as np\n'), ((2052, 2061), 'numpy.sin', 'np.sin', (['p'], {}), '(p)\n', (2058, 2061), True, 'import numpy as np\n'), ((2103, 
2112), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2109, 2112), True, 'import numpy as np\n'), ((10459, 10484), 'numpy.dot', 'np.dot', (['vec', 'self.src_det'], {}), '(vec, self.src_det)\n', (10465, 10484), True, 'import numpy as np\n'), ((10511, 10530), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (10525, 10530), True, 'import numpy as np\n'), ((1977, 1986), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1983, 1986), True, 'import numpy as np\n'), ((2040, 2049), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2046, 2049), True, 'import numpy as np\n')]
|
# switch_start.py
# Adding another switch statement
# Authors : <NAME>
import string
import random
class Switch_Start:
def __init__(self, code):
self.string = code
def insert_switch(self, code):
# generate a random variable name: one letter followed by 11 alphanumerics
_LENGTH = 11
string_pool = string.ascii_letters + string.digits
num_pool = string.digits
var1 = random.choice(string.ascii_letters)
for i in range(_LENGTH):
var1 += random.choice(string_pool)
#writing another switch statement
first = "{int "
case0 = "switch (0) { case 0:"
case1 = "; case 1:"
case2 = "; case 2:"
case3 = "; case 3:"
last = "; }}"
result = str + first + var1 + "="+random.choice(num_pool)+";" + case0 + var1 + "++" + case1 + var1 + "--" + case2 + var1 + "++" + case3 + var1 + "--" + last
return result
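# Illustrative usage sketch (commented out; the input snippet is an assumption):
#   obf = Switch_Start("void f() ")
#   print(obf.insert_switch("void f() "))
# The output is the input followed by a block of the form
# "{int <randvar>=<digit>;switch (0) { case 0:<randvar>++; ... ; }}".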
|
[
"random.choice"
] |
[((389, 424), 'random.choice', 'random.choice', (['string.ascii_letters'], {}), '(string.ascii_letters)\n', (402, 424), False, 'import random\n'), ((480, 506), 'random.choice', 'random.choice', (['string_pool'], {}), '(string_pool)\n', (493, 506), False, 'import random\n'), ((772, 795), 'random.choice', 'random.choice', (['num_pool'], {}), '(num_pool)\n', (785, 795), False, 'import random\n')]
|
from numpy import array, rad2deg, pi, mgrid, argmin
from matplotlib.pylab import contour
import matplotlib.pyplot as plt
import mplstereonet
from obspy.imaging.beachball import aux_plane
from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm
from focal_mech.io.read_hash import read_demo, read_hash_solutions
from focal_mech.util.hash_routines import hash_to_classifier
from focal_mech.lib.sph_harm import get_sph_harm
from focal_mech.lib.correlate import corr_shear
hash_solns = read_hash_solutions("example1.out")
# we want solutions that are symetric
polarity_data = read_demo("north1.phase", "scsn.reverse", reverse=True)
inputs = hash_to_classifier(polarity_data, parity=1)
event = 3146815
result = classify(*inputs[event], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth1 = c.collections[0].get_paths()[0].vertices
pth1 = rad2deg(pth1)
pth2 = c.collections[0].get_paths()[1].vertices
pth2 = rad2deg(pth2)
hash_focal = rad2deg(hash_solns[event])
event2 = 3158361
result = classify(*inputs[event2], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln2, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth3 = c.collections[0].get_paths()[0].vertices
pth3 = rad2deg(pth3)
pth4 = c.collections[0].get_paths()[1].vertices
pth4 = rad2deg(pth4)
hash_focal2 = rad2deg(hash_solns[event2])
event3 = 3153955
result = classify(*inputs[event3], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln3, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth5 = c.collections[0].get_paths()[0].vertices
pth5 = rad2deg(pth5)
pth6 = c.collections[0].get_paths()[1].vertices
pth6 = rad2deg(pth6)
hash_focal3 = rad2deg(hash_solns[event3])
fig = plt.figure(facecolor="white", figsize=(10,20))
ax = fig.add_subplot(221, projection='stereonet')
ax.rake(pth1[:,0], pth1[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth2[:,0], pth2[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event][:,0])
toa = rad2deg(polarity_data[event][:,1])
polarity = polarity_data[event][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
ax = fig.add_subplot(222, projection='stereonet')
ax.rake(pth3[:,0], pth3[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth4[:,0], pth4[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln2
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln2)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal2
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal2)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event2][:,0])
toa = rad2deg(polarity_data[event2][:,1])
polarity = polarity_data[event2][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
ax = fig.add_subplot(224, projection='stereonet')
ax.rake(pth5[:,0], pth5[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth6[:,0], pth6[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln3
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln3)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal3
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal3)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event3][:,0])
toa = rad2deg(polarity_data[event3][:,1])
polarity = polarity_data[event3][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
plt.tight_layout(pad=4.0, h_pad=20.0)
plt.show()
|
[
"focal_mech.lib.classify_mechanism.classify",
"focal_mech.util.hash_routines.hash_to_classifier",
"matplotlib.pylab.contour",
"focal_mech.io.read_hash.read_hash_solutions",
"focal_mech.io.read_hash.read_demo",
"numpy.array",
"matplotlib.pyplot.figure",
"obspy.imaging.beachball.aux_plane",
"focal_mech.lib.classify_mechanism.translate_to_sphharm",
"matplotlib.pyplot.tight_layout",
"focal_mech.lib.correlate.corr_shear",
"focal_mech.lib.sph_harm.get_sph_harm",
"numpy.rad2deg",
"matplotlib.pyplot.show"
] |
[((507, 542), 'focal_mech.io.read_hash.read_hash_solutions', 'read_hash_solutions', (['"""example1.out"""'], {}), "('example1.out')\n", (526, 542), False, 'from focal_mech.io.read_hash import read_demo, read_hash_solutions\n'), ((598, 653), 'focal_mech.io.read_hash.read_demo', 'read_demo', (['"""north1.phase"""', '"""scsn.reverse"""'], {'reverse': '(True)'}), "('north1.phase', 'scsn.reverse', reverse=True)\n", (607, 653), False, 'from focal_mech.io.read_hash import read_demo, read_hash_solutions\n'), ((663, 706), 'focal_mech.util.hash_routines.hash_to_classifier', 'hash_to_classifier', (['polarity_data'], {'parity': '(1)'}), '(polarity_data, parity=1)\n', (681, 706), False, 'from focal_mech.util.hash_routines import hash_to_classifier\n'), ((734, 775), 'focal_mech.lib.classify_mechanism.classify', 'classify', (['*inputs[event]'], {'kernel_degree': '(2)'}), '(*inputs[event], kernel_degree=2)\n', (742, 775), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((782, 828), 'focal_mech.lib.classify_mechanism.translate_to_sphharm', 'translate_to_sphharm', (['*result'], {'kernel_degree': '(2)'}), '(*result, kernel_degree=2)\n', (802, 828), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((839, 952), 'numpy.array', 'array', (['[Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1], Alm[2,\n 0], Alm[2, 1], Alm[2, 2]]'], {}), '([Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1],\n Alm[2, 0], Alm[2, 1], Alm[2, 2]])\n', (844, 952), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((990, 1005), 'focal_mech.lib.correlate.corr_shear', 'corr_shear', (['Alm'], {}), '(Alm)\n', (1000, 1005), False, 'from focal_mech.lib.correlate import corr_shear\n'), ((1047, 1082), 'focal_mech.lib.sph_harm.get_sph_harm', 'get_sph_harm', ([], {'resolution': 'resolution'}), '(resolution=resolution)\n', (1059, 1082), False, 'from focal_mech.lib.sph_harm import get_sph_harm\n'), ((1188, 1219), 'matplotlib.pylab.contour', 'contour', (['longi', 'lati', 'mech', '[0]'], {}), '(longi, lati, mech, [0])\n', (1195, 1219), False, 'from matplotlib.pylab import contour\n'), ((1275, 1288), 'numpy.rad2deg', 'rad2deg', (['pth1'], {}), '(pth1)\n', (1282, 1288), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((1345, 1358), 'numpy.rad2deg', 'rad2deg', (['pth2'], {}), '(pth2)\n', (1352, 1358), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((1373, 1399), 'numpy.rad2deg', 'rad2deg', (['hash_solns[event]'], {}), '(hash_solns[event])\n', (1380, 1399), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((1430, 1472), 'focal_mech.lib.classify_mechanism.classify', 'classify', (['*inputs[event2]'], {'kernel_degree': '(2)'}), '(*inputs[event2], kernel_degree=2)\n', (1438, 1472), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((1479, 1525), 'focal_mech.lib.classify_mechanism.translate_to_sphharm', 'translate_to_sphharm', (['*result'], {'kernel_degree': '(2)'}), '(*result, kernel_degree=2)\n', (1499, 1525), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((1536, 1649), 'numpy.array', 'array', (['[Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1], Alm[2,\n 0], Alm[2, 1], Alm[2, 2]]'], {}), '([Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1],\n Alm[2, 0], Alm[2, 1], Alm[2, 2]])\n', (1541, 1649), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), 
((1687, 1702), 'focal_mech.lib.correlate.corr_shear', 'corr_shear', (['Alm'], {}), '(Alm)\n', (1697, 1702), False, 'from focal_mech.lib.correlate import corr_shear\n'), ((1744, 1779), 'focal_mech.lib.sph_harm.get_sph_harm', 'get_sph_harm', ([], {'resolution': 'resolution'}), '(resolution=resolution)\n', (1756, 1779), False, 'from focal_mech.lib.sph_harm import get_sph_harm\n'), ((1885, 1916), 'matplotlib.pylab.contour', 'contour', (['longi', 'lati', 'mech', '[0]'], {}), '(longi, lati, mech, [0])\n', (1892, 1916), False, 'from matplotlib.pylab import contour\n'), ((1972, 1985), 'numpy.rad2deg', 'rad2deg', (['pth3'], {}), '(pth3)\n', (1979, 1985), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2042, 2055), 'numpy.rad2deg', 'rad2deg', (['pth4'], {}), '(pth4)\n', (2049, 2055), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2071, 2098), 'numpy.rad2deg', 'rad2deg', (['hash_solns[event2]'], {}), '(hash_solns[event2])\n', (2078, 2098), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2129, 2171), 'focal_mech.lib.classify_mechanism.classify', 'classify', (['*inputs[event3]'], {'kernel_degree': '(2)'}), '(*inputs[event3], kernel_degree=2)\n', (2137, 2171), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((2178, 2224), 'focal_mech.lib.classify_mechanism.translate_to_sphharm', 'translate_to_sphharm', (['*result'], {'kernel_degree': '(2)'}), '(*result, kernel_degree=2)\n', (2198, 2224), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((2235, 2348), 'numpy.array', 'array', (['[Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1], Alm[2,\n 0], Alm[2, 1], Alm[2, 2]]'], {}), '([Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1],\n Alm[2, 0], Alm[2, 1], Alm[2, 2]])\n', (2240, 2348), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2386, 2401), 'focal_mech.lib.correlate.corr_shear', 'corr_shear', (['Alm'], {}), '(Alm)\n', (2396, 2401), False, 'from focal_mech.lib.correlate import corr_shear\n'), ((2443, 2478), 'focal_mech.lib.sph_harm.get_sph_harm', 'get_sph_harm', ([], {'resolution': 'resolution'}), '(resolution=resolution)\n', (2455, 2478), False, 'from focal_mech.lib.sph_harm import get_sph_harm\n'), ((2584, 2615), 'matplotlib.pylab.contour', 'contour', (['longi', 'lati', 'mech', '[0]'], {}), '(longi, lati, mech, [0])\n', (2591, 2615), False, 'from matplotlib.pylab import contour\n'), ((2671, 2684), 'numpy.rad2deg', 'rad2deg', (['pth5'], {}), '(pth5)\n', (2678, 2684), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2741, 2754), 'numpy.rad2deg', 'rad2deg', (['pth6'], {}), '(pth6)\n', (2748, 2754), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2770, 2797), 'numpy.rad2deg', 'rad2deg', (['hash_solns[event3]'], {}), '(hash_solns[event3])\n', (2777, 2797), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2806, 2853), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""', 'figsize': '(10, 20)'}), "(facecolor='white', figsize=(10, 20))\n", (2816, 2853), True, 'import matplotlib.pyplot as plt\n'), ((3142, 3162), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*svm_soln'], {}), '(*svm_soln)\n', (3151, 3162), False, 'from obspy.imaging.beachball import aux_plane\n'), ((3301, 3323), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*hash_focal'], {}), '(*hash_focal)\n', (3310, 3323), False, 'from obspy.imaging.beachball import aux_plane\n'), 
((3374, 3409), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event][:, 0]'], {}), '(polarity_data[event][:, 0])\n', (3381, 3409), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((3415, 3450), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event][:, 1]'], {}), '(polarity_data[event][:, 1])\n', (3422, 3450), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((4002, 4023), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*svm_soln2'], {}), '(*svm_soln2)\n', (4011, 4023), False, 'from obspy.imaging.beachball import aux_plane\n'), ((4163, 4186), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*hash_focal2'], {}), '(*hash_focal2)\n', (4172, 4186), False, 'from obspy.imaging.beachball import aux_plane\n'), ((4237, 4273), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event2][:, 0]'], {}), '(polarity_data[event2][:, 0])\n', (4244, 4273), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((4279, 4315), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event2][:, 1]'], {}), '(polarity_data[event2][:, 1])\n', (4286, 4315), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((4868, 4889), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*svm_soln3'], {}), '(*svm_soln3)\n', (4877, 4889), False, 'from obspy.imaging.beachball import aux_plane\n'), ((5029, 5052), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*hash_focal3'], {}), '(*hash_focal3)\n', (5038, 5052), False, 'from obspy.imaging.beachball import aux_plane\n'), ((5103, 5139), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event3][:, 0]'], {}), '(polarity_data[event3][:, 0])\n', (5110, 5139), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((5145, 5181), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event3][:, 1]'], {}), '(polarity_data[event3][:, 1])\n', (5152, 5181), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((5444, 5481), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(4.0)', 'h_pad': '(20.0)'}), '(pad=4.0, h_pad=20.0)\n', (5460, 5481), True, 'import matplotlib.pyplot as plt\n'), ((5483, 5493), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5491, 5493), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.ensemble
import sklearn.metrics
import sklearn
import progressbar
import sklearn.model_selection
from plotnine import *
import pdb
import sys
sys.path.append("smooth_rf/")
import smooth_base
import smooth_level
# function
def average_depth(random_forest, data):
"""
calculate the average depth of each point (average across trees)
Arguments:
----------
random_forest : sklearn random forest model (fit)
data : array (n, p)
data frame that can be predicted from random_forest
Returns:
--------
average_depth : array (n,)
vector of average depth in forest of each data point
"""
# test:
#rf_fit
#smooth_rf_opt
#d1 = average_depth(rf_fit, data)
#d2 = average_depth(smooth_rf_opt, data)
#np.all(d1 == d2)
n_trees = len(random_forest.estimators_)
n_obs = data.shape[0]
depth = np.zeros(n_obs)
for t in random_forest.estimators_:
d_path = t.decision_path(data)
depth = depth + np.array(d_path.sum(axis = 1)).ravel()
return depth / n_trees
# start of analysis
data, y = smooth_base.generate_data(large_n = 650)
data_vis = pd.DataFrame(data = {"x1":data[:,0],
"x2":data[:,1],
"y":y},
columns = ["x1","x2","y"])
ggout = ggplot(data_vis) +\
geom_point(aes(x = "x1",y ="x2", color = "factor(y)")) +\
theme_minimal() +\
labs(x= "X1", y = "X2", color = "value (minus 100)")
rf = sklearn.ensemble.RandomForestRegressor(n_estimators = 300)
rf_fit = rf.fit(data,y)
smooth_rf_opt, smooth_rf_last ,_, _ = smooth_base.smooth(
rf_fit,
X_trained = data,
y_trained = y.ravel(),
X_tune = None,
y_tune = None,
resample_tune= False, # oob
no_constraint = False,
subgrad_max_num = 10000,
subgrad_t_fix = 1,
parents_all=True,
verbose = True,
all_trees = False,
initial_lamb_seed = None)
# test data
data_test, y_test = smooth_base.generate_data(large_n = 10000)
reorder = np.random.choice(data_test.shape[0],
size = data_test.shape[0], replace= False)
data_test = data_test[reorder,:]
y_test = y_test[reorder]
yhat_base = rf_fit.predict(data_test)
yhat_smooth = smooth_rf_opt.predict(data_test)
base_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_base)
smooth_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_smooth)
error_base = np.abs(yhat_base - y_test)
error_smooth = np.abs(yhat_smooth - y_test)
extreme_binary = np.max([np.max(np.abs(error_base)),
np.max(np.abs(error_smooth))])
col_vis = error_base - error_smooth
extreme = np.max(np.abs(col_vis))
mean_depth_test = average_depth(rf_fit,data_test)
data_vis = pd.DataFrame(data = {"X1":data_test[:,0],
"X2":data_test[:,1],
"y": y_test.ravel(),
"error_base":error_base.copy(),
"error_smooth":error_smooth.copy(),
"error":col_vis.copy(),
"mean_depth":mean_depth_test.copy()},
columns = ["X1","X2","y","error",
"error_base","error_smooth",
"mean_depth"])
a = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "error"),
size = .5) +\
scale_color_continuous(name = "bwr",
limits= [-extreme, extreme]) +\
theme_bw() +\
labs(color = "Difference in Error",
title = r'Difference in Error ($Error_{base} - Error_{smooth}$)')
b = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "error_base"),
size = .5) +\
scale_color_continuous(name = "binary",
limits= [0, extreme_binary]) +\
theme_bw() +\
labs(color = "Error",
title = "Error from Base Random Forest")
c = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "error_smooth"),
size = .5) +\
scale_color_continuous(name = "binary",
limits= [0, extreme_binary]) +\
theme_bw() +\
labs(color = "Error",
title = "Error from Smoothed Random Forest")
d = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "factor(y)"),
size = .5) +\
theme_bw() +\
labs(color = "True Value (discrete)",
title = "Test Set True Values")
e = ggplot(data_vis,aes(x = "mean_depth", y = "error")) +\
geom_point(alpha = .1) +\
theme_bw() +\
labs(x = "Mean depth in Forest",
y = "Difference in Error",
title = "Lack of relationship between diff in errors and depth")
f = ggplot(data_vis, aes(x = "X1", y = "X2", color = "mean_depth")) +\
geom_point() +\
scale_color_continuous(name = "Blues") +\
theme_bw() +\
labs(color = "Mean depth in Forest",
title = "Mean depth in Forest (Depth averaged across trees)")
g = ggplot(data_vis) +\
geom_point(aes(x = "error_base", y = "error_smooth"),
alpha = .05) +\
geom_abline(intercept = 0, slope = 1) +\
theme_bw() +\
labs(x = "Error from Random Forest",
y = "Error from Smooth Random Forest",
title = "Comparing Errors Between Models",
subtitle = r"(total error: rf: %f vs srf: %f)" %\
(base_mse, smooth_mse))
save_as_pdf_pages([a + theme(figure_size = (8,6))],
filename = "images/diff_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([b + theme(figure_size = (8,6))],
filename = "images/error_base"+"_understanding_smoothing.pdf")
save_as_pdf_pages([c + theme(figure_size = (8,6))],
filename = "images/error_smooth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([d + theme(figure_size = (8,6))],
filename = "images/truth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([e + theme(figure_size = (8,6))],
filename = "images/mean_depth_diff_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([f + theme(figure_size = (8,6))],
filename = "images/mean_depth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([g + theme(figure_size = (8,6))],
filename = "images/error_vs_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([a + theme(figure_size = (8,6)),
b + theme(figure_size = (8,6)),
c + theme(figure_size = (8,6)),
d + theme(figure_size = (8,6)),
e + theme(figure_size = (8,6)),
f + theme(figure_size = (8,6)),
g + theme(figure_size = (8,6))],
filename = "images/understanding_smoothing.pdf")
# some of these observations might be due to the decision on the values of the classes
# we'll see
|
[
"numpy.abs",
"sklearn.ensemble.RandomForestRegressor",
"numpy.random.choice",
"sklearn.metrics.mean_squared_error",
"numpy.zeros",
"smooth_base.generate_data",
"pandas.DataFrame",
"sys.path.append"
] |
[((229, 258), 'sys.path.append', 'sys.path.append', (['"""smooth_rf/"""'], {}), "('smooth_rf/')\n", (244, 258), False, 'import sys\n'), ((1181, 1219), 'smooth_base.generate_data', 'smooth_base.generate_data', ([], {'large_n': '(650)'}), '(large_n=650)\n', (1206, 1219), False, 'import smooth_base\n'), ((1234, 1329), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'x1': data[:, 0], 'x2': data[:, 1], 'y': y}", 'columns': "['x1', 'x2', 'y']"}), "(data={'x1': data[:, 0], 'x2': data[:, 1], 'y': y}, columns=[\n 'x1', 'x2', 'y'])\n", (1246, 1329), True, 'import pandas as pd\n'), ((1589, 1645), 'sklearn.ensemble.RandomForestRegressor', 'sklearn.ensemble.RandomForestRegressor', ([], {'n_estimators': '(300)'}), '(n_estimators=300)\n', (1627, 1645), False, 'import sklearn\n'), ((2434, 2474), 'smooth_base.generate_data', 'smooth_base.generate_data', ([], {'large_n': '(10000)'}), '(large_n=10000)\n', (2459, 2474), False, 'import smooth_base\n'), ((2488, 2564), 'numpy.random.choice', 'np.random.choice', (['data_test.shape[0]'], {'size': 'data_test.shape[0]', 'replace': '(False)'}), '(data_test.shape[0], size=data_test.shape[0], replace=False)\n', (2504, 2564), True, 'import numpy as np\n'), ((2751, 2818), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', ([], {'y_true': 'y_test', 'y_pred': 'yhat_base'}), '(y_true=y_test, y_pred=yhat_base)\n', (2785, 2818), False, 'import sklearn\n'), ((2836, 2905), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', ([], {'y_true': 'y_test', 'y_pred': 'yhat_smooth'}), '(y_true=y_test, y_pred=yhat_smooth)\n', (2870, 2905), False, 'import sklearn\n'), ((2925, 2951), 'numpy.abs', 'np.abs', (['(yhat_base - y_test)'], {}), '(yhat_base - y_test)\n', (2931, 2951), True, 'import numpy as np\n'), ((2967, 2995), 'numpy.abs', 'np.abs', (['(yhat_smooth - y_test)'], {}), '(yhat_smooth - y_test)\n', (2973, 2995), True, 'import numpy as np\n'), ((959, 974), 'numpy.zeros', 'np.zeros', (['n_obs'], {}), '(n_obs)\n', (967, 974), True, 'import numpy as np\n'), ((3159, 3174), 'numpy.abs', 'np.abs', (['col_vis'], {}), '(col_vis)\n', (3165, 3174), True, 'import numpy as np\n'), ((3029, 3047), 'numpy.abs', 'np.abs', (['error_base'], {}), '(error_base)\n', (3035, 3047), True, 'import numpy as np\n'), ((3081, 3101), 'numpy.abs', 'np.abs', (['error_smooth'], {}), '(error_smooth)\n', (3087, 3101), True, 'import numpy as np\n')]
|
'''
Created on Sep 29, 2021
@author: thomas
'''
import ImageNetTools
import sys
import getopt
def main(argv):
    try:
        opts, args = getopt.getopt(argv, "hd:", ["dataset="])
    except getopt.GetoptError:
        printHelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            printHelp()
            sys.exit()
        elif opt in ("-d", "--dataset"):
            ImageNetTools.benchmarkIOSpeeds(arg)
            sys.exit()
def printHelp():
print('Run IO Speed testing with a given Dataset')
print('python iotest.py -d /path/to/dataset' )
main(sys.argv[1:])
|
[
"ImageNetTools.benchmarkIOSpeeds",
"getopt.getopt",
"sys.exit"
] |
[((412, 422), 'sys.exit', 'sys.exit', ([], {}), '()\n', (420, 422), False, 'import sys\n'), ((167, 207), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hd:"""', "['dataset=']"], {}), "(argv, 'hd:', ['dataset='])\n", (180, 207), False, 'import getopt\n'), ((265, 276), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (273, 276), False, 'import sys\n'), ((366, 402), 'ImageNetTools.benchmarkIOSpeeds', 'ImageNetTools.benchmarkIOSpeeds', (['arg'], {}), '(arg)\n', (397, 402), False, 'import ImageNetTools\n')]
|
import json
import math
from HistoricalTweetDataFetcher import getHistoricalData
joelsarray = getHistoricalData(0)
arrs = []
arrm = []
arrp = []
arrsTotal = 0
arrmTotal = 0
ncount = 0
ccount = 0
lcount = 0
time = joelsarray[0]["h"]
for dictionary in joelsarray:
arrs.append(dictionary["s"])
arrm.append(dictionary["m"])
arrp.append(dictionary["p"])
for x in range(len(arrs)):
arrsTotal += arrs[x]
arrmTotal += arrm[x]
if arrp[x]=='l':
lcount += 1
elif arrp[x]=='c':
ccount += 1
elif arrp[x]=='n':
ncount += 1
arrsAvg = arrsTotal/len(arrs)#sentiment value
arrmAvg = arrmTotal/len(arrm)#magnitude value
#print(arrsTotal)
#print(len(arrs))
#print(arrsAvg)
#print(arrmAvg)
#print(lcount)
#print(ccount)
###################################################################
filename2 = "weather_us.json"
if filename2:
with open(filename2, 'r') as f:
weatherstore = json.load(f)
    statearray = list(weatherstore.keys())
statesAverage = 0
for state in statearray:
for x in range(50):
temptemp = float(weatherstore[state]["temperature"])
temphigh = float(weatherstore[state]["average_monthly_high"])
templow = float(weatherstore[state]["average_monthly_low"])
statesAverage+=((temptemp-temphigh)*(templow-temptemp))/(math.pow(((temphigh+templow)/2),2))
statesAverage = statesAverage/50  # this is the average temperature multiplier
print(statesAverage)
#####################################################################################
filename3 = "sp500_price.json"
if filename3:
with open(filename3, 'r') as f:
stockdata = json.load(f)
stockpricecurrent = stockdata["current_price"]
stockpricechange = stockdata["percent_change"]#percent change of S&P500
if stockpricechange <= 0.73 and stockpricechange >=-0.73:
    stockmultiply = 0
else:
stockmultiply = stockpricechange*0.5*0.73
print(stockpricechange)
#########################################################################################
filename4 = "trump_approval_rating.json"
if filename4:
with open(filename4, 'r') as f:
approvalratingdata = json.load(f)
approveAvg = approvalratingdata["approve_avg"]#approval average data
currentApproval = approvalratingdata["approve"]#current approval percentage
########################################################################################
def equation(sentiment, stockmultiply, pollcurrent, pollaverage, avgtemp, lvalue, cvalue, ltweets, ctweet, time, const1 = 70, const2 = 60, const3 = 50, const4 = 45, const5 = 25, slideInput = True):
point = const1*(sentiment) + const2*(stockmultiply)+const3*((pollcurrent-pollaverage)/(pollaverage))+const4*avgtemp + const5/2*lvalue*ltweets+ const5/2*cvalue+ctweet+const5
filename5 = "data.json"
if(slideInput==True):
if filename5:
with open(filename5, 'r') as f:
outputdata = json.load(f)
print(outputdata)
outputdata["chartData"]["labels"][0]=outputdata["chartData"]["labels"][1]
outputdata["chartData"]["labels"][1]=outputdata["chartData"]["labels"][2]
outputdata["chartData"]["labels"][2]=outputdata["chartData"]["labels"][3]
outputdata["chartData"]["labels"][3]=outputdata["chartData"]["labels"][4]
outputdata["chartData"]["labels"][4]=outputdata["chartData"]["labels"][5]
outputdata["chartData"]["labels"][5]=outputdata["chartData"]["labels"][6]
outputdata["chartData"]["labels"][6] = str(time)+":00"
outputdata["chartData"]["thisWeek"][0]=outputdata["chartData"]["thisWeek"][1]
outputdata["chartData"]["thisWeek"][1]=outputdata["chartData"]["thisWeek"][2]
outputdata["chartData"]["thisWeek"][2]=outputdata["chartData"]["thisWeek"][3]
outputdata["chartData"]["thisWeek"][3]=outputdata["chartData"]["thisWeek"][4]
outputdata["chartData"]["thisWeek"][4]=outputdata["chartData"]["thisWeek"][5]
outputdata["chartData"]["thisWeek"][5]=outputdata["chartData"]["thisWeek"][6]
outputdata["chartData"]["thisWeek"][6] = point
with open(filename5, 'w') as f:
json.dump(outputdata, f)
else:
if filename5:
with open(filename5, 'r') as f:
outputdata = json.load(f)
print(outputdata)
outputdata["chartData"]["labels"][0]=outputdata["chartData"]["labels"][1]
outputdata["chartData"]["labels"][1]=outputdata["chartData"]["labels"][2]
outputdata["chartData"]["labels"][2]=outputdata["chartData"]["labels"][3]
outputdata["chartData"]["labels"][3]=outputdata["chartData"]["labels"][4]
outputdata["chartData"]["labels"][4]=outputdata["chartData"]["labels"][5]
outputdata["chartData"]["labels"][5]=outputdata["chartData"]["labels"][6]
outputdata["chartData"]["labels"][6] = str(time) + ":00"
outputdata["chartData"]["thisWeek"][0]=outputdata["chartData"]["thisWeek"][1]
outputdata["chartData"]["thisWeek"][1]=outputdata["chartData"]["thisWeek"][2]
outputdata["chartData"]["thisWeek"][2]=outputdata["chartData"]["thisWeek"][3]
outputdata["chartData"]["thisWeek"][3]=outputdata["chartData"]["thisWeek"][4]
outputdata["chartData"]["thisWeek"][4]=outputdata["chartData"]["thisWeek"][5]
outputdata["chartData"]["thisWeek"][5]=outputdata["chartData"]["thisWeek"][6]
outputdata["chartData"]["thisWeek"][6] = point
with open(filename5, 'w') as f:
json.dump(outputdata, f)
return point
my_list = equation(arrsAvg, stockmultiply, currentApproval, approveAvg, statesAverage, 0, 0, lcount, ccount, 17, 70, 60, 50, 45, 25)
|
[
"json.load",
"HistoricalTweetDataFetcher.getHistoricalData",
"math.pow",
"json.dump"
] |
[((99, 119), 'HistoricalTweetDataFetcher.getHistoricalData', 'getHistoricalData', (['(0)'], {}), '(0)\n', (116, 119), False, 'from HistoricalTweetDataFetcher import getHistoricalData\n'), ((973, 985), 'json.load', 'json.load', (['f'], {}), '(f)\n', (982, 985), False, 'import json\n'), ((1728, 1740), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1737, 1740), False, 'import json\n'), ((2245, 2257), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2254, 2257), False, 'import json\n'), ((1395, 1432), 'math.pow', 'math.pow', (['((temphigh + templow) / 2)', '(2)'], {}), '((temphigh + templow) / 2, 2)\n', (1403, 1432), False, 'import math\n'), ((4282, 4306), 'json.dump', 'json.dump', (['outputdata', 'f'], {}), '(outputdata, f)\n', (4291, 4306), False, 'import json\n'), ((5654, 5678), 'json.dump', 'json.dump', (['outputdata', 'f'], {}), '(outputdata, f)\n', (5663, 5678), False, 'import json\n'), ((3044, 3056), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3053, 3056), False, 'import json\n'), ((4416, 4428), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4425, 4428), False, 'import json\n')]
|
#!/usr/bin/env python3
import collections
import logging
import os
import typing
import unicodedata
from janome.tokenizer import Tokenizer
from transformers.file_utils import cached_path
from transformers.models.bert.tokenization_bert import BertTokenizer, WordpieceTokenizer, load_vocab
import bunkai.constant
"""
The original source code is from cl-tohoku/bert-japanese.
https://github.com/cl-tohoku/bert-japanese/blob/master/tokenization.py
The original source code is under Apache-2.0 License.
"""
logger = logging.getLogger(__name__)
KNOWN_PRETRAINED_VOCABS = {
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
}
class JanomeTokenizer(object):
"""Runs basic tokenization with Janome morphological parser."""
def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True):
"""
Construct a JanomeTokenizer.
        :arg do_lower_case: (`optional`) boolean (default False)
Whether to lower case the input.
:arg never_split: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
:arg normalize_text: (`optional`) boolean (default True)
Whether to apply unicode normalization to text before tokenization.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split if never_split is not None else []
self.normalize_text = normalize_text
self.janome_tokenizer = Tokenizer()
def tokenize(self, text: str, *, never_split=None, **kwargs):
"""Tokenizes a piece of text."""
if self.normalize_text:
text = unicodedata.normalize("NFKC", text)
never_split = self.never_split + (never_split if never_split is not None else [])
tokens = self.janome_tokenizer.tokenize(text)
__tokens = []
last_index = 0
for t in tokens:
token = t.surface
token_start = text.index(token, last_index)
if last_index != token_start:
__tokens.append(text[last_index:token_start])
if self.do_lower_case and token not in never_split:
                token = token.lower()
                __tokens.append(token)
else:
__tokens.append(token)
last_index = token_start + len(token)
if len(text) != last_index:
__tokens.append(text[last_index:])
assert text == "".join(__tokens), f"[{text}] != [{''.join(__tokens)}]"
return __tokens
class CharacterTokenizer(object):
"""Runs Character tokenziation."""
def __init__(self, vocab, unk_token, normalize_text=True):
self.vocab = vocab
self.unk_token = unk_token
self.normalize_text = normalize_text
def tokenize(self, text):
"""
Tokenize a piece of text into characters.
For example:
input = "apple"
output = ["a", "p", "p", "l", "e"]
:arg text: A single token or whitespace separated tokens.
This should have already been passed through `BasicTokenizer`.
:return: A list of characters.
"""
if self.normalize_text:
text = unicodedata.normalize("NFKC", text)
output_tokens = []
for char in text:
if char not in self.vocab:
output_tokens.append(self.unk_token)
continue
output_tokens.append(char)
return output_tokens
class JanomeSubwordsTokenizer(BertTokenizer):
def __init__(
self,
vocab_file,
*,
subword_tokenizer_type="wordpiece",
do_subword_tokenize: bool = True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
**kwargs,
):
"""
Construct a JanomeSubwordsTokenizer.
:arg vocab_file: Path to a one-wordpiece-per-line vocabulary file.
:arg do_lower_case: (`optional`) boolean (default True)
Whether to lower case the input.
Only has an effect when do_basic_tokenize=True.
:arg do_word_tokenize: (`optional`) boolean (default True) Whether to do word tokenization.
:arg do_subword_tokenize: (`optional`) boolean (default True) Whether to do subword tokenization.
:arg word_tokenizer_type: (`optional`) string (default "basic")
Type of word tokenizer. basic / janome / pre_tokenize
:arg subword_tokenizer_type: (`optional`) string (default "wordpiece") Type of subword tokenizer.
:arg cls_token: No description.
"""
super(BertTokenizer, self).__init__(
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs,
)
if os.path.isfile(vocab_file):
self.vocab = load_vocab(vocab_file)
elif vocab_file in KNOWN_PRETRAINED_VOCABS:
url: str = f"https://s3.amazonaws.com/models.huggingface.co/bert/{vocab_file}/vocab.txt"
self.vocab = load_vocab(cached_path(url))
else:
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
)
# add new vocab
self.add_tokens([" ", bunkai.constant.METACHAR_LINE_BREAK])
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_word_tokenize = False
self.do_subword_tokenize = True
if do_subword_tokenize:
if subword_tokenizer_type == "wordpiece":
self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
elif subword_tokenizer_type == "character":
self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=self.unk_token)
else:
raise ValueError("Invalid subword_tokenizer_type '{}' is specified.".format(subword_tokenizer_type))
self.janome_tokenizer = JanomeTokenizer()
def tokenize(self, text: typing.Union[str, typing.List[str]]) -> typing.List[str]:
if isinstance(text, str):
morphemes = self.janome_tokenizer.tokenize(text)
elif isinstance(text, list) and all([isinstance(t, str) for t in text]):
morphemes = text
else:
raise Exception(f"Invalid input-type {text}")
if self.do_subword_tokenize:
split_tokens = []
for token in morphemes:
sts = [sub_token for sub_token in self.subword_tokenizer.tokenize(token)]
if len(sts) == 0:
split_tokens.append(token)
else:
split_tokens += sts
else:
split_tokens = morphemes
return split_tokens
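# Minimal usage sketch: build the tokenizer from one of the known pretrained
# vocabularies and print the subword tokens of a short Japanese sentence.
# This assumes network access (the vocab is fetched via cached_path) and a
# transformers version compatible with the class above.
if __name__ == "__main__":
    tokenizer = JanomeSubwordsTokenizer("cl-tohoku/bert-base-japanese")
    print(tokenizer.tokenize("吾輩は猫である。"))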
|
[
"logging.getLogger",
"transformers.models.bert.tokenization_bert.load_vocab",
"os.path.isfile",
"janome.tokenizer.Tokenizer",
"transformers.file_utils.cached_path",
"unicodedata.normalize",
"transformers.models.bert.tokenization_bert.WordpieceTokenizer"
] |
[((516, 543), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (533, 543), False, 'import logging\n'), ((1735, 1746), 'janome.tokenizer.Tokenizer', 'Tokenizer', ([], {}), '()\n', (1744, 1746), False, 'from janome.tokenizer import Tokenizer\n'), ((5185, 5211), 'os.path.isfile', 'os.path.isfile', (['vocab_file'], {}), '(vocab_file)\n', (5199, 5211), False, 'import os\n'), ((1906, 1941), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKC"""', 'text'], {}), "('NFKC', text)\n", (1927, 1941), False, 'import unicodedata\n'), ((3467, 3502), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKC"""', 'text'], {}), "('NFKC', text)\n", (3488, 3502), False, 'import unicodedata\n'), ((5238, 5260), 'transformers.models.bert.tokenization_bert.load_vocab', 'load_vocab', (['vocab_file'], {}), '(vocab_file)\n', (5248, 5260), False, 'from transformers.models.bert.tokenization_bert import BertTokenizer, WordpieceTokenizer, load_vocab\n'), ((6152, 6214), 'transformers.models.bert.tokenization_bert.WordpieceTokenizer', 'WordpieceTokenizer', ([], {'vocab': 'self.vocab', 'unk_token': 'self.unk_token'}), '(vocab=self.vocab, unk_token=self.unk_token)\n', (6170, 6214), False, 'from transformers.models.bert.tokenization_bert import BertTokenizer, WordpieceTokenizer, load_vocab\n'), ((5450, 5466), 'transformers.file_utils.cached_path', 'cached_path', (['url'], {}), '(url)\n', (5461, 5466), False, 'from transformers.file_utils import cached_path\n')]
|
"""
Several methods for generating graphs from the stochastic block model.
"""
import itertools
import math
import random
import scipy.sparse
import numpy as np
def _get_num_pos_edges(c1_size, c2_size, same_cluster, self_loops, directed):
"""
Compute the number of possible edges between two clusters.
:param c1_size: The size of the first cluster
:param c2_size: The size of the second cluster
:param same_cluster: Whether these are the same cluster
:param self_loops: Whether we will generate self loops
:param directed: Whether we are generating a directed graph
:return: the number of possible edges between these clusters
"""
if not same_cluster:
# The number is simply the product of the number of vertices
return c1_size * c2_size
else:
# The base number is n choose 2
possible_edges_between_clusters = int((c1_size * (c1_size - 1)) / 2)
# If we are allowed self-loops, then add them on
if self_loops:
possible_edges_between_clusters += c1_size
# The number is normally the same for undirected and directed graphs, unless the clusters are the same, in which
# case the number for the directed graph is double since we need to consider both directions of each edge.
if directed:
possible_edges_between_clusters *= 2
# But if we are allowed self-loops, then we shouldn't double them since there is only one 'direction'.
if directed and self_loops:
possible_edges_between_clusters -= c1_size
return possible_edges_between_clusters
def _get_number_of_edges(c1_size, c2_size, prob, same_cluster, self_loops, directed):
"""
Compute the number of edges there will be between two clusters.
:param c1_size: The size of the first cluster
:param c2_size: The size of the second cluster
:param prob: The probability of an edge between the clusters
:param same_cluster: Whether these are the same cluster
:param self_loops: Whether we will generate self loops
:param directed: Whether we are generating a directed graph
:return: the number of edges to generate between these clusters
"""
# We need to compute the number of possible edges
possible_edges_between_clusters = _get_num_pos_edges(c1_size, c2_size, same_cluster, self_loops, directed)
# Sample the number of edges from the binomial distribution
return np.random.binomial(possible_edges_between_clusters, prob)
def _generate_sbm_edges(cluster_sizes, prob_mat_q, directed=False):
"""
Given a list of cluster sizes, and a square matrix Q, generates edges for a graph in the following way.
For two vertices u and v where u is in cluster i and v is in cluster j, there is an edge between u and v with
probability Q_{i, j}.
For the undirected case, we assume that the matrix Q is symmetric (and in practice look only at the upper triangle).
For the directed case, we generate edges (u, v) and (v, u) with probabilities Q_{i, j} and Q_{j, i} respectively.
May return self-loops. The calling code can decide what to do with them.
Returns edges as pairs (u, v) where u and v are integers giving the index of the respective vertices.
:param cluster_sizes: a list giving the number of vertices in each cluster
:param prob_mat_q: A square matrix where Q_{i, j} is the probability of each edge between clusters i and j. Should
be symmetric in the undirected case.
:param directed: Whether to generate a directed graph (default is false).
:return: Edges (u, v).
"""
# We will iterate over the clusters. This variable keeps track of the index of the first vertex in the current
# cluster_1.
c1_base_index = 0
for cluster_1 in range(len(cluster_sizes)):
# Keep track of the index of the first vertex in the current cluster_2
c2_base_index = c1_base_index
# If we are constructing a directed graph, we need to consider all values of cluster_2.
# Otherwise, we will consider only the clusters with an index >= cluster_1.
if directed:
second_clusters = range(len(cluster_sizes))
c2_base_index = 0
else:
second_clusters = range(cluster_1, len(cluster_sizes))
for cluster_2 in second_clusters:
# Compute the number of edges between these two clusters
num_edges = _get_number_of_edges(cluster_sizes[cluster_1],
cluster_sizes[cluster_2],
prob_mat_q[cluster_1][cluster_2],
cluster_1 == cluster_2,
True,
directed)
# Sample this number of edges. TODO: correct for possible double-sampling of edges
num_possible_edges = (cluster_sizes[cluster_1] * cluster_sizes[cluster_2]) - 1
for i in range(num_edges):
edge_idx = random.randint(0, num_possible_edges)
u = c1_base_index + int(edge_idx / cluster_sizes[cluster_1])
v = c2_base_index + (edge_idx % cluster_sizes[cluster_1])
yield u, v
# Update the base index for the second cluster
c2_base_index += cluster_sizes[cluster_2]
# Update the base index of this cluster
c1_base_index += cluster_sizes[cluster_1]
def sbm_adjmat(cluster_sizes, prob_mat_q, directed=False, self_loops=False):
"""
Generate a graph from the stochastic block model.
The list cluster_sizes gives the number of vertices inside each cluster and the matrix Q gives the probability of
each edge between pairs of clusters.
For two vertices u and v where u is in cluster i and v is in cluster j, there is an edge between u and v with
probability Q_{i, j}.
For the undirected case, we assume that the matrix Q is symmetric (and in practice look only at the upper triangle).
For the directed case, we generate edges (u, v) and (v, u) with probabilities Q_{i, j} and Q_{j, i} respectively.
Returns the adjacency matrix of the graph as a sparse scipy matrix in the CSR format.
:param cluster_sizes: The number of vertices in each cluster.
:param prob_mat_q: A square matrix where Q_{i, j} is the probability of each edge between clusters i and j. Should
be symmetric in the undirected case.
:param directed: Whether to generate a directed graph (default is false).
:param self_loops: Whether to generate self-loops (default is false).
:return: The sparse adjacency matrix of the graph.
"""
# Initialize the adjacency matrix
adj_mat = scipy.sparse.lil_matrix((sum(cluster_sizes), sum(cluster_sizes)))
# Generate the edges in the graph
for (u, v) in _generate_sbm_edges(cluster_sizes, prob_mat_q, directed=directed):
if u != v or self_loops:
# Add this edge to the adjacency matrix.
adj_mat[u, v] = 1
if not directed:
adj_mat[v, u] = 1
# Reformat the output matrix to the CSR format
return adj_mat.tocsr()
def sbm_adjmat_equal_clusters(n, k, prob_mat_q, directed=False):
"""
Generate a graph from the general stochastic block model.
Generates a graph with n vertices and k clusters. Every cluster will have floor(n/k) vertices. The probability of
each edge inside a cluster is given by the probability matrix Q.
:param n: The number of vertices in the graph.
:param k: The number of clusters.
:param prob_mat_q: q[i][j] gives the probability of an edge between clusters i and j
:param directed: Whether to generate a directed graph.
:return: The sparse adjacency matrix of the graph.
"""
return sbm_adjmat([int(n/k)] * k, prob_mat_q, directed=directed)
def ssbm_adjmat(n, k, p, q, directed=False):
"""
Generate a graph from the symmetric stochastic block model.
Generates a graph with n vertices and k clusters. Every cluster will have floor(n/k) vertices. The probability of
each edge inside a cluster is given by p. The probability of an edge between two different clusters is q.
:param n: The number of vertices in the graph.
:param k: The number of clusters.
:param p: The probability of an edge inside a cluster.
:param q: The probability of an edge between clusters.
:param directed: Whether to generate a directed graph.
:return: The sparse adjacency matrix of the graph.
"""
# Every cluster has the same size.
cluster_sizes = [int(n/k)] * k
# Construct the k*k probability matrix Q. The off-diagonal entries are all q and the diagonal entries are all p.
prob_mat_q = []
for row_num in range(k):
new_row = [q] * k
new_row[row_num] = p
prob_mat_q.append(new_row)
# Call the general sbm method.
return sbm_adjmat(cluster_sizes, prob_mat_q, directed=directed)
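# Minimal usage sketch: generate a symmetric SBM graph with 100 vertices in
# 2 clusters (p = 0.5 inside clusters, q = 0.05 between them) and report the
# matrix shape and the approximate number of undirected edges. The parameter
# values are illustrative assumptions, not prescribed by the module.
if __name__ == "__main__":
    adj = ssbm_adjmat(100, 2, 0.5, 0.05)
    print("shape:", adj.shape, "approx. undirected edges:", int(adj.nnz / 2))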
|
[
"random.randint",
"numpy.random.binomial"
] |
[((2449, 2506), 'numpy.random.binomial', 'np.random.binomial', (['possible_edges_between_clusters', 'prob'], {}), '(possible_edges_between_clusters, prob)\n', (2467, 2506), True, 'import numpy as np\n'), ((5081, 5118), 'random.randint', 'random.randint', (['(0)', 'num_possible_edges'], {}), '(0, num_possible_edges)\n', (5095, 5118), False, 'import random\n')]
|
#!/usr/bin/python3
"""
| --------------------- Py include <Mauro Baladés> ---------------------
| ___ _ _ _ __ _ _ ___ ____
| | |_) \ \_/ | | | |\ | / /` | | | | | | | \ | |_
| |_| |_| |_| |_| \| \_\_, |_|__ \_\_/ |_|_/ |_|__
| ----------------------------------------------------------------------
| MIT License
|
| Copyright (c) 2022 <NAME>
|
| Permission is hereby granted, free of charge, to any person obtaining a copy
| of this software and associated documentation files (the "Software"), to deal
| in the Software without restriction, including without limitation the rights
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
| copies of the Software, and to permit persons to whom the Software is
| furnished to do so, subject to the following conditions:
|
| The above copyright notice and this permission notice shall be included in all
| copies or substantial portions of the Software.
|
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
| SOFTWARE.
|
"""
from pathlib import Path
import sys
def _exec_modules(*args, **kwargs):
# Get locals from kwargs
local = kwargs.get("local", None)
# Check if local is None,
# because user did not define it.
if local is None:
raise Exception("Need to pass the local variable")
    # Iterate over every path that the user
    # gives as an argument (stored in *args).
for arg in args:
        # Store the argument as a
        # platform-specific path.
path = Path(arg)
        # Open the file and read its
        # content.
with open(path, "r") as f:
data = f.read()
# Execute the file content.
exec(data, globals(), local)
def _ret_modules(*args, **kwargs):
pass
def include(*args, **kwargs):
"""Here is where all the magic ocour. This function takes an
infinite amount of paths and they are being executend to
feel like user imported it.
Note:
It can also be used to store it into a variable if user
needs it. This can be done by adding the argument `ret`
to True (more detail in #Args).
Note:
Please note how (for the import statement) you will need a
`__init__.py` and paths separated by dots. With py-include,
you don't need. Py-include will make your path supported
by the current platform and it will open it's content and
execute it, so you don't need a path divided by `.` or
a `__init__.py`
Args:
files [list(str)]: A list of paths to include.
ret [bool]: If it is set to True, return the module (defaults to False).
Note:
If `ret` is set to `True`, the function will return all modules
        so the user will need to unpack them.
"""
    # Get whether the user wants to execute
    # the module or to return it (defaults to False).
    ret = kwargs.get("ret", False)
    # Check if the user set `ret` to True. If not,
    # we will open the file and execute its content.
    # If it is True, we will return the module they
    # wanted to import.
if not ret:
_exec_modules(*args, **kwargs)
return _ret_modules(*args, **kwargs)
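# Minimal usage sketch: "helpers.py" is a hypothetical file name used purely
# for illustration. include() executes its contents in the caller's namespace;
# the local=locals() argument is required because _exec_modules raises
# without it.
if __name__ == "__main__":
    import os
    if os.path.exists("helpers.py"):
        include("helpers.py", local=locals())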
|
[
"pathlib.Path"
] |
[((1907, 1916), 'pathlib.Path', 'Path', (['arg'], {}), '(arg)\n', (1911, 1916), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
"""dependenpy finder module."""
from importlib.util import find_spec
from os.path import basename, exists, isdir, isfile, join, splitext
class PackageSpec(object):
"""Holder for a package specification (given as argument to DSM)."""
def __init__(self, name, path, limit_to=None):
"""
Initialization method.
Args:
name (str): name of the package.
path (str): path to the package.
limit_to (list of str): limitations.
"""
self.name = name
self.path = path
self.limit_to = limit_to or []
def __hash__(self):
return hash((self.name, self.path))
@property
def ismodule(self):
"""Property to tell if the package is in fact a module (a file)."""
return self.path.endswith(".py")
def add(self, spec):
"""
Add limitations of given spec to self's.
Args:
spec (PackageSpec): another spec.
"""
for limit in spec.limit_to:
if limit not in self.limit_to:
self.limit_to.append(limit)
@staticmethod
def combine(specs):
"""
Combine package specifications' limitations.
Args:
specs (list of PackageSpec): the package specifications.
Returns:
list of PackageSpec: the new, merged list of PackageSpec.
"""
new_specs = {}
for spec in specs:
if new_specs.get(spec, None) is None:
new_specs[spec] = spec
else:
new_specs[spec].add(spec)
return list(new_specs.values())
class PackageFinder(object):
"""Abstract package finder class."""
def find(self, package, **kwargs):
"""
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
"""
raise NotImplementedError
class LocalPackageFinder(PackageFinder):
"""Finder to find local packages (directories on the disk)."""
def find(self, package, **kwargs):
"""
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
"""
if not exists(package):
return None
name, path = None, None
enforce_init = kwargs.pop("enforce_init", True)
if isdir(package):
if isfile(join(package, "__init__.py")) or not enforce_init:
name, path = basename(package), package
elif isfile(package) and package.endswith(".py"):
name, path = splitext(basename(package))[0], package
if name and path:
return PackageSpec(name, path)
return None
class InstalledPackageFinder(PackageFinder):
"""Finder to find installed Python packages using importlib."""
def find(self, package, **kwargs):
"""
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
"""
spec = find_spec(package)
if spec is None:
return None
limit = []
if "." in package:
package, limit = package.split(".", 1)
limit = [limit]
spec = find_spec(package)
if spec is not None:
if spec.submodule_search_locations:
path = spec.submodule_search_locations[0]
elif spec.origin and spec.origin != "built-in":
path = spec.origin
else:
return None
return PackageSpec(spec.name, path, limit)
return None
class Finder(object):
"""
Main package finder class.
Initialize it with a list of package finder classes (not instances).
"""
def __init__(self, finders=None):
"""
Initialization method.
Args:
finders (list of classes):
list of package finder classes (not instances) in a specific
order. Default: [LocalPackageFinder, InstalledPackageFinder].
"""
if finders is None:
self.finders = [LocalPackageFinder(), InstalledPackageFinder()]
else:
self.finders = [f() for f in finders]
def find(self, package, **kwargs):
"""
Find a package using package finders.
Return the first package found.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments used by finders.
Returns:
PackageSpec: if package found, else None
"""
for finder in self.finders:
package_spec = finder.find(package, **kwargs)
if package_spec:
return package_spec
return None
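# Minimal usage sketch: resolve a package name to a PackageSpec using the
# default finder order (local directories first, then installed packages).
# "dependenpy" is only an illustrative target; any local path or installed
# package name works, and find() returns None when nothing matches.
if __name__ == "__main__":
    spec = Finder().find("dependenpy")
    if spec is not None:
        print(spec.name, spec.path, spec.limit_to)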
|
[
"os.path.exists",
"importlib.util.find_spec",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"os.path.basename"
] |
[((2619, 2633), 'os.path.isdir', 'isdir', (['package'], {}), '(package)\n', (2624, 2633), False, 'from os.path import basename, exists, isdir, isfile, join, splitext\n'), ((3403, 3421), 'importlib.util.find_spec', 'find_spec', (['package'], {}), '(package)\n', (3412, 3421), False, 'from importlib.util import find_spec\n'), ((2479, 2494), 'os.path.exists', 'exists', (['package'], {}), '(package)\n', (2485, 2494), False, 'from os.path import basename, exists, isdir, isfile, join, splitext\n'), ((3615, 3633), 'importlib.util.find_spec', 'find_spec', (['package'], {}), '(package)\n', (3624, 3633), False, 'from importlib.util import find_spec\n'), ((2777, 2792), 'os.path.isfile', 'isfile', (['package'], {}), '(package)\n', (2783, 2792), False, 'from os.path import basename, exists, isdir, isfile, join, splitext\n'), ((2657, 2685), 'os.path.join', 'join', (['package', '"""__init__.py"""'], {}), "(package, '__init__.py')\n", (2661, 2685), False, 'from os.path import basename, exists, isdir, isfile, join, splitext\n'), ((2737, 2754), 'os.path.basename', 'basename', (['package'], {}), '(package)\n', (2745, 2754), False, 'from os.path import basename, exists, isdir, isfile, join, splitext\n'), ((2856, 2873), 'os.path.basename', 'basename', (['package'], {}), '(package)\n', (2864, 2873), False, 'from os.path import basename, exists, isdir, isfile, join, splitext\n')]
|
import math
def contfractbeta(a: float, b: float, x: float, itmax: int = 200) -> float:
# https://malishoaib.wordpress.com/2014/04/15/the-beautiful-beta-functions-in-raw-python/
# evaluates the continued fraction form of the incomplete Beta function; incompbeta()
# code translated from: Numerical Recipes in C
eps = 3.0e-7
bm = az = am = 1.0
qab = a + b
qap = a + 1.0
qam = a - 1.0
bz = 1.0 - qab * x / qap
for i in range(itmax + 1):
em = float(i + 1)
tem = em + em
d = em * (b - em) * x / ((qam + tem) * (a + tem))
ap = az + d * am
bp = bz + d * bm
d = -(a + em) * (qab + em) * x / ((qap + tem) * (a + tem))
app = ap + d * az
bpp = bp + d * bz
aold = az
am = ap / bpp
bm = bp / bpp
az = app / bpp
bz = 1.0
if abs(az - aold) < (eps * abs(az)):
return az
message = 'a or b too large or given itmax too small for computing incomplete beta function.'
raise ValueError(message)
def incompbeta(a: float, b: float, x: float) -> float:
# https://malishoaib.wordpress.com/2014/04/15/the-beautiful-beta-functions-in-raw-python/
# evaluates incomplete beta function, here a, b > 0 and 0 <= x <= 1
# this function requires contfractbeta(a,b,x, itmax = 200)
# code translated from: Numerical Recipes in C
if x == 0 or x == 1:
return x
else:
lbeta = math.lgamma(a + b) - math.lgamma(a) - math.lgamma(b) + a * math.log(x) + b * math.log(1 - x)
if x < (a + 1) / (a + b + 2):
return math.exp(lbeta) * contfractbeta(a, b, x) / a
else:
return 1 - math.exp(lbeta) * contfractbeta(b, a, 1 - x) / b
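# Minimal sanity check: the regularized incomplete beta function satisfies
# I_0.5(a, a) = 0.5 (by symmetry) and I_x(1, 1) = x, so the two calls below
# should print values close to 0.5 and 0.25 if the routines above are correct.
if __name__ == "__main__":
    print(incompbeta(2.0, 2.0, 0.5))   # expected to be close to 0.5
    print(incompbeta(1.0, 1.0, 0.25))  # expected to be close to 0.25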
|
[
"math.exp",
"math.lgamma",
"math.log"
] |
[((1534, 1549), 'math.log', 'math.log', (['(1 - x)'], {}), '(1 - x)\n', (1542, 1549), False, 'import math\n'), ((1495, 1509), 'math.lgamma', 'math.lgamma', (['b'], {}), '(b)\n', (1506, 1509), False, 'import math\n'), ((1516, 1527), 'math.log', 'math.log', (['x'], {}), '(x)\n', (1524, 1527), False, 'import math\n'), ((1607, 1622), 'math.exp', 'math.exp', (['lbeta'], {}), '(lbeta)\n', (1615, 1622), False, 'import math\n'), ((1457, 1475), 'math.lgamma', 'math.lgamma', (['(a + b)'], {}), '(a + b)\n', (1468, 1475), False, 'import math\n'), ((1478, 1492), 'math.lgamma', 'math.lgamma', (['a'], {}), '(a)\n', (1489, 1492), False, 'import math\n'), ((1689, 1704), 'math.exp', 'math.exp', (['lbeta'], {}), '(lbeta)\n', (1697, 1704), False, 'import math\n')]
|
__all__ = ["Binwalk"]
import os
import re
import time
import magic
from binwalk.compat import *
from binwalk.config import *
from binwalk.update import *
from binwalk.filter import *
from binwalk.parser import *
from binwalk.plugins import *
from binwalk.plotter import *
from binwalk.hexdiff import *
from binwalk.entropy import *
from binwalk.extractor import *
from binwalk.prettyprint import *
from binwalk.smartstrings import *
from binwalk.smartsignature import *
from binwalk.common import file_size, unique_file_name, BlockFile
class Binwalk(object):
'''
Primary Binwalk class.
Useful class objects:
self.filter - An instance of the MagicFilter class.
self.extractor - An instance of the Extractor class.
self.parser - An instance of the MagicParser class.
self.display - An instance of the PrettyPrint class.
self.magic_files - A list of magic file path strings to use whenever the scan() method is invoked.
self.scan_length - The total number of bytes to be scanned.
self.total_scanned - The number of bytes that have already been scanned.
self.scan_type - The type of scan being performed, one of: BINWALK, BINCAST, BINARCH, STRINGS, ENTROPY.
Performing a simple binwalk scan:
from binwalk import Binwalk
scan = Binwalk().scan(['firmware1.bin', 'firmware2.bin'])
for (filename, file_results) in scan.iteritems():
print "Results for %s:" % filename
for (offset, results) in file_results:
for result in results:
print offset, result['description']
'''
# Default libmagic flags. Basically disable anything we don't need in the name of speed.
DEFAULT_FLAGS = magic.MAGIC_NO_CHECK_TEXT | magic.MAGIC_NO_CHECK_ENCODING | magic.MAGIC_NO_CHECK_APPTYPE | magic.MAGIC_NO_CHECK_TOKENS
# Maximum magic bytes length
MAX_SIGNATURE_SIZE = 128
# Minimum verbosity level at which to enable extractor verbosity.
VERY_VERBOSE = 2
# Scan every byte by default.
DEFAULT_BYTE_ALIGNMENT = 1
# Valid scan_type values.
# ENTROPY must be the largest value to ensure it is performed last if multiple scans are performed.
# REHASH must also be larger than any scans that would generate extracted files.
BINWALK = 0x01
BINARCH = 0x02
BINCAST = 0x03
STRINGS = 0x04
COMPRESSION = 0x05
HEXDIFF = 0x06
CUSTOM = 0x07
REHASH = 0x08
BINVIS = 0x09
ENTROPY = 0x0A
def __init__(self, magic_files=[], flags=magic.MAGIC_NONE, log=None, quiet=False, verbose=0, ignore_smart_keywords=False, ignore_time_skews=False, load_extractor=False, load_plugins=True, exec_commands=True, max_extract_size=None):
'''
Class constructor.
@magic_files - A list of magic files to use.
        @flags - Flags to pass to magic_open. [TODO: Might this be more appropriate as an argument to load_signatures?]
@log - Output PrettyPrint data to log file as well as to stdout.
@quiet - If set to True, supress PrettyPrint output to stdout.
@verbose - Verbosity level.
@ignore_smart_keywords - Set to True to ignore smart signature keywords.
@ignore_time_skews - Set to True to ignore file results with timestamps in the future.
@load_extractor - Set to True to load the default extraction rules automatically.
@load_plugins - Set to False to disable plugin support.
@exec_commands - Set to False to disable the execution of external utilities when extracting data from files.
@max_extract_size - Limit the size of extracted files.
Returns None.
'''
self.flags = self.DEFAULT_FLAGS | flags
self.last_extra_data_section = ''
self.load_plugins = load_plugins
self.magic_files = magic_files
self.verbose = verbose
self.total_scanned = 0
self.scan_length = 0
self.total_read = 0
self.matryoshka = 1
self.epoch = 0
self.year = 0
self.plugins = None
self.magic = None
self.mfile = None
self.entropy = None
self.strings = None
self.scan_type = self.BINWALK
if not ignore_time_skews:
# Consider timestamps up to 1 year in the future valid,
# to account for any minor time skew on the local system.
self.year = time.localtime().tm_year + 1
self.epoch = int(time.time()) + (60 * 60 * 24 * 365)
# Instantiate the config class so we can access file/directory paths
self.config = Config()
# Use the system default magic file if no other was specified
if not self.magic_files or self.magic_files is None:
# Append the user's magic file first so that those signatures take precedence
self.magic_files = [
self.config.paths['user'][self.config.BINWALK_MAGIC_FILE],
self.config.paths['system'][self.config.BINWALK_MAGIC_FILE],
]
# Only set the extractor verbosity if told to be very verbose
if self.verbose >= self.VERY_VERBOSE:
extractor_verbose = True
else:
extractor_verbose = False
# Create an instance of the PrettyPrint class, which can be used to print results to screen/file.
self.display = PrettyPrint(self, log=log, quiet=quiet, verbose=verbose)
# Create MagicFilter and Extractor class instances. These can be used to:
#
# o Create include/exclude filters
# o Specify file extraction rules to be applied during a scan
#
self.filter = MagicFilter()
self.extractor = Extractor(verbose=extractor_verbose, exec_commands=exec_commands, max_size=max_extract_size)
if load_extractor:
self.extractor.load_defaults()
# Create SmartSignature and MagicParser class instances. These are mostly for internal use.
self.smart = SmartSignature(self.filter, ignore_smart_signatures=ignore_smart_keywords)
self.parser = MagicParser(self.filter, self.smart)
def __del__(self):
self.cleanup()
def __enter__(self):
return self
def __exit__(self, t, v, traceback):
self.cleanup()
def cleanup(self):
'''
Close magic and cleanup any temporary files generated by the internal instance of MagicParser.
Returns None.
'''
try:
self.magic.close()
except:
pass
try:
self.parser.cleanup()
except:
pass
def load_signatures(self, magic_files=[]):
'''
Load signatures from magic file(s).
Called automatically by Binwalk.scan() with all defaults, if not already called manually.
@magic_files - A list of magic files to use (default: self.magic_files).
Returns None.
'''
# The magic files specified here override any already set
if magic_files and magic_files is not None:
self.magic_files = magic_files
# Parse the magic file(s) and initialize libmagic
self.mfile = self.parser.parse(self.magic_files)
self.magic = magic.open(self.flags)
self.magic.load(str2bytes(self.mfile))
# Once the temporary magic file is loaded into libmagic, we don't need it anymore; delete the temp file
self.parser.rm_magic_file()
def hexdiff(self, file_names, length=0x100, offset=0, block=16, first=False):
if not length and len(file_names) > 0:
length = file_size(file_names[0])
if not block:
block = 16
HexDiff(self).display(file_names, offset=offset, size=length, block=block, show_first_only=first)
def analyze_strings(self, file_names, length=0, offset=0, n=0, block=0, load_plugins=True, whitelist=[], blacklist=[]):
'''
Performs a strings analysis on the specified file(s).
@file_names - A list of files to analyze.
@length - The number of bytes in the file to analyze.
@offset - The starting offset into the file to begin analysis.
@n - The minimum valid string length.
@block - The block size to use when performing entropy analysis.
@load_plugins - Set to False to disable plugin callbacks.
@whitelist - A list of whitelisted plugins.
@blacklist - A list of blacklisted plugins.
Returns a dictionary compatible with other classes and methods (Entropy, Binwalk, analyze_entropy, etc):
{
'file_name' : (offset, [{
'description' : 'Strings',
'string' : 'found_string'
}]
)
}
'''
data = {}
self.strings = Strings(file_names,
self,
length=length,
offset=offset,
n=n,
block=block,
algorithm='gzip', # Use gzip here as it is faster and we don't need the detail provided by shannon
load_plugins=load_plugins,
whitelist=whitelist,
blacklist=blacklist)
data = self.strings.strings()
del self.strings
self.strings = None
return data
def analyze_entropy(self, files, offset=0, length=0, block=0, plot=True, legend=True, save=False, algorithm=None, load_plugins=True, whitelist=[], blacklist=[], compcheck=False):
'''
Performs an entropy analysis on the specified file(s).
@files - A dictionary containing file names and results data, as returned by Binwalk.scan.
@offset - The offset into the data to begin analysis.
@length - The number of bytes to analyze.
@block - The size of the data blocks to analyze.
@plot - Set to False to disable plotting.
@legend - Set to False to exclude the legend and custom offset markers from the plot.
@save - Set to True to save plots to disk instead of displaying them.
@algorithm - Set to 'gzip' to use the gzip entropy "algorithm".
@load_plugins - Set to False to disable plugin callbacks.
@whitelist - A list of whitelisted plugins.
@blacklist - A list of blacklisted plugins.
@compcheck - Set to True to perform heuristic compression detection.
Returns a dictionary of:
{
'file_name' : ([list, of, offsets], [list, of, entropy], average_entropy)
}
'''
data = {}
self.entropy = Entropy(files,
self,
offset,
length,
block,
plot,
legend,
save,
algorithm=algorithm,
                                load_plugins=load_plugins,
whitelist=whitelist,
blacklist=blacklist,
compcheck=compcheck)
data = self.entropy.analyze()
del self.entropy
self.entropy = None
return data
def plot3d(self, target_files, offset=0, length=0, max_points=None, show_grids=False, verbose=False):
'''
Generates a 3D data plot of the specified target files.
@target_files - File or list of files to scan.
@offset - Starting offset at which to start the scan.
@length - Number of bytes to scan. Specify 0 to scan the entire file(s).
@max_points - Set the maximum number of data points to plot.
@show_grids - Set to True to show axis grids in the 3D plot.
@verbose - Set to True to enable verbose output.
Returns None.
'''
if not isinstance(target_files, type([])):
target_files = [target_files]
Plotter3D(target_files, offset=offset, length=length, max_points=max_points, show_grids=show_grids, verbose=verbose).plot()
def plot2d(self, target_files, offset=0, length=0, max_points=None, show_grids=False, verbose=False):
'''
Generates a 2D data plot of the specified target files.
@target_files - File or list of files to scan.
@offset - Starting offset at which to start the scan.
@length - Number of bytes to scan. Specify 0 to scan the entire file(s).
@max_points - Set the maximum number of data points to plot.
@show_grids - Set to True to show axis grids in the 3D plot.
@verbose - Set to True to enable verbose output.
Returns None.
'''
if not isinstance(target_files, type([])):
target_files = [target_files]
Plotter2D(target_files, offset=offset, length=length, max_points=max_points, show_grids=show_grids, verbose=verbose).plot()
def scan(self, target_files, offset=0, length=0, show_invalid_results=False, callback=None, start_callback=None, end_callback=None, base_dir=None, matryoshka=1, plugins_whitelist=[], plugins_blacklist=[]):
'''
Performs a binwalk scan on a file or list of files.
@target_files - File or list of files to scan.
@offset - Starting offset at which to start the scan.
@length - Number of bytes to scan. Specify -1 for streams.
@show_invalid_results - Set to True to display invalid results.
@callback - Callback function to be invoked when matches are found.
@start_callback - Callback function to be invoked prior to scanning each file.
@end_callback - Callback function to be invoked after scanning each file.
@base_dir - Base directory for output files.
@matryoshka - Number of levels to traverse into the rabbit hole.
@plugins_whitelist - A list of plugin names to load. If not empty, only these plugins will be loaded.
@plugins_blacklist - A list of plugin names to not load.
Returns a dictionary of :
{
'target file name' : [
(0, [{description : "LZMA compressed data..."}]),
(112, [{description : "gzip compressed data..."}])
]
}
'''
# Prefix all directory names with an underscore. This prevents accidental deletion of the original file(s)
        # when the user is typing too fast and is trying to delete the extraction directory.
prefix = '_'
dir_extension = 'extracted'
i = 0
total_results = {}
self.matryoshka = matryoshka
# For backwards compatibility
if not isinstance(target_files, type([])):
target_files = [target_files]
if base_dir is None:
base_dir = ''
# Instantiate the Plugins class and load all plugins, if not disabled
self.plugins = Plugins(self, whitelist=plugins_whitelist, blacklist=plugins_blacklist)
if self.load_plugins:
self.plugins._load_plugins()
# Load the magic signatures. This must be done for every scan, as some signature scans
# may use a different list of magic signatures.
self.load_signatures()
while i < self.matryoshka:
new_target_files = []
# Scan each target file
for target_file in target_files:
ignore_files = []
# On the first scan, add the base_dir value to dir_prefix. Subsequent target_file values will have this value prepended already.
if i == 0:
dir_prefix = os.path.join(base_dir, prefix + os.path.basename(target_file))
else:
dir_prefix = os.path.join(os.path.dirname(target_file), prefix + os.path.basename(target_file))
output_dir = unique_file_name(dir_prefix, dir_extension)
# Set the output directory for extracted files to go to
self.extractor.output_directory(output_dir)
if start_callback is not None:
start_callback(target_file)
results = self.single_scan(target_file,
offset=offset,
length=length,
show_invalid_results=show_invalid_results,
callback=callback)
if end_callback is not None:
end_callback(target_file)
# Get a list of extracted file names; don't scan them again.
for (index, results_list) in results:
for result in results_list:
if result['extract']:
ignore_files.append(result['extract'])
# Find all newly created files and add them to new_target_files / new_target_directories
for (dir_path, sub_dirs, files) in os.walk(output_dir):
for fname in files:
fname = os.path.join(dir_path, fname)
if fname not in ignore_files:
new_target_files.append(fname)
# Don't worry about sub-directories
break
total_results[target_file] = results
target_files = new_target_files
i += 1
# Be sure to delete the Plugins instance so that there isn't a lingering reference to
# this Binwalk class instance (lingering handles to this Binwalk instance cause the
# __del__ destructor to not be called).
if self.plugins is not None:
del self.plugins
self.plugins = None
return total_results
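# Illustrative use of scan() with recursive extraction (hypothetical 'bw' instance of this class);
# the loop below follows the return structure documented in the scan() docstring above:
#
#   results = bw.scan('firmware.bin', matryoshka=2)
#   for target_file in results:
#       for (offset, result_list) in results[target_file]:
#           print("0x%X\t%s" % (offset, result_list[0]['description']))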
def single_scan(self, target_file='', fd=None, offset=0, length=0, show_invalid_results=False, callback=None, plugins_whitelist=[], plugins_blacklist=[]):
'''
Performs a binwalk scan on one target file or file descriptor.
@target_file - File to scan.
@fd - A common.BlockFile object.
@offset - Starting offset at which to start the scan.
@length - Number of bytes to scan. Specify -1 for streams.
@show_invalid_results - Set to True to display invalid results.
@callback - Callback function to be invoked when matches are found.
@plugins_whitelist - A list of plugin names to load. If not empty, only these plugins will be loaded.
@plugins_blacklist - A list of plugin names to not load.
The callback function is passed two arguments: a list of result dictionaries containing the scan results
(one result per dict), and the offset at which those results were identified. Example callback function:
def my_callback(offset, results):
print "Found %d results at offset %d:" % (len(results), offset)
for result in results:
print "\t%s" % result['description']
binwalk.Binwalk(callback=my_callback).scan("firmware.bin")
Upon completion, the scan method returns a sorted list of tuples containing a list of results dictionaries
and the offsets at which those results were identified:
scan_results = [
(0, [{description : "LZMA compressed data..."}]),
(112, [{description : "gzip compressed data..."}])
]
See SmartSignature.parse for a more detailed description of the results dictionary structure.
'''
scan_results = {}
fsize = 0
jump_offset = 0
i_opened_fd = False
i_loaded_plugins = False
plugret = PLUGIN_CONTINUE
plugret_start = PLUGIN_CONTINUE
self.total_read = 0
self.total_scanned = 0
self.scan_length = length
self.filter.show_invalid_results = show_invalid_results
self.start_offset = offset
# Check to make sure either a target file or a file descriptor was supplied
if not target_file and fd is None:
raise Exception("Must supply Binwalk.single_scan with a valid file path or BlockFile object")
# Need the total size of the target file, even if we aren't scanning the whole thing
if target_file:
fsize = file_size(target_file)
# If no length was specified, make the length the size of the target file minus the starting offset
if self.scan_length == 0:
self.scan_length = fsize - offset
# Open the target file and seek to the specified start offset
if fd is None:
fd = BlockFile(target_file, length=self.scan_length, offset=offset)
i_opened_fd = True
# If offset is negative (bytes from EOF), BlockFile class will automatically calculate the right offset
offset = fd.offset
# Seek to the starting offset.
#fd.seek(offset)
# If the Plugins class has not already been instantiated, do that now.
if self.plugins is None:
self.plugins = Plugins(self, blacklist=plugins_blacklist, whitelist=plugins_whitelist)
i_loaded_plugins = True
if self.load_plugins:
self.plugins._load_plugins()
# Invoke any pre-scan plugins
plugret_start = self.plugins._pre_scan_callbacks(fd)
# Load the magic signatures if they weren't already loaded.
if not self.magic:
self.load_signatures()
# Main loop, scan through all the data
while not ((plugret | plugret_start) & PLUGIN_TERMINATE):
i = 0
# Read in the next block of data from the target file and make sure it's valid
(data, dlen) = fd.read_block()
if not data or dlen == 0:
break
# The total number of bytes scanned could be bigger than the total number
# of bytes read from the file if the previous signature result specified a
# jump offset that was beyond the end of the then current data block.
#
# If this is the case, we need to index into this data block appropriately in order to
# resume the scan from the appropriate offset.
#
# Don't update dlen though, as it is the literal offset into the data block that we
# are to scan up to in this loop iteration. It is also appended to self.total_scanned,
# which is what we want (even if we have been told to skip part of the block, the skipped
# part is still considered part of the total bytes scanned).
if jump_offset > 0:
total_check = self.total_scanned + dlen
# Is the jump offset beyond the total amount of data that we've currently read in (i.e., in a future data block)?
if jump_offset >= total_check:
i = -1
# Try to seek to the jump offset; this won't work if fd == sys.stdin
try:
fd.seek(jump_offset)
self.total_read = jump_offset
self.total_scanned = jump_offset - dlen
except:
pass
# Is the jump offset inside this block of data?
elif jump_offset > self.total_scanned and jump_offset < total_check:
# Index into this block appropriately; jump_offset is the file offset that
# we need to jump to, and self.total_scanned is the file offset that starts
# the beginning of the current block
i = jump_offset - self.total_scanned
# We're done with jump_offset, zero it out for the next round
jump_offset = 0
# Scan through each block of data looking for signatures
if i >= 0 and i < dlen:
# Scan this data block for a list of offsets which are candidates for possible valid signatures.
# Signatures could be split across the block boundary; since data contains 1KB more than dlen,
# pass up to dlen+MAX_SIGNATURE_SIZE to find_signature_candidates, but don't accept signatures that
# start after the end of dlen.
for candidate in self.parser.find_signature_candidates(data[i:dlen+self.MAX_SIGNATURE_SIZE], (dlen-i)):
# If a previous signature specified a jump offset beyond this candidate signature offset, ignore it
if (i + candidate + self.total_scanned) < jump_offset:
continue
# Reset these values on each loop
smart = {}
results = []
results_offset = -1
# In python3 we need a bytes object to pass to magic.buffer
candidate_data = str2bytes(data[i+candidate:i+candidate+fd.MAX_TRAILING_SIZE])
# Pass the data to libmagic, and split out multiple results into a list
for magic_result in self.parser.split(self.magic.buffer(candidate_data)):
i_set_results_offset = False
# Some signatures need to take into account the length of a given string
# when specifying additional offsets. Parse the string-len keyword to adjust
# for this prior to calling self.smart.parse.
magic_result = self.smart._parse_string_len(magic_result)
# Some file names are not NULL byte terminated, but rather their length is
# specified in a size field. To ensure these are not marked as invalid due to
# non-printable characters existing in the file name, parse the filename(s) and
# trim them to the specified filename length, if one was specified.
magic_result = self.smart._parse_raw_strings(magic_result)
# Invoke any pre-parser callback plugin functions
if not (plugret_start & PLUGIN_STOP_PLUGINS):
raw_result = {'description' : magic_result}
plugret = self.plugins._scan_pre_parser_callbacks(raw_result)
magic_result = raw_result['description']
if (plugret & PLUGIN_TERMINATE):
break
# Make sure this is a valid result before further processing
if not self.filter.invalid(magic_result):
# The smart filter parser returns a dictionary of keyword values and the signature description.
smart = self.smart.parse(magic_result)
# Validate the jump value and check if the response description should be displayed
if self._is_valid(smart, candidate+i, fsize):
# If multiple results are returned and one of them has smart['jump'] set to a non-zero value,
# the calculated results offset will be wrong since i will have been incremented. Only set the
# results_offset value when the first match is encountered.
if results_offset < 0:
results_offset = offset + i + candidate + smart['adjust'] + self.total_scanned
i_set_results_offset = True
# Double check to make sure the smart['adjust'] value is sane.
# If it makes results_offset negative, then it is not sane.
if results_offset >= 0:
smart['offset'] = results_offset
# Invoke any scan plugins
if not (plugret_start & PLUGIN_STOP_PLUGINS):
plugret = self.plugins._scan_callbacks(smart)
results_offset = smart['offset']
if (plugret & PLUGIN_TERMINATE):
break
# Extract the result, if it matches one of the extract rules and is not a delayed extract.
if self.extractor.enabled and not (self.extractor.delayed and smart['delay']) and not ((plugret | plugret_start) & PLUGIN_NO_EXTRACT):
# If the signature did not specify a size, extract to the end of the file.
if not smart['size']:
smart['size'] = fsize-results_offset
smart['extract'] = self.extractor.extract( results_offset,
smart['description'],
target_file,
smart['size'],
name=smart['name'])
if not ((plugret | plugret_start) & PLUGIN_NO_DISPLAY):
# This appears to be a valid result, so append it to the results list.
results.append(smart)
elif i_set_results_offset:
results_offset = -1
# Did we find any valid results?
if results_offset >= 0:
scan_results[results_offset] = results
if callback is not None:
callback(results_offset, results)
# If a relative jump offset was specified, update the absolute jump_offset variable
if has_key(smart, 'jump') and smart['jump'] > 0:
jump_offset = results_offset + smart['jump']
# Track the total number of bytes scanned
self.total_scanned += dlen
# The starting offset only affects the reported offset for results
# in the first block of data. Zero it out after the first block has
# been processed.
offset = 0
# Sort the results before returning them
scan_items = list(scan_results.items())
scan_items.sort()
# Do delayed extraction, if specified.
if self.extractor.enabled and self.extractor.delayed:
scan_items = self.extractor.delayed_extract(scan_items, target_file, fsize)
# Invoke any post-scan plugins
#if not (plugret_start & PLUGIN_STOP_PLUGINS):
self.plugins._post_scan_callbacks(fd)
# Be sure to delete the Plugins instance so that there isn't a lingering reference to
# this Binwalk class instance (lingering handles to this Binwalk instance cause the
# __del__ destructor to not be called).
if i_loaded_plugins:
del self.plugins
self.plugins = None
if i_opened_fd:
fd.close()
return scan_items
def concatenate_results(self, results, new):
'''
Concatenate multiple Binwalk.scan results into one dictionary.
@results - Binwalk results to append new results to.
@new - New data to append to results.
Returns None.
'''
for (new_file_name, new_data) in iterator(new):
if not has_key(results, new_file_name):
results[new_file_name] = new_data
else:
for i in range(0, len(new_data)):
found_offset = False
(new_offset, new_results_list) = new_data[i]
for j in range(0, len(results[new_file_name])):
(offset, results_list) = results[new_file_name][j]
if offset == new_offset:
results_list += new_results_list
results[new_file_name][j] = (offset, results_list)
found_offset = True
break
if not found_offset:
results[new_file_name] += new_data
def _is_valid(self, result, location, file_size):
'''
Determines if a result string is valid and should be displayed to the user or not.
@result - Result dictionary, as returned by self.smart.parse.
@location - The file offset of the result.
@file_size - The total size of the file.
Returns True if the string should be displayed.
Returns False if the string should not be displayed.
'''
if self.filter.show_invalid_results:
return True
if result['invalid'] or result['jump'] < 0 or result['size'] < 0:
return False
if ((location + result['size']) > file_size) or (self.year and result['year'] > self.year) or (self.epoch and result['epoch'] > self.epoch):
return False
desc = result['description']
return (desc and desc is not None and not self.filter.invalid(desc) and self.filter.filter(desc) != self.filter.FILTER_EXCLUDE)
|
[
"binwalk.common.file_size",
"os.path.join",
"binwalk.common.BlockFile",
"os.path.dirname",
"os.path.basename",
"magic.open",
"time.localtime",
"binwalk.common.unique_file_name",
"time.time",
"os.walk"
] |
[((6570, 6592), 'magic.open', 'magic.open', (['self.flags'], {}), '(self.flags)\n', (6580, 6592), False, 'import magic\n'), ((6903, 6927), 'binwalk.common.file_size', 'file_size', (['file_names[0]'], {}), '(file_names[0])\n', (6912, 6927), False, 'from binwalk.common import file_size, unique_file_name, BlockFile\n'), ((17896, 17918), 'binwalk.common.file_size', 'file_size', (['target_file'], {}), '(target_file)\n', (17905, 17918), False, 'from binwalk.common import file_size, unique_file_name, BlockFile\n'), ((18180, 18242), 'binwalk.common.BlockFile', 'BlockFile', (['target_file'], {'length': 'self.scan_length', 'offset': 'offset'}), '(target_file, length=self.scan_length, offset=offset)\n', (18189, 18242), False, 'from binwalk.common import file_size, unique_file_name, BlockFile\n'), ((14178, 14221), 'binwalk.common.unique_file_name', 'unique_file_name', (['dir_prefix', 'dir_extension'], {}), '(dir_prefix, dir_extension)\n', (14194, 14221), False, 'from binwalk.common import file_size, unique_file_name, BlockFile\n'), ((14983, 15002), 'os.walk', 'os.walk', (['output_dir'], {}), '(output_dir)\n', (14990, 15002), False, 'import os\n'), ((4145, 4161), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4159, 4161), False, 'import time\n'), ((4194, 4205), 'time.time', 'time.time', ([], {}), '()\n', (4203, 4205), False, 'import time\n'), ((14090, 14118), 'os.path.dirname', 'os.path.dirname', (['target_file'], {}), '(target_file)\n', (14105, 14118), False, 'import os\n'), ((15043, 15072), 'os.path.join', 'os.path.join', (['dir_path', 'fname'], {}), '(dir_path, fname)\n', (15055, 15072), False, 'import os\n'), ((14018, 14047), 'os.path.basename', 'os.path.basename', (['target_file'], {}), '(target_file)\n', (14034, 14047), False, 'import os\n'), ((14129, 14158), 'os.path.basename', 'os.path.basename', (['target_file'], {}), '(target_file)\n', (14145, 14158), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import os
import urllib.parse
from datetime import date, datetime
from functools import partial
from urllib.parse import quote_plus
import pandas as pd
import plotly.express as px
import pytz
from csci_utils.luigi.requires import Requirement, Requires
from csci_utils.luigi.target import TargetOutput
from django.template.loader import render_to_string
from luigi import (
DateParameter,
ExternalTask,
ListParameter,
LocalTarget,
Parameter,
Target,
Task,
)
from plotly.io import to_image
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from .models import Subscription
from .tasks_fetch import ConvertAQIFileToParquet
class UrlParameter(Parameter):
"""Descriptor to ensure that a file name is url safe i.e. quoted"""
def normalize(self, x):
return quote_plus(x)
class RowFilterTarget(Target):
"""A target class for filters on rows
Checks to see if any rows exist that satisfy the given filter
If no results found, return True (i.e. task is complete), else False
False causes Luigi to treat the task as pending, so it runs the task and checks its requirements
"""
def __init__(self, model, **kwargs):
self.model = model
self.kwargs = kwargs
def exists(self):
vals = self.model.objects.filter(**self.kwargs)
if not vals:
return True
return False
class RowFilterOutput:
"""Descriptor for the output method
Returns a "RowFilterTarget" for the Luigi task
Additional feature: in case there are values returned from the filter,
the descriptor can accept names of fields and parameters on the parent class
and update the parent class parameters -
this ensures that downstream tasks do not need to call the database again
"""
def __init__(self, model, entries_param=None, field=None, **kwargs):
self.model = model
entries_param = (
entries_param if isinstance(entries_param, list) else [entries_param]
)
field = field if isinstance(field, list) else [field]
self.parent_updates = dict(zip(entries_param, field))
self.kwargs = kwargs
def __get__(self, task, cls):
if not task:
return self
return partial(self.__call__, task)
def __call__(self, task):
vals = self.model.objects.filter(**self.kwargs)
if vals and self.parent_updates:
for entry, field in self.parent_updates.items():
setattr(task, entry, tuple(set(getattr(v, field) for v in vals)))
return RowFilterTarget(self.model, **self.kwargs)
class GenerateEmails(ExternalTask):
"""
Task to generate the html content to be sent via email.
Uses Django's render to string functionality.
:param city: name of the city for which report has to be generated
:param pol: name of the dominant pollutant for that city
:param date: the date for which report has to be generated
"""
city = UrlParameter(default=None)
pol = Parameter(default="pm25")
date = DateParameter(default=date.today())
requires = Requires()
historical = Requirement(ConvertAQIFileToParquet)
output = TargetOutput(
factory=LocalTarget,
file_pattern="emails/{task.city}-{task.date}",
ext=".html",
)
def run(self):
city = urllib.parse.unquote(self.city)
df = pd.read_parquet(self.historical.output().path)
df = df[df["City"] == city].sort_index(ascending=False)
df = df[df["Specie"].isin(["pm10", "pm25"])]
df = df.pivot(index=None, columns="Specie", values="median")
df.fillna(0, inplace=True)
df.sort_index(inplace=True, ascending=False)
last_7_days = df.iloc[:6]
data = {"aqi": df.iloc[0][self.pol]}
df["month"] = df.index.strftime("%Y-%m")
df_month = df.groupby("month").agg("mean")
last_7_days_bar = px.bar(last_7_days, title="Last 7 Days", barmode="group")
month_bar = px.bar(df_month, title="Monthly", barmode="group")
from base64 import b64encode
data["image_last_7_days"] = b64encode(
to_image(last_7_days_bar, format="png", engine="kaleido")
).decode()
data["image_months"] = b64encode(
to_image(month_bar, format="png", engine="kaleido")
).decode()
html = render_to_string(
"subscribe/newsletter_email_template.html", {"data": data}
)
with open(self.output().path, "w") as f:
f.write(html)
class CheckForPendingEmails(Task):
"""
Task to check for pending emails. This uses a "RowFilterOutput" which checks for rows in the database
which have the "next_email_date" in the past.
For each such row found (city + dominant pollutant fetched from the DB), the task requires a GenerateEmails task.
"""
cities = ListParameter(default=None)
pols = ListParameter(default=None)
date = DateParameter(default=date.today())
def requires(self):
return {
k: self.clone(GenerateEmails, city=k, pol=self.pols[i])
for i, k in enumerate(self.cities)
}
output = RowFilterOutput(
model=Subscription,
entries_param=["cities", "pols"],
field=["city", "dominentpol"],
next_email_date__lte=datetime.now(tz=pytz.utc),
)
def run(self):
for city in self.cities:
vals = Subscription.objects.filter(
next_email_date__lte=datetime.now(tz=pytz.utc), city__exact=city
)
emails = list(map(lambda x: x.email, vals))
html = open(self.input()[city].path).read()
message = Mail(
from_email="<EMAIL>",
to_emails=emails[0],
subject=f"Daily AQI Update for {city} from WarmMail",
html_content=html,
)
try:
sg = SendGridAPIClient(os.environ.get("SENDGRID_API_KEY"))
sg.send(message)
except Exception as e:
print(e)
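# Illustrative way to run the pipeline by hand (assumption: a local luigi scheduler is acceptable):
#
#   import luigi
#   luigi.build([CheckForPendingEmails()], local_scheduler=True)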
|
[
"csci_utils.luigi.requires.Requires",
"plotly.express.bar",
"sendgrid.helpers.mail.Mail",
"os.environ.get",
"csci_utils.luigi.target.TargetOutput",
"plotly.io.to_image",
"datetime.datetime.now",
"django.template.loader.render_to_string",
"functools.partial",
"luigi.ListParameter",
"datetime.date.today",
"luigi.Parameter",
"urllib.parse.quote_plus",
"csci_utils.luigi.requires.Requirement"
] |
[((3051, 3076), 'luigi.Parameter', 'Parameter', ([], {'default': '"""pm25"""'}), "(default='pm25')\n", (3060, 3076), False, 'from luigi import DateParameter, ExternalTask, ListParameter, LocalTarget, Parameter, Target, Task\n'), ((3140, 3150), 'csci_utils.luigi.requires.Requires', 'Requires', ([], {}), '()\n', (3148, 3150), False, 'from csci_utils.luigi.requires import Requirement, Requires\n'), ((3168, 3204), 'csci_utils.luigi.requires.Requirement', 'Requirement', (['ConvertAQIFileToParquet'], {}), '(ConvertAQIFileToParquet)\n', (3179, 3204), False, 'from csci_utils.luigi.requires import Requirement, Requires\n'), ((3219, 3317), 'csci_utils.luigi.target.TargetOutput', 'TargetOutput', ([], {'factory': 'LocalTarget', 'file_pattern': '"""emails/{task.city}-{task.date}"""', 'ext': '""".html"""'}), "(factory=LocalTarget, file_pattern=\n 'emails/{task.city}-{task.date}', ext='.html')\n", (3231, 3317), False, 'from csci_utils.luigi.target import TargetOutput\n'), ((4910, 4937), 'luigi.ListParameter', 'ListParameter', ([], {'default': 'None'}), '(default=None)\n', (4923, 4937), False, 'from luigi import DateParameter, ExternalTask, ListParameter, LocalTarget, Parameter, Target, Task\n'), ((4949, 4976), 'luigi.ListParameter', 'ListParameter', ([], {'default': 'None'}), '(default=None)\n', (4962, 4976), False, 'from luigi import DateParameter, ExternalTask, ListParameter, LocalTarget, Parameter, Target, Task\n'), ((852, 865), 'urllib.parse.quote_plus', 'quote_plus', (['x'], {}), '(x)\n', (862, 865), False, 'from urllib.parse import quote_plus\n'), ((2284, 2312), 'functools.partial', 'partial', (['self.__call__', 'task'], {}), '(self.__call__, task)\n', (2291, 2312), False, 'from functools import partial\n'), ((3953, 4010), 'plotly.express.bar', 'px.bar', (['last_7_days'], {'title': '"""Last 7 Days"""', 'barmode': '"""group"""'}), "(last_7_days, title='Last 7 Days', barmode='group')\n", (3959, 4010), True, 'import plotly.express as px\n'), ((4031, 4081), 'plotly.express.bar', 'px.bar', (['df_month'], {'title': '"""Monthly"""', 'barmode': '"""group"""'}), "(df_month, title='Monthly', barmode='group')\n", (4037, 4081), True, 'import plotly.express as px\n'), ((4396, 4472), 'django.template.loader.render_to_string', 'render_to_string', (['"""subscribe/newsletter_email_template.html"""', "{'data': data}"], {}), "('subscribe/newsletter_email_template.html', {'data': data})\n", (4412, 4472), False, 'from django.template.loader import render_to_string\n'), ((3110, 3122), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3120, 3122), False, 'from datetime import date, datetime\n'), ((5010, 5022), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5020, 5022), False, 'from datetime import date, datetime\n'), ((5360, 5385), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'pytz.utc'}), '(tz=pytz.utc)\n', (5372, 5385), False, 'from datetime import date, datetime\n'), ((5726, 5851), 'sendgrid.helpers.mail.Mail', 'Mail', ([], {'from_email': '"""<EMAIL>"""', 'to_emails': 'emails[0]', 'subject': 'f"""Daily AQI Update for {city} from WarmMail"""', 'html_content': 'html'}), "(from_email='<EMAIL>', to_emails=emails[0], subject=\n f'Daily AQI Update for {city} from WarmMail', html_content=html)\n", (5730, 5851), False, 'from sendgrid.helpers.mail import Mail\n'), ((4179, 4236), 'plotly.io.to_image', 'to_image', (['last_7_days_bar'], {'format': '"""png"""', 'engine': '"""kaleido"""'}), "(last_7_days_bar, format='png', engine='kaleido')\n", (4187, 4236), False, 'from plotly.io import to_image\n'), ((4310, 
4361), 'plotly.io.to_image', 'to_image', (['month_bar'], {'format': '"""png"""', 'engine': '"""kaleido"""'}), "(month_bar, format='png', engine='kaleido')\n", (4318, 4361), False, 'from plotly.io import to_image\n'), ((5531, 5556), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'pytz.utc'}), '(tz=pytz.utc)\n', (5543, 5556), False, 'from datetime import date, datetime\n'), ((5982, 6016), 'os.environ.get', 'os.environ.get', (['"""SENDGRID_API_KEY"""'], {}), "('SENDGRID_API_KEY')\n", (5996, 6016), False, 'import os\n')]
|
#!/usr/bin/env python3
def test():
cedTest = ["U²sgal²sdi ạ²dv¹ne²³li⁴sgi.", "Ụ²wo²³dị³ge⁴ɂi gi²hli a¹ke²³he³²ga na ạ²chu⁴ja.",
"Ạ²ni²³tạɂ³li ạ²ni²sgạ²ya a¹ni²no²hạ²li²³do³²he, ạ²hwi du¹ni²hyọ²he.",
"Sa¹gwu⁴hno ạ²sgạ²ya gạ²lo¹gwe³ ga²ne²he sọ³ɂị³hnv³ hla².",
"Na³hnv³ gạ²lo¹gwe³ ga²ne⁴hi u²dlv²³kwsạ²ti ge¹se³, ạ²le go²hu⁴sdi yu²³dv³²ne⁴la a¹dlv²³kwsge³.",
"A¹na³ɂi²sv⁴hnv go²hu⁴sdi wu²³ni³go²he do²jụ²wạ³ɂị²hlv,",
"na³hnv³ gạ²lo¹gwe³ ga²ne⁴hi kị²lạ²gwu ị²yv⁴da wị²du²³sdạ³yo²hle³ o²³sdạ²gwu nu²³ksẹ²stạ²nv⁴na ị²yu³sdi da¹sdạ²yo²hị²hv⁴.",
"U²do²hị²yu⁴hnv³ wu²³yo³hle³ ạ²le u¹ni²go²he³ gạ²nv³gv⁴.",
"Na³hnv³ gạ²lo¹gwe³ nị²ga²³ne³hv⁴na \"ạ²hwi e¹ni²yo³ɂa!\" u¹dv²hne.",
"\"Ji²yo³ɂe³²ga\" u¹dv²hne na³ gạ²lo¹gwe³ ga²ne⁴hi, a¹dlv²³kwsgv³.",
"U¹na³ne²lu²³gi³²se do²jụ²wạ³ɂị²hlv³ di³dla, nạ²ɂv²³hnị³ge⁴hnv wu²³ni³luh²ja u¹ni²go²he³ so²³gwị³li gạɂ³nv⁴.",
"\"So²³gwị³lị³le³² i¹nạ²da²hị³si\" u¹dv²hne³ na³ u²yo²hlv⁴.", "\"Hạ²da²hị³se³²ga³\" a¹go¹se²³le³."]
for a in cedTest:
print("_______________");
print();
print(a);
print(ced2mco(a));
asciiCedText = ["ga.2da.2de3ga", "ha.2da.2du1ga", "u2da.2di23nv32di", "u1da.2di23nv32sv23?i", "a1da.2de3go3?i"]
for a in asciiCedText:
print("_______________");
print();
print(a);
print(ascii_ced2mco(a));
return
# Converts MCO annotation into pseudo English phonetics for use by the aeneas alignment package
# lines prefixed with '#' are returned with the '#' removed, but otherwise unchanged.
def mco2espeak(text: str):
import unicodedata as ud
import re
if (len(text.strip()) == 0):
return ""
# Handle specially flagged text
if (text[0].strip() == "#"):
if text[1] != "!":
return text.strip()[1:]
else:
text = text[2:]
newText = ud.normalize('NFD', text.strip()).lower()
if (newText[0] == ""):
newText = newText[1:]
# remove all tone indicators
newText = re.sub("[\u030C\u0302\u0300\u0301\u030b]", "", newText)
newText = "[[" + newText.strip() + "]]"
newText = newText.replace(" ", "]] [[")
newText = newText.replace("'", "]]'[[")
newText = newText.replace(".]]", "]].")
newText = newText.replace(",]]", "]],")
newText = newText.replace("!]]", "]]!")
newText = newText.replace("?]]", "]]?")
newText = newText.replace(":]]", "]]:")
newText = newText.replace(";]]", "]];")
newText = newText.replace("\"]]", "]]\"")
newText = newText.replace("']]", "]]'")
newText = newText.replace(" ]]", "]] ")
newText = newText.replace("[[ ", " [[")
newText = re.sub("(?i)([aeiouv]):", "\\1", newText)
# convert all vowels into approximate espeak x-sampa escaped forms
newText = newText.replace("A", "0")
newText = newText.replace("a", "0")
newText = newText.replace("v", "V")
newText = newText.replace("tl", "tl#")
newText = newText.replace("hl", "l#")
newText = newText.replace("J", "dZ")
newText = newText.replace("j", "dZ")
newText = newText.replace("Y", "j")
newText = newText.replace("y", "j")
newText = newText.replace("Ch", "tS")
newText = newText.replace("ch", "tS")
newText = newText.replace("ɂ", "?")
return newText
def ced2mco(text: str):
import unicodedata as ud
import re
tones2mco = [("²³", "\u030C"), ("³²", "\u0302"), ("¹", "\u0300"), ("²", ""), ("³", "\u0301"), ("⁴", "\u030b")]
text = ud.normalize('NFD', text)
text = re.sub("(?i)([aeiouv])([^¹²³⁴\u0323]+)", "\\1\u0323\\2", text)
text = re.sub("(?i)([aeiouv])([¹²³⁴]+)$", "\\1\u0323\\2", text)
text = re.sub("(?i)([aeiouv])([¹²³⁴]+)([^¹²³⁴a-zɂ])", "\\1\u0323\\2\\3", text)
text = re.sub("(?i)([^aeiouv\u0323¹²³⁴]+)([¹²³⁴]+)", "\\2\\1", text)
text = re.sub("(?i)([aeiouv])([¹²³⁴]+)", "\\1\\2:", text)
text = text.replace("\u0323", "")
text = re.sub("(?i)([aeiouv])²$", "\\1\u0304", text)
text = re.sub("(?i)([aeiouv])²([^a-zɂ¹²³⁴:])", "\\1\u0304\\2", text)
for ced2mcotone in tones2mco:
text = text.replace(ced2mcotone[0], ced2mcotone[1])
#
return ud.normalize('NFC', text)
def ascii_ced2mco(text: str):
import unicodedata as ud
text = ud.normalize('NFD', text)
return ced2mco(ascii_ced2ced(text))
def ascii_ced2ced(text: str):
import unicodedata as ud
text = ud.normalize('NFD', text)
text = text.replace(".", "\u0323")
text = text.replace("1", "¹")
text = text.replace("2", "²")
text = text.replace("3", "³")
text = text.replace("4", "⁴")
text = text.replace("?", "ɂ")
return text
if __name__ == "__main__":
test()
|
[
"re.sub",
"unicodedata.normalize"
] |
[((2122, 2152), 're.sub', 're.sub', (['"""[̌̂̀́̋]"""', '""""""', 'newText'], {}), "('[̌̂̀́̋]', '', newText)\n", (2128, 2152), False, 'import re\n'), ((2766, 2807), 're.sub', 're.sub', (['"""(?i)([aeiouv]):"""', '"""\\\\1"""', 'newText'], {}), "('(?i)([aeiouv]):', '\\\\1', newText)\n", (2772, 2807), False, 'import re\n'), ((3587, 3612), 'unicodedata.normalize', 'ud.normalize', (['"""NFD"""', 'text'], {}), "('NFD', text)\n", (3599, 3612), True, 'import unicodedata as ud\n'), ((3624, 3676), 're.sub', 're.sub', (['"""(?i)([aeiouv])([^¹²³⁴̣]+)"""', '"""\\\\1̣\\\\2"""', 'text'], {}), "('(?i)([aeiouv])([^¹²³⁴̣]+)', '\\\\1̣\\\\2', text)\n", (3630, 3676), False, 'import re\n'), ((3698, 3749), 're.sub', 're.sub', (['"""(?i)([aeiouv])([¹²³⁴]+)$"""', '"""\\\\1̣\\\\2"""', 'text'], {}), "('(?i)([aeiouv])([¹²³⁴]+)$', '\\\\1̣\\\\2', text)\n", (3704, 3749), False, 'import re\n'), ((3766, 3832), 're.sub', 're.sub', (['"""(?i)([aeiouv])([¹²³⁴]+)([^¹²³⁴a-zɂ])"""', '"""\\\\1̣\\\\2\\\\3"""', 'text'], {}), "('(?i)([aeiouv])([¹²³⁴]+)([^¹²³⁴a-zɂ])', '\\\\1̣\\\\2\\\\3', text)\n", (3772, 3832), False, 'import re\n'), ((3849, 3905), 're.sub', 're.sub', (['"""(?i)([^aeiouṿ¹²³⁴]+)([¹²³⁴]+)"""', '"""\\\\2\\\\1"""', 'text'], {}), "('(?i)([^aeiouṿ¹²³⁴]+)([¹²³⁴]+)', '\\\\2\\\\1', text)\n", (3855, 3905), False, 'import re\n'), ((3922, 3972), 're.sub', 're.sub', (['"""(?i)([aeiouv])([¹²³⁴]+)"""', '"""\\\\1\\\\2:"""', 'text'], {}), "('(?i)([aeiouv])([¹²³⁴]+)', '\\\\1\\\\2:', text)\n", (3928, 3972), False, 'import re\n'), ((4022, 4062), 're.sub', 're.sub', (['"""(?i)([aeiouv])²$"""', '"""\\\\1̄"""', 'text'], {}), "('(?i)([aeiouv])²$', '\\\\1̄', text)\n", (4028, 4062), False, 'import re\n'), ((4079, 4135), 're.sub', 're.sub', (['"""(?i)([aeiouv])²([^a-zɂ¹²³⁴:])"""', '"""\\\\1̄\\\\2"""', 'text'], {}), "('(?i)([aeiouv])²([^a-zɂ¹²³⁴:])', '\\\\1̄\\\\2', text)\n", (4085, 4135), False, 'import re\n'), ((4252, 4277), 'unicodedata.normalize', 'ud.normalize', (['"""NFC"""', 'text'], {}), "('NFC', text)\n", (4264, 4277), True, 'import unicodedata as ud\n'), ((4350, 4375), 'unicodedata.normalize', 'ud.normalize', (['"""NFD"""', 'text'], {}), "('NFD', text)\n", (4362, 4375), True, 'import unicodedata as ud\n'), ((4488, 4513), 'unicodedata.normalize', 'ud.normalize', (['"""NFD"""', 'text'], {}), "('NFD', text)\n", (4500, 4513), True, 'import unicodedata as ud\n')]
|
import psycopg2
url = "dbname='da43n1slakcjkc' user='msqgxzgmcskvst' host='ec2-54-80-184-43.compute-1.amazonaws.com' port=5432 password='<PASSWORD>'"
class database_setup(object):
def __init__(self):
self.conn = psycopg2.connect(url)
self.cursor = self.conn.cursor()
def destroy_tables(self):
self.cursor.execute("""DROP TABLE IF EXISTS user CASCADE;""")
self.conn.commit()
def create_tables(self):
self.cursor.execute("""CREATE TABLE IF NOT EXISTS Users (
user_id SERIAL NOT NULL,
fname VARCHAR(25) NOT NULL,
lname VARCHAR(25) NOT NULL,
post_date DATE NOT NULL DEFAULT CURRENT_DATE,
email VARCHAR(50) UNIQUE NOT NULL,
password VARCHAR(256) NOT NULL,
photo VARCHAR(255) NOT NULL,
PRIMARY KEY (email)
);""")
self.cursor.execute("""CREATE TABLE IF NOT EXISTS Report (
report_id SERIAL NOT NULL,
num_tweet INT NOT NULL,
tweet VARCHAR(255) NOT NULL,
plot_bar VARCHAR(255) NOT NULL,
plot_pie VARCHAR(255) NOT NULL,
post_date DATE NOT NULL DEFAULT CURRENT_DATE,
email VARCHAR(50) REFERENCES Users(email) NOT NULL,
PRIMARY KEY (report_id)
);""")
self.cursor.execute("""CREATE TABLE IF NOT EXISTS Config (
config_id SERIAL NOT NULL,
consumerKey TEXT NOT NULL,
consumerSecret TEXT NOT NULL,
accessToken TEXT NOT NULL,
accessSecret TEXT NOT NULL,
email VARCHAR(50) REFERENCES Users(email) NOT NULL,
PRIMARY KEY (config_id)
);""")
self.conn.commit()
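# Illustrative usage (assumption: the connection url above points at a reachable database):
#
#   db = database_setup()
#   db.destroy_tables()
#   db.create_tables()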
|
[
"psycopg2.connect"
] |
[((227, 248), 'psycopg2.connect', 'psycopg2.connect', (['url'], {}), '(url)\n', (243, 248), False, 'import psycopg2\n')]
|
import numpy as np
import shapely.geometry as geom
class Bbox:
def __init__(self, name, part_id, depth_image, xyz, box_size, projection):
if not isinstance(xyz, np.ndarray):
raise ValueError("xyz must be an np.ndarray")
self.name = name
self.id = part_id
self.center = np.array([xyz[0], xyz[1]])
self.z = xyz[2]
self.im_d = depth_image
self.im_d[self.im_d == 0] = 255
x_delta_scaled = box_size[0]/2
self.weight = 1.0
y_delta_scaled = box_size[1]/2
self.xmin, self.xmax = xyz[0]-x_delta_scaled, xyz[0]+x_delta_scaled
self.ymin, self.ymax = xyz[1]-y_delta_scaled, xyz[1]+y_delta_scaled
self.poly = geom.box(self.xmin, self.ymin, self.xmax, self.ymax)
self.color_min = (int(projection['fx']*self.xmin/xyz[2] + projection['cx']),
int(projection['fy']*self.ymin/xyz[2] + projection['cy']))
self.color_max = (int(projection['fx']*self.xmax/xyz[2] + projection['cx']),
int(projection['fy']*self.ymax/xyz[2] + projection['cy']))
self.depth_min = (int(projection['fx_d']*self.xmin/xyz[2] + projection['cx_d']),
int(projection['fy_d']*self.ymin/xyz[2] + projection['cy_d']))
self.depth_max = (int(projection['fx_d']*self.xmax/xyz[2] + projection['cx_d']),
int(projection['fy_d']*self.ymax/xyz[2] + projection['cy_d']))
def __str__(self):
return "{{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}}".format(self.xmin, self.ymin, self.xmax, self.ymax)
def __repr__(self):
return "(bbox: {{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}})".format(self.xmin, self.ymin, self.xmax, self.ymax)
def size(self):
return (self.xmax - self.xmin) * (self.ymax - self.ymin)
def get_bb_depth_matrix(self):
""" Get the portion of the depth image inside the bounding box """
min_x, max_x = sorted((self.depth_min[0], self.depth_max[0]))
min_y, max_y = sorted((self.depth_min[1], self.depth_max[1]))
bounded_im = self.im_d[min_y: max_y+1, min_x: max_x+1]
return bounded_im
def overlap(self, bb2):
dx = min(self.xmax, bb2.xmax) - max(self.xmin, bb2.xmin)
dy = min(self.ymax, bb2.ymax) - max(self.ymin, bb2.ymin)
if (dx>=0) and (dy>=0):
return dx*dy
return 0
def p_over(self, bb2):
return self.overlap(bb2)/(min(self.size(), bb2.size()))
def p_depth(self, bb2):
bounded_im1 = self.get_bb_depth_matrix()
bounded_im2 = bb2.get_bb_depth_matrix()
print(bounded_im1.size == 0 or bounded_im2.size == 0)
mean1 = np.mean(bounded_im1)
mean2 = np.mean(bounded_im2)
stdev1 = np.std(bounded_im1)
stdev2 = np.std(bounded_im2)
half_negative_square_of_mean_difference = -1/2 * (mean1 - mean2) ** 2
term1_power = half_negative_square_of_mean_difference / (stdev1 ** 2)
term2_power = half_negative_square_of_mean_difference / (stdev2 ** 2)
out = (np.exp(term1_power) + np.exp(term2_power))/2
return out
def prob(self, bb2, alpha):
return alpha * self.p_over(bb2) + (1-alpha) * self.p_depth(bb2)
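# Note (illustrative): prob() is a convex blend of the two cues,
#   prob = alpha * p_over + (1 - alpha) * p_depth,
# so alpha = 1 scores box pairs purely by overlap and alpha = 0 purely by depth similarity.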
|
[
"numpy.mean",
"shapely.geometry.box",
"numpy.exp",
"numpy.array",
"numpy.std"
] |
[((318, 344), 'numpy.array', 'np.array', (['[xyz[0], xyz[1]]'], {}), '([xyz[0], xyz[1]])\n', (326, 344), True, 'import numpy as np\n'), ((717, 769), 'shapely.geometry.box', 'geom.box', (['self.xmin', 'self.ymin', 'self.xmax', 'self.ymax'], {}), '(self.xmin, self.ymin, self.xmax, self.ymax)\n', (725, 769), True, 'import shapely.geometry as geom\n'), ((2697, 2717), 'numpy.mean', 'np.mean', (['bounded_im1'], {}), '(bounded_im1)\n', (2704, 2717), True, 'import numpy as np\n'), ((2734, 2754), 'numpy.mean', 'np.mean', (['bounded_im2'], {}), '(bounded_im2)\n', (2741, 2754), True, 'import numpy as np\n'), ((2772, 2791), 'numpy.std', 'np.std', (['bounded_im1'], {}), '(bounded_im1)\n', (2778, 2791), True, 'import numpy as np\n'), ((2809, 2828), 'numpy.std', 'np.std', (['bounded_im2'], {}), '(bounded_im2)\n', (2815, 2828), True, 'import numpy as np\n'), ((3079, 3098), 'numpy.exp', 'np.exp', (['term1_power'], {}), '(term1_power)\n', (3085, 3098), True, 'import numpy as np\n'), ((3101, 3120), 'numpy.exp', 'np.exp', (['term2_power'], {}), '(term2_power)\n', (3107, 3120), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful Script constants and utils."""
from test_framework.script import CScript
# To prevent a "tx-size-small" policy rule error, a transaction has to have a
# non-witness size of at least 82 bytes (MIN_STANDARD_TX_NONWITNESS_SIZE in
# src/policy/policy.h). Considering a Tx with the smallest possible single
# input (blank, empty scriptSig), and with an output omitting the scriptPubKey,
# we get to a minimum size of 60 bytes:
#
# Tx Skeleton: 4 [Version] + 1 [InCount] + 1 [OutCount] + 4 [LockTime] = 10 bytes
# Blank Input: 32 [PrevTxHash] + 4 [Index] + 1 [scriptSigLen] + 4 [SeqNo] = 41 bytes
# Output: 8 [Amount] + 1 [scriptPubKeyLen] = 9 bytes
#
# Hence, the scriptPubKey of the single output has to have a size of at
# least 22 bytes, which corresponds to the size of a P2WPKH scriptPubKey.
# The following script constant consists of a single push of 21 bytes of 'a':
# <PUSH_21> <21-bytes of 'a'>
# resulting in a 22-byte size. It should be used whenever (small) fake
# scriptPubKeys are needed, to guarantee that the minimum transaction size is
# met.
DUMMY_P2WPKH_SCRIPT = CScript([b'a' * 21])
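# Sanity check (illustrative): the single push serializes to 1 length byte + 21 data bytes.
# assert len(DUMMY_P2WPKH_SCRIPT) == 22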
|
[
"test_framework.script.CScript"
] |
[((1305, 1325), 'test_framework.script.CScript', 'CScript', (["[b'a' * 21]"], {}), "([b'a' * 21])\n", (1312, 1325), False, 'from test_framework.script import CScript\n')]
|
import os
import numpy as np
import pytest
import vtk
import pyvista
from pyvista import examples
from pyvista.plotting import system_supports_plotting
beam = pyvista.UnstructuredGrid(examples.hexbeamfile)
# create structured grid
x = np.arange(-10, 10, 2)
y = np.arange(-10, 10, 2)
z = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(x, y, z)
sgrid = pyvista.StructuredGrid(x, y, z)
try:
test_path = os.path.dirname(os.path.abspath(__file__))
test_data_path = os.path.join(test_path, 'test_data')
except:
test_path = '/home/alex/afrl/python/source/pyvista/tests'
def test_volume():
assert beam.volume > 0.0
@pytest.mark.skipif(not system_supports_plotting(), reason="Requires system to support plotting")
def test_struct_example():
# create and plot structured grid
grid = examples.load_structured()
cpos = grid.plot(off_screen=True) # basic plot
assert isinstance(cpos, pyvista.CameraPosition)
# Plot mean curvature
cpos_curv = grid.plot_curvature(off_screen=True)
assert isinstance(cpos_curv, pyvista.CameraPosition)
def test_init_from_structured():
unstruct_grid = pyvista.UnstructuredGrid(sgrid)
assert unstruct_grid.points.shape[0] == x.size
assert np.all(unstruct_grid.celltypes == 12)
def test_init_from_unstructured():
grid = pyvista.UnstructuredGrid(beam, deep=True)
grid.points += 1
assert not np.any(grid.points == beam.points)
def test_init_bad_input():
with pytest.raises(Exception):
unstruct_grid = pyvista.UnstructuredGrid(np.array(1))
with pytest.raises(Exception):
unstruct_grid = pyvista.UnstructuredGrid(np.array(1),
np.array(1),
np.array(1),
'woa')
def test_init_from_arrays():
offset = np.array([0, 9], np.int8)
cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int32)
cell1 = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
cell2 = np.array([[0, 0, 2],
[1, 0, 2],
[1, 1, 2],
[0, 1, 2],
[0, 0, 3],
[1, 0, 3],
[1, 1, 3],
[0, 1, 3]])
points = np.vstack((cell1, cell2)).astype(np.int32)
grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points)
assert grid.n_cells == 2
assert np.allclose(grid.offset, offset)
def test_surface_indices():
surf = beam.extract_surface()
surf_ind = surf.point_arrays['vtkOriginalPointIds']
assert np.allclose(surf_ind, beam.surface_indices())
def test_extract_feature_edges():
edges = beam.extract_feature_edges(90)
assert edges.n_points
edges = beam.extract_feature_edges(180)
assert not edges.n_points
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vtu', 'vtk'])
def test_save(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
beam.save(filename, binary)
grid = pyvista.UnstructuredGrid(filename)
assert grid.cells.shape == beam.cells.shape
assert grid.points.shape == beam.points.shape
grid = pyvista.read(filename)
assert grid.cells.shape == beam.cells.shape
assert grid.points.shape == beam.points.shape
assert isinstance(grid, pyvista.UnstructuredGrid)
def test_init_bad_filename():
filename = os.path.join(test_path, 'test_grid.py')
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid(filename)
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid('not a file')
def test_save_bad_extension():
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid('file.abc')
def test_linear_copy():
# need a grid with quadratic cells
lgrid = beam.linear_copy()
assert np.all(lgrid.celltypes < 20)
def test_extract_cells():
ind = [1, 2, 3]
part_beam = beam.extract_cells(ind)
assert part_beam.n_cells == len(ind)
assert part_beam.n_points < beam.n_points
mask = np.zeros(beam.n_cells, np.bool)
mask[:3] = True
part_beam = beam.extract_cells(mask)
assert part_beam.n_cells == len(ind)
assert part_beam.n_points < beam.n_points
def test_merge():
grid = beam.copy()
grid.points[:, 0] += 1
unmerged = grid.merge(beam, inplace=False, merge_points=False)
grid.merge(beam, inplace=True, merge_points=True)
assert grid.n_points > beam.n_points
assert grid.n_points < unmerged.n_points
def test_merge_not_main():
grid = beam.copy()
grid.points[:, 0] += 1
unmerged = grid.merge(beam, inplace=False, merge_points=False,
main_has_priority=False)
grid.merge(beam, inplace=True, merge_points=True)
assert grid.n_points > beam.n_points
assert grid.n_points < unmerged.n_points
def test_merge_list():
grid_a = beam.copy()
grid_a.points[:, 0] += 1
grid_b = beam.copy()
grid_b.points[:, 1] += 1
grid_a.merge([beam, grid_b], inplace=True, merge_points=True)
assert grid_a.n_points > beam.n_points
def test_init_structured():
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 2)
zrng = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(xrng, yrng, zrng)
grid = pyvista.StructuredGrid(x, y, z)
assert np.allclose(sgrid.x, x)
assert np.allclose(sgrid.y, y)
assert np.allclose(sgrid.z, z)
grid_a = pyvista.StructuredGrid(grid)
assert np.allclose(grid_a.points, grid.points)
def test_invalid_init_structured():
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 2)
zrng = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(xrng, yrng, zrng)
z = z[:, :, :2]
with pytest.raises(Exception):
grid = pyvista.StructuredGrid(x, y, z)
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vts', 'vtk'])
def test_save_structured(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
sgrid.save(filename, binary)
grid = pyvista.StructuredGrid(filename)
assert grid.x.shape == sgrid.y.shape
assert grid.n_cells
assert grid.points.shape == sgrid.points.shape
grid = pyvista.read(filename)
assert grid.x.shape == sgrid.y.shape
assert grid.n_cells
assert grid.points.shape == sgrid.points.shape
assert isinstance(grid, pyvista.StructuredGrid)
def test_load_structured_bad_filename():
with pytest.raises(Exception):
pyvista.StructuredGrid('not a file')
filename = os.path.join(test_path, 'test_grid.py')
with pytest.raises(Exception):
grid = pyvista.StructuredGrid(filename)
def test_create_rectilinear_grid_from_specs():
# 3D example
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 5)
zrng = np.arange(-10, 10, 1)
grid = pyvista.RectilinearGrid(xrng)
assert grid.n_cells == 9
assert grid.n_points == 10
grid = pyvista.RectilinearGrid(xrng, yrng)
assert grid.n_cells == 9*3
assert grid.n_points == 10*4
grid = pyvista.RectilinearGrid(xrng, yrng, zrng)
assert grid.n_cells == 9*3*19
assert grid.n_points == 10*4*20
assert grid.bounds == [-10.0,8.0, -10.0,5.0, -10.0,9.0]
# 2D example
cell_spacings = np.array([1., 1., 2., 2., 5., 10.])
x_coordinates = np.cumsum(cell_spacings)
y_coordinates = np.cumsum(cell_spacings)
grid = pyvista.RectilinearGrid(x_coordinates, y_coordinates)
assert grid.n_cells == 5*5
assert grid.n_points == 6*6
assert grid.bounds == [1.,21., 1.,21., 0.,0.]
def test_create_rectilinear_after_init():
x = np.array([0,1,2])
y = np.array([0,5,8])
z = np.array([3,2,1])
grid = pyvista.RectilinearGrid()
grid.x = x
assert grid.dimensions == [3, 1, 1]
grid.y = y
assert grid.dimensions == [3, 3, 1]
grid.z = z
assert grid.dimensions == [3, 3, 3]
assert np.allclose(grid.x, x)
assert np.allclose(grid.y, y)
assert np.allclose(grid.z, z)
def test_create_rectilinear_grid_from_file():
grid = examples.load_rectilinear()
assert grid.n_cells == 16146
assert grid.n_points == 18144
assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
assert grid.n_arrays == 1
def test_read_rectilinear_grid_from_file():
grid = pyvista.read(examples.rectfile)
assert grid.n_cells == 16146
assert grid.n_points == 18144
assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
assert grid.n_arrays == 1
def test_cast_rectilinear_grid():
grid = pyvista.read(examples.rectfile)
structured = grid.cast_to_structured_grid()
assert isinstance(structured, pyvista.StructuredGrid)
assert structured.n_points == grid.n_points
assert structured.n_cells == grid.n_cells
assert np.allclose(structured.points, grid.points)
for k, v in grid.point_arrays.items():
assert np.allclose(structured.point_arrays[k], v)
for k, v in grid.cell_arrays.items():
assert np.allclose(structured.cell_arrays[k], v)
def test_create_uniform_grid_from_specs():
# create UniformGrid
dims = [10, 10, 10]
grid = pyvista.UniformGrid(dims) # Using default spacing and origin
assert grid.dimensions == [10, 10, 10]
assert grid.extent == [0, 9, 0, 9, 0, 9]
assert grid.origin == [0.0, 0.0, 0.0]
assert grid.spacing == [1.0, 1.0, 1.0]
spacing = [2, 1, 5]
grid = pyvista.UniformGrid(dims, spacing) # Using default origin
assert grid.dimensions == [10, 10, 10]
assert grid.origin == [0.0, 0.0, 0.0]
assert grid.spacing == [2.0, 1.0, 5.0]
origin = [10, 35, 50]
grid = pyvista.UniformGrid(dims, spacing, origin) # Everything is specified
assert grid.dimensions == [10, 10, 10]
assert grid.origin == [10.0, 35.0, 50.0]
assert grid.spacing == [2.0, 1.0, 5.0]
assert grid.dimensions == [10, 10, 10]
def test_uniform_setters():
grid = pyvista.UniformGrid()
grid.dimensions = [10, 10, 10]
assert grid.GetDimensions() == (10, 10, 10)
assert grid.dimensions == [10, 10, 10]
grid.spacing = [5, 2, 1]
assert grid.GetSpacing() == (5, 2, 1)
assert grid.spacing == [5, 2, 1]
grid.origin = [6, 27.7, 19.8]
assert grid.GetOrigin() == (6, 27.7, 19.8)
assert grid.origin == [6, 27.7, 19.8]
def test_create_uniform_grid_from_file():
grid = examples.load_uniform()
assert grid.n_cells == 729
assert grid.n_points == 1000
assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
assert grid.n_arrays == 2
assert grid.dimensions == [10, 10, 10]
def test_read_uniform_grid_from_file():
grid = pyvista.read(examples.uniformfile)
assert grid.n_cells == 729
assert grid.n_points == 1000
assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
assert grid.n_arrays == 2
assert grid.dimensions == [10, 10, 10]
def test_cast_uniform_to_structured():
grid = examples.load_uniform()
structured = grid.cast_to_structured_grid()
assert structured.n_points == grid.n_points
assert structured.n_arrays == grid.n_arrays
assert structured.bounds == grid.bounds
def test_cast_uniform_to_rectilinear():
grid = examples.load_uniform()
rectilinear = grid.cast_to_rectilinear_grid()
assert rectilinear.n_points == grid.n_points
assert rectilinear.n_arrays == grid.n_arrays
assert rectilinear.bounds == grid.bounds
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vtr', 'vtk'])
def test_save_rectilinear(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
ogrid = examples.load_rectilinear()
ogrid.save(filename, binary)
grid = pyvista.RectilinearGrid(filename)
assert grid.n_cells == ogrid.n_cells
assert np.allclose(grid.x, ogrid.x)
assert np.allclose(grid.y, ogrid.y)
assert np.allclose(grid.z, ogrid.z)
assert grid.dimensions == ogrid.dimensions
grid = pyvista.read(filename)
assert isinstance(grid, pyvista.RectilinearGrid)
assert grid.n_cells == ogrid.n_cells
assert np.allclose(grid.x, ogrid.x)
assert np.allclose(grid.y, ogrid.y)
assert np.allclose(grid.z, ogrid.z)
assert grid.dimensions == ogrid.dimensions
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vti', 'vtk'])
def test_save_uniform(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
ogrid = examples.load_uniform()
ogrid.save(filename, binary)
grid = pyvista.UniformGrid(filename)
assert grid.n_cells == ogrid.n_cells
assert grid.origin == ogrid.origin
assert grid.spacing == ogrid.spacing
assert grid.dimensions == ogrid.dimensions
grid = pyvista.read(filename)
assert isinstance(grid, pyvista.UniformGrid)
assert grid.n_cells == ogrid.n_cells
assert grid.origin == ogrid.origin
assert grid.spacing == ogrid.spacing
assert grid.dimensions == ogrid.dimensions
def test_grid_points():
"""Test the points methods on UniformGrid and RectilinearGrid"""
points = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
grid = pyvista.UniformGrid()
grid.points = points
assert grid.dimensions == [2, 2, 2]
assert grid.spacing == [1, 1, 1]
assert grid.origin == [0., 0., 0.]
assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
opts = np.c_[grid.x, grid.y, grid.z]
assert np.allclose(np.unique(opts, axis=0), np.unique(points, axis=0))
# Now test rectilinear grid
del grid
grid = pyvista.RectilinearGrid()
grid.points = points
assert grid.dimensions == [2, 2, 2]
assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
def test_grid_extract_selection_points():
grid = pyvista.UnstructuredGrid(sgrid)
sub_grid = grid.extract_selection_points([0])
assert sub_grid.n_cells == 1
sub_grid = grid.extract_selection_points(range(100))
assert sub_grid.n_cells > 1
def test_gaussian_smooth():
uniform = examples.load_uniform()
active = uniform.active_scalars_name
values = uniform.active_scalars
uniform = uniform.gaussian_smooth(scalars=active)
assert uniform.active_scalars_name == active
assert uniform.active_scalars.shape == values.shape
assert not np.all(uniform.active_scalars == values)
values = uniform.active_scalars
uniform = uniform.gaussian_smooth(radius_factor=5, std_dev=1.3)
assert uniform.active_scalars_name == active
assert uniform.active_scalars.shape == values.shape
assert not np.all(uniform.active_scalars == values)
|
[
"numpy.array",
"pyvista.UnstructuredGrid",
"numpy.arange",
"pyvista.UniformGrid",
"pyvista.plotting.system_supports_plotting",
"pyvista.examples.load_structured",
"numpy.vstack",
"numpy.meshgrid",
"numpy.allclose",
"numpy.any",
"pytest.raises",
"pyvista.examples.load_uniform",
"pyvista.StructuredGrid",
"os.path.abspath",
"numpy.unique",
"os.path.join",
"pyvista.examples.load_rectilinear",
"pyvista.RectilinearGrid",
"pytest.mark.parametrize",
"numpy.zeros",
"pyvista.read",
"numpy.cumsum",
"numpy.all"
] |
[((162, 208), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['examples.hexbeamfile'], {}), '(examples.hexbeamfile)\n', (186, 208), False, 'import pyvista\n'), ((239, 260), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (248, 260), True, 'import numpy as np\n'), ((265, 286), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (274, 286), True, 'import numpy as np\n'), ((291, 312), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (300, 312), True, 'import numpy as np\n'), ((323, 343), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (334, 343), True, 'import numpy as np\n'), ((352, 383), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (374, 383), False, 'import pyvista\n'), ((3144, 3192), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""binary"""', '[True, False]'], {}), "('binary', [True, False])\n", (3167, 3192), False, 'import pytest\n'), ((3194, 3246), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""extension"""', "['vtu', 'vtk']"], {}), "('extension', ['vtu', 'vtk'])\n", (3217, 3246), False, 'import pytest\n'), ((6169, 6217), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""binary"""', '[True, False]'], {}), "('binary', [True, False])\n", (6192, 6217), False, 'import pytest\n'), ((6219, 6271), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""extension"""', "['vts', 'vtk']"], {}), "('extension', ['vts', 'vtk'])\n", (6242, 6271), False, 'import pytest\n'), ((11768, 11816), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""binary"""', '[True, False]'], {}), "('binary', [True, False])\n", (11791, 11816), False, 'import pytest\n'), ((11818, 11870), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""extension"""', "['vtr', 'vtk']"], {}), "('extension', ['vtr', 'vtk'])\n", (11841, 11870), False, 'import pytest\n'), ((12618, 12666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""binary"""', '[True, False]'], {}), "('binary', [True, False])\n", (12641, 12666), False, 'import pytest\n'), ((12668, 12720), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""extension"""', "['vti', 'vtk']"], {}), "('extension', ['vti', 'vtk'])\n", (12691, 12720), False, 'import pytest\n'), ((470, 506), 'os.path.join', 'os.path.join', (['test_path', '"""test_data"""'], {}), "(test_path, 'test_data')\n", (482, 506), False, 'import os\n'), ((803, 829), 'pyvista.examples.load_structured', 'examples.load_structured', ([], {}), '()\n', (827, 829), False, 'from pyvista import examples\n'), ((1126, 1157), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['sgrid'], {}), '(sgrid)\n', (1150, 1157), False, 'import pyvista\n'), ((1220, 1257), 'numpy.all', 'np.all', (['(unstruct_grid.celltypes == 12)'], {}), '(unstruct_grid.celltypes == 12)\n', (1226, 1257), True, 'import numpy as np\n'), ((1306, 1347), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['beam'], {'deep': '(True)'}), '(beam, deep=True)\n', (1330, 1347), False, 'import pyvista\n'), ((1866, 1891), 'numpy.array', 'np.array', (['[0, 9]', 'np.int8'], {}), '([0, 9], np.int8)\n', (1874, 1891), True, 'import numpy as np\n'), ((1904, 1974), 'numpy.array', 'np.array', (['[8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15]'], {}), '([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])\n', (1912, 1974), True, 'import numpy as np\n'), ((1991, 2051), 'numpy.array', 'np.array', (['[vtk.VTK_HEXAHEDRON, 
vtk.VTK_HEXAHEDRON]', 'np.int32'], {}), '([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int32)\n', (1999, 2051), True, 'import numpy as np\n'), ((2065, 2167), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 1\n ], [0, 1, 1]]'], {}), '([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1],\n [1, 1, 1], [0, 1, 1]])\n', (2073, 2167), True, 'import numpy as np\n'), ((2331, 2433), 'numpy.array', 'np.array', (['[[0, 0, 2], [1, 0, 2], [1, 1, 2], [0, 1, 2], [0, 0, 3], [1, 0, 3], [1, 1, 3\n ], [0, 1, 3]]'], {}), '([[0, 0, 2], [1, 0, 2], [1, 1, 2], [0, 1, 2], [0, 0, 3], [1, 0, 3],\n [1, 1, 3], [0, 1, 3]])\n', (2339, 2433), True, 'import numpy as np\n'), ((2652, 2710), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['offset', 'cells', 'cell_type', 'points'], {}), '(offset, cells, cell_type, points)\n', (2676, 2710), False, 'import pyvista\n'), ((2752, 2784), 'numpy.allclose', 'np.allclose', (['grid.offset', 'offset'], {}), '(grid.offset, offset)\n', (2763, 2784), True, 'import numpy as np\n'), ((3403, 3437), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['filename'], {}), '(filename)\n', (3427, 3437), False, 'import pyvista\n'), ((3548, 3570), 'pyvista.read', 'pyvista.read', (['filename'], {}), '(filename)\n', (3560, 3570), False, 'import pyvista\n'), ((3770, 3809), 'os.path.join', 'os.path.join', (['test_path', '"""test_grid.py"""'], {}), "(test_path, 'test_grid.py')\n", (3782, 3809), False, 'import os\n'), ((4212, 4240), 'numpy.all', 'np.all', (['(lgrid.celltypes < 20)'], {}), '(lgrid.celltypes < 20)\n', (4218, 4240), True, 'import numpy as np\n'), ((4428, 4459), 'numpy.zeros', 'np.zeros', (['beam.n_cells', 'np.bool'], {}), '(beam.n_cells, np.bool)\n', (4436, 4459), True, 'import numpy as np\n'), ((5509, 5530), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5518, 5530), True, 'import numpy as np\n'), ((5542, 5563), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5551, 5563), True, 'import numpy as np\n'), ((5575, 5596), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5584, 5596), True, 'import numpy as np\n'), ((5611, 5640), 'numpy.meshgrid', 'np.meshgrid', (['xrng', 'yrng', 'zrng'], {}), '(xrng, yrng, zrng)\n', (5622, 5640), True, 'import numpy as np\n'), ((5652, 5683), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (5674, 5683), False, 'import pyvista\n'), ((5695, 5718), 'numpy.allclose', 'np.allclose', (['sgrid.x', 'x'], {}), '(sgrid.x, x)\n', (5706, 5718), True, 'import numpy as np\n'), ((5730, 5753), 'numpy.allclose', 'np.allclose', (['sgrid.y', 'y'], {}), '(sgrid.y, y)\n', (5741, 5753), True, 'import numpy as np\n'), ((5765, 5788), 'numpy.allclose', 'np.allclose', (['sgrid.z', 'z'], {}), '(sgrid.z, z)\n', (5776, 5788), True, 'import numpy as np\n'), ((5803, 5831), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['grid'], {}), '(grid)\n', (5825, 5831), False, 'import pyvista\n'), ((5843, 5882), 'numpy.allclose', 'np.allclose', (['grid_a.points', 'grid.points'], {}), '(grid_a.points, grid.points)\n', (5854, 5882), True, 'import numpy as np\n'), ((5932, 5953), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5941, 5953), True, 'import numpy as np\n'), ((5965, 5986), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5974, 5986), True, 'import numpy as np\n'), ((5998, 6019), 'numpy.arange', 'np.arange', 
(['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (6007, 6019), True, 'import numpy as np\n'), ((6034, 6063), 'numpy.meshgrid', 'np.meshgrid', (['xrng', 'yrng', 'zrng'], {}), '(xrng, yrng, zrng)\n', (6045, 6063), True, 'import numpy as np\n'), ((6440, 6472), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['filename'], {}), '(filename)\n', (6462, 6472), False, 'import pyvista\n'), ((6601, 6623), 'pyvista.read', 'pyvista.read', (['filename'], {}), '(filename)\n', (6613, 6623), False, 'import pyvista\n'), ((6931, 6970), 'os.path.join', 'os.path.join', (['test_path', '"""test_grid.py"""'], {}), "(test_path, 'test_grid.py')\n", (6943, 6970), False, 'import os\n'), ((7131, 7152), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (7140, 7152), True, 'import numpy as np\n'), ((7164, 7185), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(5)'], {}), '(-10, 10, 5)\n', (7173, 7185), True, 'import numpy as np\n'), ((7197, 7218), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(1)'], {}), '(-10, 10, 1)\n', (7206, 7218), True, 'import numpy as np\n'), ((7230, 7259), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['xrng'], {}), '(xrng)\n', (7253, 7259), False, 'import pyvista\n'), ((7331, 7366), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['xrng', 'yrng'], {}), '(xrng, yrng)\n', (7354, 7366), False, 'import pyvista\n'), ((7442, 7483), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['xrng', 'yrng', 'zrng'], {}), '(xrng, yrng, zrng)\n', (7465, 7483), False, 'import pyvista\n'), ((7651, 7692), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0, 2.0, 5.0, 10.0]'], {}), '([1.0, 1.0, 2.0, 2.0, 5.0, 10.0])\n', (7659, 7692), True, 'import numpy as np\n'), ((7707, 7731), 'numpy.cumsum', 'np.cumsum', (['cell_spacings'], {}), '(cell_spacings)\n', (7716, 7731), True, 'import numpy as np\n'), ((7752, 7776), 'numpy.cumsum', 'np.cumsum', (['cell_spacings'], {}), '(cell_spacings)\n', (7761, 7776), True, 'import numpy as np\n'), ((7788, 7841), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['x_coordinates', 'y_coordinates'], {}), '(x_coordinates, y_coordinates)\n', (7811, 7841), False, 'import pyvista\n'), ((8007, 8026), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (8015, 8026), True, 'import numpy as np\n'), ((8033, 8052), 'numpy.array', 'np.array', (['[0, 5, 8]'], {}), '([0, 5, 8])\n', (8041, 8052), True, 'import numpy as np\n'), ((8059, 8078), 'numpy.array', 'np.array', (['[3, 2, 1]'], {}), '([3, 2, 1])\n', (8067, 8078), True, 'import numpy as np\n'), ((8088, 8113), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', ([], {}), '()\n', (8111, 8113), False, 'import pyvista\n'), ((8290, 8312), 'numpy.allclose', 'np.allclose', (['grid.x', 'x'], {}), '(grid.x, x)\n', (8301, 8312), True, 'import numpy as np\n'), ((8324, 8346), 'numpy.allclose', 'np.allclose', (['grid.y', 'y'], {}), '(grid.y, y)\n', (8335, 8346), True, 'import numpy as np\n'), ((8358, 8380), 'numpy.allclose', 'np.allclose', (['grid.z', 'z'], {}), '(grid.z, z)\n', (8369, 8380), True, 'import numpy as np\n'), ((8440, 8467), 'pyvista.examples.load_rectilinear', 'examples.load_rectilinear', ([], {}), '()\n', (8465, 8467), False, 'from pyvista import examples\n'), ((8690, 8721), 'pyvista.read', 'pyvista.read', (['examples.rectfile'], {}), '(examples.rectfile)\n', (8702, 8721), False, 'import pyvista\n'), ((8935, 8966), 'pyvista.read', 'pyvista.read', (['examples.rectfile'], {}), '(examples.rectfile)\n', (8947, 8966), False, 'import pyvista\n'), ((9178, 9221), 'numpy.allclose', 
'np.allclose', (['structured.points', 'grid.points'], {}), '(structured.points, grid.points)\n', (9189, 9221), True, 'import numpy as np\n'), ((9528, 9553), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['dims'], {}), '(dims)\n', (9547, 9553), False, 'import pyvista\n'), ((9797, 9831), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['dims', 'spacing'], {}), '(dims, spacing)\n', (9816, 9831), False, 'import pyvista\n'), ((10020, 10062), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['dims', 'spacing', 'origin'], {}), '(dims, spacing, origin)\n', (10039, 10062), False, 'import pyvista\n'), ((10304, 10325), 'pyvista.UniformGrid', 'pyvista.UniformGrid', ([], {}), '()\n', (10323, 10325), False, 'import pyvista\n'), ((10738, 10761), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (10759, 10761), False, 'from pyvista import examples\n'), ((11005, 11039), 'pyvista.read', 'pyvista.read', (['examples.uniformfile'], {}), '(examples.uniformfile)\n', (11017, 11039), False, 'import pyvista\n'), ((11283, 11306), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (11304, 11306), False, 'from pyvista import examples\n'), ((11548, 11571), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (11569, 11571), False, 'from pyvista import examples\n'), ((12007, 12034), 'pyvista.examples.load_rectilinear', 'examples.load_rectilinear', ([], {}), '()\n', (12032, 12034), False, 'from pyvista import examples\n'), ((12079, 12112), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['filename'], {}), '(filename)\n', (12102, 12112), False, 'import pyvista\n'), ((12165, 12193), 'numpy.allclose', 'np.allclose', (['grid.x', 'ogrid.x'], {}), '(grid.x, ogrid.x)\n', (12176, 12193), True, 'import numpy as np\n'), ((12205, 12233), 'numpy.allclose', 'np.allclose', (['grid.y', 'ogrid.y'], {}), '(grid.y, ogrid.y)\n', (12216, 12233), True, 'import numpy as np\n'), ((12245, 12273), 'numpy.allclose', 'np.allclose', (['grid.z', 'ogrid.z'], {}), '(grid.z, ogrid.z)\n', (12256, 12273), True, 'import numpy as np\n'), ((12332, 12354), 'pyvista.read', 'pyvista.read', (['filename'], {}), '(filename)\n', (12344, 12354), False, 'import pyvista\n'), ((12460, 12488), 'numpy.allclose', 'np.allclose', (['grid.x', 'ogrid.x'], {}), '(grid.x, ogrid.x)\n', (12471, 12488), True, 'import numpy as np\n'), ((12500, 12528), 'numpy.allclose', 'np.allclose', (['grid.y', 'ogrid.y'], {}), '(grid.y, ogrid.y)\n', (12511, 12528), True, 'import numpy as np\n'), ((12540, 12568), 'numpy.allclose', 'np.allclose', (['grid.z', 'ogrid.z'], {}), '(grid.z, ogrid.z)\n', (12551, 12568), True, 'import numpy as np\n'), ((12853, 12876), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (12874, 12876), False, 'from pyvista import examples\n'), ((12921, 12950), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['filename'], {}), '(filename)\n', (12940, 12950), False, 'import pyvista\n'), ((13130, 13152), 'pyvista.read', 'pyvista.read', (['filename'], {}), '(filename)\n', (13142, 13152), False, 'import pyvista\n'), ((13478, 13580), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 1\n ], [0, 1, 1]]'], {}), '([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1],\n [1, 1, 1], [0, 1, 1]])\n', (13486, 13580), True, 'import numpy as np\n'), ((13749, 13770), 'pyvista.UniformGrid', 'pyvista.UniformGrid', ([], {}), '()\n', (13768, 13770), False, 'import pyvista\n'), ((14166, 14191), 'pyvista.RectilinearGrid', 
'pyvista.RectilinearGrid', ([], {}), '()\n', (14189, 14191), False, 'import pyvista\n'), ((14394, 14425), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['sgrid'], {}), '(sgrid)\n', (14418, 14425), False, 'import pyvista\n'), ((14643, 14666), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (14664, 14666), False, 'from pyvista import examples\n'), ((422, 447), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (437, 447), False, 'import os\n'), ((653, 679), 'pyvista.plotting.system_supports_plotting', 'system_supports_plotting', ([], {}), '()\n', (677, 679), False, 'from pyvista.plotting import system_supports_plotting\n'), ((1384, 1418), 'numpy.any', 'np.any', (['(grid.points == beam.points)'], {}), '(grid.points == beam.points)\n', (1390, 1418), True, 'import numpy as np\n'), ((1456, 1480), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1469, 1480), False, 'import pytest\n'), ((1554, 1578), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1567, 1578), False, 'import pytest\n'), ((3819, 3843), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3832, 3843), False, 'import pytest\n'), ((3860, 3894), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['filename'], {}), '(filename)\n', (3884, 3894), False, 'import pyvista\n'), ((3905, 3929), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3918, 3929), False, 'import pytest\n'), ((3946, 3984), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['"""not a file"""'], {}), "('not a file')\n", (3970, 3984), False, 'import pyvista\n'), ((4027, 4051), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4040, 4051), False, 'import pytest\n'), ((4068, 4104), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['"""file.abc"""'], {}), "('file.abc')\n", (4092, 4104), False, 'import pyvista\n'), ((6093, 6117), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6106, 6117), False, 'import pytest\n'), ((6134, 6165), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (6156, 6165), False, 'import pyvista\n'), ((6844, 6868), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6857, 6868), False, 'import pytest\n'), ((6878, 6914), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['"""not a file"""'], {}), "('not a file')\n", (6900, 6914), False, 'import pyvista\n'), ((6980, 7004), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6993, 7004), False, 'import pytest\n'), ((7021, 7053), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['filename'], {}), '(filename)\n', (7043, 7053), False, 'import pyvista\n'), ((9280, 9322), 'numpy.allclose', 'np.allclose', (['structured.point_arrays[k]', 'v'], {}), '(structured.point_arrays[k], v)\n', (9291, 9322), True, 'import numpy as np\n'), ((9380, 9421), 'numpy.allclose', 'np.allclose', (['structured.cell_arrays[k]', 'v'], {}), '(structured.cell_arrays[k], v)\n', (9391, 9421), True, 'import numpy as np\n'), ((13935, 13965), 'numpy.unique', 'np.unique', (['grid.points'], {'axis': '(0)'}), '(grid.points, axis=0)\n', (13944, 13965), True, 'import numpy as np\n'), ((13967, 13992), 'numpy.unique', 'np.unique', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (13976, 13992), True, 'import numpy as np\n'), ((14058, 14081), 'numpy.unique', 'np.unique', (['opts'], {'axis': '(0)'}), '(opts, axis=0)\n', (14067, 14081), True, 
'import numpy as np\n'), ((14083, 14108), 'numpy.unique', 'np.unique', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (14092, 14108), True, 'import numpy as np\n'), ((14280, 14310), 'numpy.unique', 'np.unique', (['grid.points'], {'axis': '(0)'}), '(grid.points, axis=0)\n', (14289, 14310), True, 'import numpy as np\n'), ((14312, 14337), 'numpy.unique', 'np.unique', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (14321, 14337), True, 'import numpy as np\n'), ((14919, 14959), 'numpy.all', 'np.all', (['(uniform.active_scalars == values)'], {}), '(uniform.active_scalars == values)\n', (14925, 14959), True, 'import numpy as np\n'), ((15185, 15225), 'numpy.all', 'np.all', (['(uniform.active_scalars == values)'], {}), '(uniform.active_scalars == values)\n', (15191, 15225), True, 'import numpy as np\n'), ((1531, 1542), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1539, 1542), True, 'import numpy as np\n'), ((1629, 1640), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1637, 1640), True, 'import numpy as np\n'), ((1691, 1702), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1699, 1702), True, 'import numpy as np\n'), ((1753, 1764), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1761, 1764), True, 'import numpy as np\n'), ((2598, 2623), 'numpy.vstack', 'np.vstack', (['(cell1, cell2)'], {}), '((cell1, cell2))\n', (2607, 2623), True, 'import numpy as np\n')]
|
import warnings
warnings.simplefilter('ignore')
import argparse
import pickle
import numpy as np
import pandas as pd
import networkx as nx
import scipy.sparse as sp
from network_propagation_methods import minprop_2
from sklearn.metrics import roc_auc_score, auc
import matplotlib.pyplot as plt
#### Parameters #############
parser = argparse.ArgumentParser(description='Runs MINProp')
parser.add_argument('--alphaP', type=float, default=0.25, help='diffusion parameter for the protein-protein interaction network')
parser.add_argument('--alphaD', type=float, default=0.25, help='diffusion parameter for the disease similarity network')
parser.add_argument('--max_iter', type=int, default=1000, help='maximum number of iterations')
parser.add_argument('--eps', type=float, default=1.0e-6, help='convergence threshold')
parser.add_argument('--dir_data', type=str, default='./data/', help='directory of pickled network data')
args = parser.parse_args()
#### load data ############
### protein-protein interaction network
with open(args.dir_data + 'norm_adj_networkP.pickle', mode='rb') as f:
norm_adj_networkP = pickle.load(f)
nb_proteins = norm_adj_networkP.shape[0]
### disease similarity network
with open(args.dir_data + 'adj_networkD.pickle', mode='rb') as f:
adj_networkD = pickle.load(f)
nb_diseases = adj_networkD.shape[0]
# symmetrically normalized adjacency matrix: A_ij / sqrt(deg_i * deg_j)
deg_networkD = np.sum(adj_networkD, axis=0)
norm_adj_networkD = sp.csr_matrix(adj_networkD / np.sqrt(np.dot(deg_networkD.T, deg_networkD)), dtype=np.float64)
del adj_networkD
del deg_networkD
### protein-disease network (data used in PRINCE study)
with open(args.dir_data + 'biadj_networkPD.pickle', mode='rb') as f:
biadj_networkPD = pickle.load(f)
# get the list of protein-disease pairs
PD_pairs = biadj_networkPD.nonzero()
# number of protein-disease pairs
nb_PD_pairs = len(PD_pairs[0])
#### Network propagation MINProp ###########################
roc_value_set = np.array([], dtype=np.float64)
rankings = np.array([], dtype=np.int64)
for i in range(nb_PD_pairs):
# leave-one-out validation
# remove a protein-disease association
idx_P = PD_pairs[0][i]
idx_D = PD_pairs[1][i]
biadj_networkPD[idx_P, idx_D] = 0.0
biadj_networkPD.eliminate_zeros()
# normalized biadjacency matrix (ToDo: faster implementation)
degP = np.sum(biadj_networkPD, axis=1)
degD = np.sum(biadj_networkPD, axis=0)
norm_biadj_networkPD = sp.csr_matrix(biadj_networkPD / np.sqrt(np.dot(degP, degD)), dtype=np.float64)
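    # replace NaNs from 0/0 divisions (proteins or diseases with no remaining associations) with zero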
norm_biadj_networkPD.data[np.isnan(norm_biadj_networkPD.data)] = 0.0
norm_biadj_networkPD.eliminate_zeros()
# set initial label
yP = np.zeros(nb_proteins, dtype=np.float64)
yD = np.zeros(nb_diseases, dtype=np.float64)
yD[idx_D] = 1.0
# propagation
fP, fD, convergent = minprop_2(norm_adj_networkP, norm_adj_networkD, norm_biadj_networkPD, yP, yD, args.alphaP, args.alphaD, args.eps, args.max_iter)
# ranking
labels_real = np.zeros(nb_proteins)
labels_real[idx_P] = 1
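    # rank of the held-out protein when all proteins are sorted by descending propagated score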
rank = int(np.where(labels_real[np.argsort(-fP)]==1)[0]) + 1
rankings = np.append(rankings, rank)
# get AUC value
roc_value = roc_auc_score(labels_real, fP)
print(i, "AUC:", roc_value, convergent)
roc_value_set = np.append(roc_value_set, roc_value)
# reassign the protein-disease association
biadj_networkPD[idx_P, idx_D] = 1.0
print("Average AUC", np.mean(roc_value_set))
# compute sensitivity and top rate (ROC-like curve)
# ToDo: faster implementation
sen_set = np.array([], dtype=np.float64)
top_rate_set = np.array([], dtype=np.float64)
for k in range(nb_proteins):
    # sensitivity
sen = (rankings <= (k+1)).sum() / nb_PD_pairs
# top rate
top_rate = (k + 1) / nb_proteins
sen_set = np.append(sen_set, sen)
top_rate_set = np.append(top_rate_set, top_rate)
# get AUC value
print("Summarized AUC", auc(top_rate_set, sen_set))
# plot ROC-like curve
plt.scatter(top_rate_set, sen_set)
plt.show()
|
[
"network_propagation_methods.minprop_2",
"numpy.mean",
"argparse.ArgumentParser",
"sklearn.metrics.auc",
"pickle.load",
"sklearn.metrics.roc_auc_score",
"numpy.append",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.dot",
"matplotlib.pyplot.scatter",
"numpy.argsort",
"warnings.simplefilter",
"matplotlib.pyplot.show"
] |
[((16, 47), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (37, 47), False, 'import warnings\n'), ((335, 386), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Runs MINProp"""'}), "(description='Runs MINProp')\n", (358, 386), False, 'import argparse\n'), ((1386, 1414), 'numpy.sum', 'np.sum', (['adj_networkD'], {'axis': '(0)'}), '(adj_networkD, axis=0)\n', (1392, 1414), True, 'import numpy as np\n'), ((1949, 1979), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (1957, 1979), True, 'import numpy as np\n'), ((1991, 2019), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (1999, 2019), True, 'import numpy as np\n'), ((3523, 3553), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (3531, 3553), True, 'import numpy as np\n'), ((3569, 3599), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (3577, 3599), True, 'import numpy as np\n'), ((3937, 3971), 'matplotlib.pyplot.scatter', 'plt.scatter', (['top_rate_set', 'sen_set'], {}), '(top_rate_set, sen_set)\n', (3948, 3971), True, 'import matplotlib.pyplot as plt\n'), ((3972, 3982), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3980, 3982), True, 'import matplotlib.pyplot as plt\n'), ((1116, 1130), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1127, 1130), False, 'import pickle\n'), ((1289, 1303), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1300, 1303), False, 'import pickle\n'), ((1713, 1727), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1724, 1727), False, 'import pickle\n'), ((2332, 2363), 'numpy.sum', 'np.sum', (['biadj_networkPD'], {'axis': '(1)'}), '(biadj_networkPD, axis=1)\n', (2338, 2363), True, 'import numpy as np\n'), ((2375, 2406), 'numpy.sum', 'np.sum', (['biadj_networkPD'], {'axis': '(0)'}), '(biadj_networkPD, axis=0)\n', (2381, 2406), True, 'import numpy as np\n'), ((2662, 2701), 'numpy.zeros', 'np.zeros', (['nb_proteins'], {'dtype': 'np.float64'}), '(nb_proteins, dtype=np.float64)\n', (2670, 2701), True, 'import numpy as np\n'), ((2711, 2750), 'numpy.zeros', 'np.zeros', (['nb_diseases'], {'dtype': 'np.float64'}), '(nb_diseases, dtype=np.float64)\n', (2719, 2750), True, 'import numpy as np\n'), ((2814, 2946), 'network_propagation_methods.minprop_2', 'minprop_2', (['norm_adj_networkP', 'norm_adj_networkD', 'norm_biadj_networkPD', 'yP', 'yD', 'args.alphaP', 'args.alphaD', 'args.eps', 'args.max_iter'], {}), '(norm_adj_networkP, norm_adj_networkD, norm_biadj_networkPD, yP,\n yD, args.alphaP, args.alphaD, args.eps, args.max_iter)\n', (2823, 2946), False, 'from network_propagation_methods import minprop_2\n'), ((2975, 2996), 'numpy.zeros', 'np.zeros', (['nb_proteins'], {}), '(nb_proteins)\n', (2983, 2996), True, 'import numpy as np\n'), ((3104, 3129), 'numpy.append', 'np.append', (['rankings', 'rank'], {}), '(rankings, rank)\n', (3113, 3129), True, 'import numpy as np\n'), ((3166, 3196), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels_real', 'fP'], {}), '(labels_real, fP)\n', (3179, 3196), False, 'from sklearn.metrics import roc_auc_score, auc\n'), ((3261, 3296), 'numpy.append', 'np.append', (['roc_value_set', 'roc_value'], {}), '(roc_value_set, roc_value)\n', (3270, 3296), True, 'import numpy as np\n'), ((3406, 3428), 'numpy.mean', 'np.mean', (['roc_value_set'], {}), '(roc_value_set)\n', (3413, 3428), True, 'import numpy as np\n'), ((3768, 3791), 'numpy.append', 
'np.append', (['sen_set', 'sen'], {}), '(sen_set, sen)\n', (3777, 3791), True, 'import numpy as np\n'), ((3811, 3844), 'numpy.append', 'np.append', (['top_rate_set', 'top_rate'], {}), '(top_rate_set, top_rate)\n', (3820, 3844), True, 'import numpy as np\n'), ((3886, 3912), 'sklearn.metrics.auc', 'auc', (['top_rate_set', 'sen_set'], {}), '(top_rate_set, sen_set)\n', (3889, 3912), False, 'from sklearn.metrics import roc_auc_score, auc\n'), ((2543, 2578), 'numpy.isnan', 'np.isnan', (['norm_biadj_networkPD.data'], {}), '(norm_biadj_networkPD.data)\n', (2551, 2578), True, 'import numpy as np\n'), ((1472, 1508), 'numpy.dot', 'np.dot', (['deg_networkD.T', 'deg_networkD'], {}), '(deg_networkD.T, deg_networkD)\n', (1478, 1508), True, 'import numpy as np\n'), ((2474, 2492), 'numpy.dot', 'np.dot', (['degP', 'degD'], {}), '(degP, degD)\n', (2480, 2492), True, 'import numpy as np\n'), ((3060, 3075), 'numpy.argsort', 'np.argsort', (['(-fP)'], {}), '(-fP)\n', (3070, 3075), True, 'import numpy as np\n')]
|
########################################################################################################################
# #
# This file is part of kAIvy #
# #
# Copyright (c) 2019-2021 by the kAIvy team and contributors #
# #
########################################################################################################################
import numpy as np
from kaivy.geometry.geometry2d import Geometry2D
from kaivy.geometry.transformation2d import Transformation2D
from kivy.graphics import Line, SmoothLine, Color
class Line2D(Geometry2D):
"""
Defines a simple line defined by two points
"""
def __init__(self, points, width=1.0, color=(1.0, 1.0, 1.0, 1.0)):
"""
Initializer
:param points: The line's points
"""
super().__init__()
self.geometry_class_name = 'Line2D'
self.set_nodes(np.array(points))
self.smooth = True
self.color = color
self.width = width
def render_to_kivy(self, target, transformation: Transformation2D, parameters={}, geometry_out=None):
color = parameters.get('color', self.color)
target.add(Color(*color))
nodes = transformation.transform(self.nodes)
if geometry_out is not None:
if self.GO_TAG_LINE_LIST not in geometry_out: # add line array if still missing
geometry_out[self.GO_TAG_LINE_LIST] = []
geometry_out[self.GO_TAG_LINE_LIST].append({self.GO_TAG_OWNER: self, self.GO_TAG_LINE_LIST_LINES: nodes})
nodes = nodes.flatten().tolist()
if self.smooth:
target.add(SmoothLine(points=nodes, width=self.width))
else:
target.add(Line(points=nodes, width=self.width))
def distance_to_point(self, point, ray=False):
"""
        Returns the distance between this line and the given point
        :param point: A 2D coordinate
        :param ray: If True, treat the line as an unbound ray rather than a segment
"""
return self.line_distance_to_point(self.nodes, point, ray=ray)
@staticmethod
def line_distance_to_point(point_list, point, ray=False):
"""
        Returns the distance between the line through p1 and p2 and a given point
:param point_list: The line's points as numpy array
:param point: A 2D coordinate
        :param ray: If True, treat the line as an unbound ray rather than a segment
:return: The distance to the point and the nearest point. None, None if line is invalid
"""
# two points define the line
n = (point_list[1] - point_list[0])
if np.sum(n) == 0:
return None, None
line_length = np.linalg.norm(n)
n = n / line_length
ap = point - point_list[0]
t = ap.dot(n)
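        # t is the scalar projection of the point onto the unit line direction;
        # clamping it to [0, line_length] keeps the nearest point on the segment when ray is False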
if not ray:
t = min(max(t, 0), line_length)
x = point_list[0] + t * n
# d = (np.cross(ap, n) ** 2).sum()**0.5
return ((point - x) ** 2).sum() ** 0.5, x
def to_dict(self, options): # Overrides Geometry2D to_dict
result = super().to_dict(options)
if options.get(self.OPTION_VISUAL_DETAILS, True):
result['width'] = self.width
result['smooth'] = self.smooth
return result
|
[
"kivy.graphics.Line",
"kivy.graphics.SmoothLine",
"numpy.linalg.norm",
"numpy.sum",
"numpy.array",
"kivy.graphics.Color"
] |
[((3148, 3165), 'numpy.linalg.norm', 'np.linalg.norm', (['n'], {}), '(n)\n', (3162, 3165), True, 'import numpy as np\n'), ((1386, 1402), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1394, 1402), True, 'import numpy as np\n'), ((1663, 1676), 'kivy.graphics.Color', 'Color', (['*color'], {}), '(*color)\n', (1668, 1676), False, 'from kivy.graphics import Line, SmoothLine, Color\n'), ((3080, 3089), 'numpy.sum', 'np.sum', (['n'], {}), '(n)\n', (3086, 3089), True, 'import numpy as np\n'), ((2127, 2169), 'kivy.graphics.SmoothLine', 'SmoothLine', ([], {'points': 'nodes', 'width': 'self.width'}), '(points=nodes, width=self.width)\n', (2137, 2169), False, 'from kivy.graphics import Line, SmoothLine, Color\n'), ((2208, 2244), 'kivy.graphics.Line', 'Line', ([], {'points': 'nodes', 'width': 'self.width'}), '(points=nodes, width=self.width)\n', (2212, 2244), False, 'from kivy.graphics import Line, SmoothLine, Color\n')]
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import openvino.runtime.opset9 as ov
import numpy as np
import pytest
from tests.runtime import get_runtime
from openvino.runtime.utils.types import get_element_type_str
from openvino.runtime.utils.types import get_element_type
@pytest.mark.parametrize(
"num_rows, num_columns, diagonal_index, out_type",
[
pytest.param(2, 5, 0, np.float32),
pytest.param(5, 3, 2, np.int64),
pytest.param(3, 3, -1, np.float16),
pytest.param(5, 5, -10, np.float32),
],
)
def test_eye_rectangle(num_rows, num_columns, diagonal_index, out_type):
num_rows_array = np.array([num_rows], np.int32)
num_columns_array = np.array([num_columns], np.int32)
diagonal_index_array = np.array([diagonal_index], np.int32)
num_rows_tensor = ov.constant(num_rows_array)
num_columns_tensor = ov.constant(num_columns_array)
diagonal_index_tensor = ov.constant(diagonal_index_array)
# Create with param names
eye_node = ov.eye(num_rows=num_rows_tensor,
num_columns=num_columns_tensor,
diagonal_index=diagonal_index_tensor,
output_type=get_element_type_str(out_type))
    # Create with default order
eye_node = ov.eye(num_rows_tensor,
num_columns_tensor,
diagonal_index_tensor,
get_element_type_str(out_type))
expected_results = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)
assert eye_node.get_type_name() == "Eye"
assert eye_node.get_output_size() == 1
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
# runtime = get_runtime()
# computation = runtime.computation(eye_node)
# eye_results = computation()
# assert np.allclose(eye_results, expected_results)
@pytest.mark.parametrize(
"num_rows, num_columns, diagonal_index, batch_shape, out_type",
[
pytest.param(2, 5, 0, [1], np.float32),
pytest.param(5, 3, 2, [2, 2], np.int64),
pytest.param(3, 3, -1, [1, 3, 2], np.float16),
pytest.param(5, 5, -10, [1, 1], np.float32),
],
)
def test_eye_batch_shape(num_rows, num_columns, diagonal_index, batch_shape, out_type):
num_rows_array = np.array([num_rows], np.int32)
num_columns_array = np.array([num_columns], np.int32)
diagonal_index_array = np.array([diagonal_index], np.int32)
batch_shape_array = np.array(batch_shape, np.int32)
num_rows_tensor = ov.constant(num_rows_array)
num_columns_tensor = ov.constant(num_columns_array)
diagonal_index_tensor = ov.constant(diagonal_index_array)
batch_shape_tensor = ov.constant(batch_shape_array)
# Create with param names
eye_node = ov.eye(num_rows=num_rows_tensor,
num_columns=num_columns_tensor,
diagonal_index=diagonal_index_tensor,
batch_shape=batch_shape_tensor,
output_type=get_element_type_str(out_type))
    # Create with default order
eye_node = ov.eye(num_rows_tensor,
num_columns_tensor,
diagonal_index_tensor,
get_element_type_str(out_type),
batch_shape_tensor)
output_shape = [*batch_shape, 1, 1]
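    # expected output: the single eye matrix tiled across the batch dimensions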
one_matrix = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)
expected_results = np.tile(one_matrix, output_shape)
assert eye_node.get_type_name() == "Eye"
assert eye_node.get_output_size() == 1
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
# runtime = get_runtime()
# computation = runtime.computation(eye_node)
# eye_results = computation()
# assert np.allclose(eye_results, expected_results)
|
[
"numpy.tile",
"numpy.eye",
"pytest.param",
"numpy.array",
"openvino.runtime.utils.types.get_element_type",
"openvino.runtime.opset9.constant",
"openvino.runtime.utils.types.get_element_type_str"
] |
[((677, 707), 'numpy.array', 'np.array', (['[num_rows]', 'np.int32'], {}), '([num_rows], np.int32)\n', (685, 707), True, 'import numpy as np\n'), ((732, 765), 'numpy.array', 'np.array', (['[num_columns]', 'np.int32'], {}), '([num_columns], np.int32)\n', (740, 765), True, 'import numpy as np\n'), ((793, 829), 'numpy.array', 'np.array', (['[diagonal_index]', 'np.int32'], {}), '([diagonal_index], np.int32)\n', (801, 829), True, 'import numpy as np\n'), ((852, 879), 'openvino.runtime.opset9.constant', 'ov.constant', (['num_rows_array'], {}), '(num_rows_array)\n', (863, 879), True, 'import openvino.runtime.opset9 as ov\n'), ((905, 935), 'openvino.runtime.opset9.constant', 'ov.constant', (['num_columns_array'], {}), '(num_columns_array)\n', (916, 935), True, 'import openvino.runtime.opset9 as ov\n'), ((964, 997), 'openvino.runtime.opset9.constant', 'ov.constant', (['diagonal_index_array'], {}), '(diagonal_index_array)\n', (975, 997), True, 'import openvino.runtime.opset9 as ov\n'), ((1494, 1561), 'numpy.eye', 'np.eye', (['num_rows'], {'M': 'num_columns', 'k': 'diagonal_index', 'dtype': 'np.float32'}), '(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)\n', (1500, 1561), True, 'import numpy as np\n'), ((2450, 2480), 'numpy.array', 'np.array', (['[num_rows]', 'np.int32'], {}), '([num_rows], np.int32)\n', (2458, 2480), True, 'import numpy as np\n'), ((2505, 2538), 'numpy.array', 'np.array', (['[num_columns]', 'np.int32'], {}), '([num_columns], np.int32)\n', (2513, 2538), True, 'import numpy as np\n'), ((2566, 2602), 'numpy.array', 'np.array', (['[diagonal_index]', 'np.int32'], {}), '([diagonal_index], np.int32)\n', (2574, 2602), True, 'import numpy as np\n'), ((2627, 2658), 'numpy.array', 'np.array', (['batch_shape', 'np.int32'], {}), '(batch_shape, np.int32)\n', (2635, 2658), True, 'import numpy as np\n'), ((2681, 2708), 'openvino.runtime.opset9.constant', 'ov.constant', (['num_rows_array'], {}), '(num_rows_array)\n', (2692, 2708), True, 'import openvino.runtime.opset9 as ov\n'), ((2734, 2764), 'openvino.runtime.opset9.constant', 'ov.constant', (['num_columns_array'], {}), '(num_columns_array)\n', (2745, 2764), True, 'import openvino.runtime.opset9 as ov\n'), ((2793, 2826), 'openvino.runtime.opset9.constant', 'ov.constant', (['diagonal_index_array'], {}), '(diagonal_index_array)\n', (2804, 2826), True, 'import openvino.runtime.opset9 as ov\n'), ((2852, 2882), 'openvino.runtime.opset9.constant', 'ov.constant', (['batch_shape_array'], {}), '(batch_shape_array)\n', (2863, 2882), True, 'import openvino.runtime.opset9 as ov\n'), ((3509, 3576), 'numpy.eye', 'np.eye', (['num_rows'], {'M': 'num_columns', 'k': 'diagonal_index', 'dtype': 'np.float32'}), '(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)\n', (3515, 3576), True, 'import numpy as np\n'), ((3600, 3633), 'numpy.tile', 'np.tile', (['one_matrix', 'output_shape'], {}), '(one_matrix, output_shape)\n', (3607, 3633), True, 'import numpy as np\n'), ((1438, 1468), 'openvino.runtime.utils.types.get_element_type_str', 'get_element_type_str', (['out_type'], {}), '(out_type)\n', (1458, 1468), False, 'from openvino.runtime.utils.types import get_element_type_str\n'), ((1701, 1727), 'openvino.runtime.utils.types.get_element_type', 'get_element_type', (['out_type'], {}), '(out_type)\n', (1717, 1727), False, 'from openvino.runtime.utils.types import get_element_type\n'), ((409, 442), 'pytest.param', 'pytest.param', (['(2)', '(5)', '(0)', 'np.float32'], {}), '(2, 5, 0, np.float32)\n', (421, 442), False, 'import pytest\n'), ((452, 483), 
'pytest.param', 'pytest.param', (['(5)', '(3)', '(2)', 'np.int64'], {}), '(5, 3, 2, np.int64)\n', (464, 483), False, 'import pytest\n'), ((493, 527), 'pytest.param', 'pytest.param', (['(3)', '(3)', '(-1)', 'np.float16'], {}), '(3, 3, -1, np.float16)\n', (505, 527), False, 'import pytest\n'), ((537, 572), 'pytest.param', 'pytest.param', (['(5)', '(5)', '(-10)', 'np.float32'], {}), '(5, 5, -10, np.float32)\n', (549, 572), False, 'import pytest\n'), ((3377, 3407), 'openvino.runtime.utils.types.get_element_type_str', 'get_element_type_str', (['out_type'], {}), '(out_type)\n', (3397, 3407), False, 'from openvino.runtime.utils.types import get_element_type_str\n'), ((3773, 3799), 'openvino.runtime.utils.types.get_element_type', 'get_element_type', (['out_type'], {}), '(out_type)\n', (3789, 3799), False, 'from openvino.runtime.utils.types import get_element_type\n'), ((2135, 2173), 'pytest.param', 'pytest.param', (['(2)', '(5)', '(0)', '[1]', 'np.float32'], {}), '(2, 5, 0, [1], np.float32)\n', (2147, 2173), False, 'import pytest\n'), ((2183, 2222), 'pytest.param', 'pytest.param', (['(5)', '(3)', '(2)', '[2, 2]', 'np.int64'], {}), '(5, 3, 2, [2, 2], np.int64)\n', (2195, 2222), False, 'import pytest\n'), ((2232, 2277), 'pytest.param', 'pytest.param', (['(3)', '(3)', '(-1)', '[1, 3, 2]', 'np.float16'], {}), '(3, 3, -1, [1, 3, 2], np.float16)\n', (2244, 2277), False, 'import pytest\n'), ((2287, 2330), 'pytest.param', 'pytest.param', (['(5)', '(5)', '(-10)', '[1, 1]', 'np.float32'], {}), '(5, 5, -10, [1, 1], np.float32)\n', (2299, 2330), False, 'import pytest\n'), ((1225, 1255), 'openvino.runtime.utils.types.get_element_type_str', 'get_element_type_str', (['out_type'], {}), '(out_type)\n', (1245, 1255), False, 'from openvino.runtime.utils.types import get_element_type_str\n'), ((3164, 3194), 'openvino.runtime.utils.types.get_element_type_str', 'get_element_type_str', (['out_type'], {}), '(out_type)\n', (3184, 3194), False, 'from openvino.runtime.utils.types import get_element_type_str\n')]
|
# --------------------------------------------------------
# (c) Copyright 2014 by <NAME>.
# Licensed under BSD 3-clause licence.
# --------------------------------------------------------
import unittest
from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing
from pymonad.Reader import curry
from pymonad.test.MonadTester import *
from pymonad.test.MonoidTester import *
class TestJustFunctor(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestJustFunctor, self).__init__(x)
self.setClassUnderTest(Just)
def testFunctorLaws(self):
self.given(8)
self.ensure_first_functor_law_holds()
self.ensure_second_functor_law_holds()
class TestNothingFunctor(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestNothingFunctor, self).__init__(x)
self.setClassUnderTest(_Nothing)
def testFunctorLaws(self):
self.given(None)
self.ensure_first_functor_law_holds()
self.ensure_second_functor_law_holds()
class TestJustApplicative(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestJustApplicative, self).__init__(x)
self.setClassUnderTest(Just)
def testApplicativeLaws(self):
self.given(8)
self.ensure_first_applicative_law_holds()
self.ensure_second_applicative_law_holds()
self.ensure_third_applicative_law_holds()
self.ensure_fourth_applicative_law_holds()
self.ensure_fifth_applicative_law_holds()
class TestNothingApplicative(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestNothingApplicative, self).__init__(x)
self.setClassUnderTest(_Nothing)
def testApplicativeLaws(self):
self.given(None)
self.ensure_first_applicative_law_holds()
self.ensure_second_applicative_law_holds()
self.ensure_third_applicative_law_holds()
self.ensure_fourth_applicative_law_holds()
self.ensure_fifth_applicative_law_holds()
class TestJustMonad(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestJustMonad, self).__init__(x)
self.setClassUnderTest(Just)
def monad_function_f(self, x):
return Just(x + 10)
def monad_function_g(self, x):
return Just(x * 5)
def testMonadLaws(self):
self.given(8)
self.ensure_first_monad_law_holds()
self.ensure_second_monad_law_holds()
self.ensure_third_monad_law_holds()
class TestNothingMonad(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestNothingMonad, self).__init__(x)
self.setClassUnderTest(_Nothing)
def monad_function_f(self, x):
return Just(x + 10)
def monad_function_g(self, x):
return Just(x * 5)
def testMonadLaws(self):
self.given(None)
self.ensure_first_monad_law_holds()
self.ensure_second_monad_law_holds()
self.ensure_third_monad_law_holds()
class TestMaybeEquality(unittest.TestCase, MonadTester):
def testEqualityOfIdenticalTypes(self):
self.givenMonads(Just(8), Just(8))
self.ensureMonadsAreEqual()
def testInequalityOfIdenticalTypes(self):
self.givenMonads(Just(8), Just(9))
self.ensureMonadsAreNotEqual()
def testInequalityOfJustAndNothing(self):
self.givenMonads(Just(8), Nothing)
self.ensureMonadsAreNotEqual()
def testMonadComparisonExceptionWithJust(self):
self.givenMonads(Just(8), Reader(8))
self.ensureComparisonRaisesException()
def testMonadComparisonExceptionWithNothing(self):
self.givenMonads(Nothing, Reader(8))
self.ensureComparisonRaisesException()
class TestMaybeMonoid(unittest.TestCase, MonoidTester):
def test_mzero(self):
self.givenMonoid(Maybe)
self.get_mzero()
self.ensure_mzero_is(Nothing)
def test_right_identity(self):
self.givenMonoid(Just(9))
self.ensure_monoid_plus_zero_equals(Just(9))
def test_left_identity(self):
self.givenMonoid(Just(9))
self.ensure_zero_plus_monoid_equals(Just(9))
def test_associativity(self):
self.givenMonoids(Just(1), Just(2), Just(3))
self.ensure_associativity()
def test_mplus_with_two_just_values(self):
self.givenMonoids(Just(1), Just(2))
self.ensure_mconcat_equals(Just(3))
def test_mplus_with_one_just_and_one_nothing(self):
self.givenMonoids(Just(1), Nothing)
self.ensure_mconcat_equals(Just(1))
class TestFirstMonoid(unittest.TestCase, MonoidTester):
def test_mzero(self):
self.givenMonoid(First)
self.get_mzero()
self.ensure_mzero_is(First(Nothing))
def test_right_identity(self):
self.givenMonoid(First(Just(9)))
self.ensure_monoid_plus_zero_equals(First(Just(9)))
def test_left_identity(self):
self.givenMonoid(First(Just(9)))
self.ensure_zero_plus_monoid_equals(First(Just(9)))
def test_associativity(self):
self.givenMonoids(First(Just(1)), First(Just(2)), First(Just(3)))
self.ensure_associativity()
def test_mplus_with_two_just_values(self):
self.givenMonoids(First(Just(1)), First(Just(2)))
self.ensure_mconcat_equals(First(Just(1)))
def test_mplus_with_just_and_nothing(self):
self.givenMonoids(First(Just(1)), Nothing)
self.ensure_mconcat_equals(First(Just(1)))
def test_mplus_with_nothing_and_just(self):
self.givenMonoids(Nothing, First(Just(1)))
self.ensure_mconcat_equals(First(Just(1)))
class TestLastMonoid(unittest.TestCase, MonoidTester):
def test_mzero(self):
self.givenMonoid(Last)
self.get_mzero()
self.ensure_mzero_is(Last(Nothing))
def test_right_identity(self):
self.givenMonoid(Last(Just(9)))
self.ensure_monoid_plus_zero_equals(Last(Just(9)))
def test_left_identity(self):
self.givenMonoid(Last(Just(9)))
self.ensure_zero_plus_monoid_equals(Last(Just(9)))
def test_associativity(self):
self.givenMonoids(Last(Just(1)), Last(Just(2)), Last(Just(3)))
self.ensure_associativity()
def test_mplus_with_two_just_values(self):
self.givenMonoids(Last(Just(1)), Last(Just(2)))
self.ensure_mconcat_equals(Last(Just(2)))
def test_mplus_with_just_and_nothing(self):
self.givenMonoids(Last(Just(1)), Nothing)
self.ensure_mconcat_equals(Last(Just(1)))
def test_mplus_with_nothing_and_just(self):
self.givenMonoids(Nothing, Last(Just(1)))
self.ensure_mconcat_equals(Last(Just(1)))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"pymonad.Maybe.First",
"pymonad.Maybe.Last",
"pymonad.Maybe.Just"
] |
[((6686, 6701), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6699, 6701), False, 'import unittest\n'), ((2236, 2248), 'pymonad.Maybe.Just', 'Just', (['(x + 10)'], {}), '(x + 10)\n', (2240, 2248), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((2300, 2311), 'pymonad.Maybe.Just', 'Just', (['(x * 5)'], {}), '(x * 5)\n', (2304, 2311), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((2723, 2735), 'pymonad.Maybe.Just', 'Just', (['(x + 10)'], {}), '(x + 10)\n', (2727, 2735), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((2787, 2798), 'pymonad.Maybe.Just', 'Just', (['(x * 5)'], {}), '(x * 5)\n', (2791, 2798), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((3114, 3121), 'pymonad.Maybe.Just', 'Just', (['(8)'], {}), '(8)\n', (3118, 3121), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((3123, 3130), 'pymonad.Maybe.Just', 'Just', (['(8)'], {}), '(8)\n', (3127, 3130), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((3240, 3247), 'pymonad.Maybe.Just', 'Just', (['(8)'], {}), '(8)\n', (3244, 3247), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((3249, 3256), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (3253, 3256), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((3369, 3376), 'pymonad.Maybe.Just', 'Just', (['(8)'], {}), '(8)\n', (3373, 3376), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((3504, 3511), 'pymonad.Maybe.Just', 'Just', (['(8)'], {}), '(8)\n', (3508, 3511), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((3958, 3965), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (3962, 3965), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4011, 4018), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (4015, 4018), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4080, 4087), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (4084, 4087), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4133, 4140), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (4137, 4140), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4203, 4210), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (4207, 4210), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4212, 4219), 'pymonad.Maybe.Just', 'Just', (['(2)'], {}), '(2)\n', (4216, 4219), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4221, 4228), 'pymonad.Maybe.Just', 'Just', (['(3)'], {}), '(3)\n', (4225, 4228), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4340, 4347), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (4344, 4347), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4349, 4356), 'pymonad.Maybe.Just', 'Just', (['(2)'], {}), '(2)\n', (4353, 4356), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4393, 4400), 'pymonad.Maybe.Just', 'Just', (['(3)'], {}), '(3)\n', (4397, 4400), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4485, 4492), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (4489, 4492), 
False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4538, 4545), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (4542, 4545), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4716, 4730), 'pymonad.Maybe.First', 'First', (['Nothing'], {}), '(Nothing)\n', (4721, 4730), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5776, 5789), 'pymonad.Maybe.Last', 'Last', (['Nothing'], {}), '(Nothing)\n', (5780, 5789), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4799, 4806), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (4803, 4806), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4859, 4866), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (4863, 4866), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4935, 4942), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (4939, 4942), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((4995, 5002), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (4999, 5002), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5072, 5079), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (5076, 5079), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5088, 5095), 'pymonad.Maybe.Just', 'Just', (['(2)'], {}), '(2)\n', (5092, 5095), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5104, 5111), 'pymonad.Maybe.Just', 'Just', (['(3)'], {}), '(3)\n', (5108, 5111), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5230, 5237), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (5234, 5237), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5246, 5253), 'pymonad.Maybe.Just', 'Just', (['(2)'], {}), '(2)\n', (5250, 5253), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5297, 5304), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (5301, 5304), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5388, 5395), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (5392, 5395), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5448, 5455), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (5452, 5455), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5548, 5555), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (5552, 5555), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5599, 5606), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (5603, 5606), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5857, 5864), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (5861, 5864), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5916, 5923), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (5920, 5923), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((5991, 5998), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (5995, 5998), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6050, 6057), 'pymonad.Maybe.Just', 'Just', (['(9)'], {}), '(9)\n', (6054, 6057), False, 'from pymonad.Maybe import Maybe, Just, 
First, Last, _Nothing, Nothing\n'), ((6126, 6133), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (6130, 6133), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6141, 6148), 'pymonad.Maybe.Just', 'Just', (['(2)'], {}), '(2)\n', (6145, 6148), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6156, 6163), 'pymonad.Maybe.Just', 'Just', (['(3)'], {}), '(3)\n', (6160, 6163), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6281, 6288), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (6285, 6288), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6296, 6303), 'pymonad.Maybe.Just', 'Just', (['(2)'], {}), '(2)\n', (6300, 6303), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6346, 6353), 'pymonad.Maybe.Just', 'Just', (['(2)'], {}), '(2)\n', (6350, 6353), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6436, 6443), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (6440, 6443), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6495, 6502), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (6499, 6502), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6594, 6601), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (6598, 6601), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n'), ((6644, 6651), 'pymonad.Maybe.Just', 'Just', (['(1)'], {}), '(1)\n', (6648, 6651), False, 'from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing\n')]
|
from tkinter import *
from ModeEnum import Mode
import SerialHelper
import Views.StaticView
import Views.CustomWidgets.Silder
from ColorEnum import Color
from functools import partial
from Views.CommandPanel import CommandPanel
from Views.ListItem import ListItem
from ProcessControl import ProcessManager, ProcessCommandEnum
import os, signal
menuBackgroundColor = "#262e30"
menuForegroundColor = "#e5e4c5"
menuActiveForegroundColor = menuForegroundColor
menuActiveBackgroundColor = "#464743"
mainBackgroundColor = "#1b2122"
class App(Frame):
def __init__(self,master=None):
Frame.__init__(self,master)
self.mode = Mode.Static
self.ser = SerialHelper.SerialHelper()
self.test = Views.StaticView.StaticView(self)
self.sliderRed = Views.CustomWidgets.Silder.Silder(self, "Red", color=Color.RED)
self.sliderGreen = Views.CustomWidgets.Silder.Silder(self, "Green", color=Color.GREEN)
self.sliderBlue = Views.CustomWidgets.Silder.Silder(self, "Blue", color=Color.BLUE)
self.grid()
self.createWidgets()
# Restart the RGB controller
#f = open("../config/processctl", "w")
#f.write("controller.py,start")
#f.close()
##ProcessManager.sendCommand("controller.py", ProcessCommandEnum.ProcessCommandEnum.START)
def createWidgets(self):
self.cPanel = CommandPanel()
self.quitButton= Button(self, text="Quit", command=self.quit)
self.quitButton.grid()
self.my_label = Label(self, text="My Label!")
self.my_label.grid()
self.connectedLabel = Label(self, text="Not Connected", foreground='red')
self.connectedLabel.grid()
self.test.grid()
self.tempText = Label(self, text="NONE")
self.tempText.grid()
self.addButton = Button(self, text="Add", command=self.addValues)
self.addButton.grid()
# TODO: change the value to reflect the item selected index
#self.addButton = Button(self, text="Add After Selected", command=partial(self.addValues, self.cPanel.getListItemIndex(self.cPanel._selectedItem)))
# Hacky way of doing this... listItem could be done better
self.addButton = Button(self,
text="Add After Selected",
command=partial(self.addValues, listItem='Not None'))
self.addButton.grid()
# TODO: Add at a random position
self.addButton = Button(self, text="Add At A Random Position", command=partial(self.addValues, random=True))
self.addButton.grid()
# test
self.sliderRed.grid(column=0, row=0)
self.sliderGreen.grid(column=1, row=0)
self.sliderBlue.grid(column=2, row=0)
self.delayAreaFrame = Frame(self)
self.delayAreaFrame.grid(column=3, row=0)
self.fadeValLabel = Label(self.delayAreaFrame, text="Fade Value:")
self.fadeValLabel.grid(column=0, row=0)
self.fadeVal = Entry(self.delayAreaFrame)
self.fadeVal.grid(column=0, row=1)
self.delayValLabel = Label(self.delayAreaFrame, text="Delay Value:")
self.delayValLabel.grid(column=0, row=3)
self.delayVal = Entry(self.delayAreaFrame)
self.delayVal.grid(column=0, row=4)
self.addDelayButton = Button(self.delayAreaFrame, text="Add Delay Value", command=self.addDelayValue)
self.addDelayButton.grid(column=1, row=3, rowspan=2)
self.cPanel.grid(column=4,row = 0)
#self.cPanel.insert(END, ListItem(self.cPanel, "Insert Test 1"))
self.my_menu = Menu(self,
tearoff=0,
activebackground=menuActiveBackgroundColor,
background=menuBackgroundColor,
activeforeground=menuActiveForegroundColor,
foreground=menuForegroundColor
)
#self.fileMenu = Menu(self.my_menu)
#self.fileMenu.add_command(label="Exit", command=self.quit)
self.my_menu.add_cascade(label="File", menu=self.fileMenu(self.my_menu))
self.my_menu.add_cascade(label="Ports", menu=self.portsMenu(self.my_menu))
self.my_menu.add_cascade(label="Mode", menu=self.modeMenu(self.my_menu))
def fileMenu(self, mainMenu):
fileMenu = Menu(mainMenu,
tearoff=0,
activebackground=menuActiveBackgroundColor,
background=menuBackgroundColor,
activeforeground=menuActiveForegroundColor,
foreground=menuForegroundColor
)
fileMenu.add_command(label="Exit", command=self.quit)
return fileMenu
def portsMenu(self, mainMenu):
portsMenu = Menu(mainMenu,
tearoff=0,
activebackground=menuActiveBackgroundColor,
background=menuBackgroundColor,
activeforeground=menuActiveForegroundColor,
foreground=menuForegroundColor
)
for sp in SerialHelper.getSerialPorts():
# Have this be a call to the function and supply the serial port as the arg
functionCall = partial(self.selectPort, sp[0], self.connectedLabel)
portsMenu.add_command(label=sp, command=functionCall)
return portsMenu
def selectPort(self, port, uiElement):
color = 'red'
text = 'Failed'
if self.ser.connect(port):
text = 'Connected on ' + port
color = 'green'
f = open("config/port", "w")
f.write(port)
f.close()
# Restart the RGB controller
##f = open("../config/processctl", "w")
##f.write("controller.py,restart")
##f.close()
ProcessManager.sendCommand("controller.py", ProcessCommandEnum.ProcessCommandEnum.RESTART)
uiElement['foreground'] = color
uiElement['text'] = text
def modeMenu(self, mainMenu):
menu = Menu(mainMenu,
tearoff=0,
activebackground=menuActiveBackgroundColor,
background=menuBackgroundColor,
activeforeground=menuActiveForegroundColor,
foreground=menuForegroundColor
)
for m in Mode:
funcCall = partial(self.changeMode, m)
menu.add_command(label=m, command=funcCall)
return menu
def changeMode(self, mode):
print("Mode changed from: "+ (str) (self.mode) + " to: " + (str) (mode))
self.mode = mode
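        # persist the looping flag for the new mode to config/loopingCondition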
loopingCondition = os.path.join(os.getcwd(), 'config', 'loopingCondition')
f = open(loopingCondition, 'w')
message = "LOOPING: "
if self.mode == Mode.Dynamic:
message += "TRUE;"
elif self.mode == Mode.Static:
message += "FALSE;"
f.write(message)
f.close()
def parseFadeValue(self):
fadeValStr = self.fadeVal.get()
try:
value = int(fadeValStr)
if value < 1 or value > 255:
print("Delay value out of byte range")
return 1
except ValueError as err:
print(err)
return 1
return value
def addValues(self, listItem=None, index=-1, random=False):
if index is None:
print("Index was None... Values not added.")
return
elif listItem is not None:
if self.cPanel._selectedItem is None:
print("No selected object... Value was not added.")
return
index = self.cPanel.getListItemIndex(self.cPanel._selectedItem) + 1
elif random:
index = self.cPanel.getRandomIndex()
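        # build the zero-padded 'RRR,GGG,BBB,FFF;' command string from the sliders and fade value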
tempString = self.paddNum(self.sliderRed.getValue()) + ',' + self.paddNum(self.sliderGreen.getValue()) + ',' + self.paddNum(self.sliderBlue.getValue()) + ',' + self.paddNum(self.parseFadeValue()) + ';'
self.tempText['text'] = tempString
#self.writeToFile(file="../config/command", text=tempString + '\n')
self.cPanel.addItem(tempString, index)
def addDelayValue(self):
# Check range of value
delayValStr = self.delayVal.get()
try:
value = int(delayValStr)
if value < 1 or value > 255:
print("Delay value out of byte range")
return -1
except ValueError as err:
print(err)
return -1
delayValStr = "DELAY: " + delayValStr
self.cPanel.addItem(delayValStr)
def paddNum(self, num=0):
if num > 255:
print("Fade number > 255. Defaulting to 000")
return "000"
paddedZeros = ""
# Generate the correct number of padding zeros
if num < 100:
paddedZeros += '0'
if num < 10:
paddedZeros += '0'
# Pad the number
paddedZeros += str(num)
return paddedZeros
def writeToFile(self, file=None, fileArgs='a', text=None):
if file is None:
print("No file to write to...")
return
f = open(file, fileArgs)
f.write(text)
#from SerialHelper import getSerialPorts
#for sp in getSerialPorts():
# print(sp)
# Start the app up!
app = App()
app.master.title("RGB Lights 3000")
app.master.config(menu=app.my_menu, background=mainBackgroundColor)
#subprocess.call(["./controller.py", "/dev/ttyUSB0"])
# Start up the app and the process manager
pid = os.fork()
if pid:
# parent
app.mainloop()
os.kill(pid, signal.SIGTERM)
else:
# child
exec(open("./code/ProcessControl/ProcessManager.py").read())
#os.execlp("python3", "python3", "./ProcessControl/ProcessManager.py")
#os.system("controller.py")
#app.mainloop()
#print("here")
|
[
"os.kill",
"SerialHelper.getSerialPorts",
"SerialHelper.SerialHelper",
"os.getcwd",
"functools.partial",
"Views.CommandPanel.CommandPanel",
"os.fork",
"ProcessControl.ProcessManager.sendCommand"
] |
[((9776, 9785), 'os.fork', 'os.fork', ([], {}), '()\n', (9783, 9785), False, 'import os, signal\n'), ((9831, 9859), 'os.kill', 'os.kill', (['pid', 'signal.SIGTERM'], {}), '(pid, signal.SIGTERM)\n', (9838, 9859), False, 'import os, signal\n'), ((677, 704), 'SerialHelper.SerialHelper', 'SerialHelper.SerialHelper', ([], {}), '()\n', (702, 704), False, 'import SerialHelper\n'), ((1394, 1408), 'Views.CommandPanel.CommandPanel', 'CommandPanel', ([], {}), '()\n', (1406, 1408), False, 'from Views.CommandPanel import CommandPanel\n'), ((5219, 5248), 'SerialHelper.getSerialPorts', 'SerialHelper.getSerialPorts', ([], {}), '()\n', (5246, 5248), False, 'import SerialHelper\n'), ((5365, 5417), 'functools.partial', 'partial', (['self.selectPort', 'sp[0]', 'self.connectedLabel'], {}), '(self.selectPort, sp[0], self.connectedLabel)\n', (5372, 5417), False, 'from functools import partial\n'), ((5974, 6069), 'ProcessControl.ProcessManager.sendCommand', 'ProcessManager.sendCommand', (['"""controller.py"""', 'ProcessCommandEnum.ProcessCommandEnum.RESTART'], {}), "('controller.py', ProcessCommandEnum.\n ProcessCommandEnum.RESTART)\n", (6000, 6069), False, 'from ProcessControl import ProcessManager, ProcessCommandEnum\n'), ((6537, 6564), 'functools.partial', 'partial', (['self.changeMode', 'm'], {}), '(self.changeMode, m)\n', (6544, 6564), False, 'from functools import partial\n'), ((6822, 6833), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6831, 6833), False, 'import os, signal\n'), ((2362, 2406), 'functools.partial', 'partial', (['self.addValues'], {'listItem': '"""Not None"""'}), "(self.addValues, listItem='Not None')\n", (2369, 2406), False, 'from functools import partial\n'), ((2559, 2595), 'functools.partial', 'partial', (['self.addValues'], {'random': '(True)'}), '(self.addValues, random=True)\n', (2566, 2595), False, 'from functools import partial\n')]
|
import os
import json
import tempfile
import shutil
import unittest
from sarpy.io.complex.sicd import SICDReader
from sarpy.io.product.sidd import SIDDReader
from sarpy.io.product.sidd_schema import get_schema_path
from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd
from sarpy.processing.ortho_rectify import NearestNeighborMethod
from tests import parse_file_entry
try:
from lxml import etree
except ImportError:
etree = None
product_file_types = {}
this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'product_file_types.json') # specifies file locations
if os.path.isfile(file_reference):
with open(file_reference, 'r') as fi:
the_files = json.load(fi)
for the_type in the_files:
valid_entries = []
for entry in the_files[the_type]:
the_file = parse_file_entry(entry)
if the_file is not None:
valid_entries.append(the_file)
product_file_types[the_type] = valid_entries
sicd_files = product_file_types.get('SICD', [])
def check_versus_schema(input_nitf, the_schema):
reader = SIDDReader(input_nitf)
sidd_bytes = reader.nitf_details.get_des_bytes(0)
xml_doc = etree.fromstring(sidd_bytes)
xml_schema = etree.XMLSchema(file=the_schema)
return xml_schema.validate(xml_doc)
class TestSIDDWriting(unittest.TestCase):
@unittest.skipIf(len(sicd_files) == 0, 'No sicd files found')
def test_sidd_creation(self):
for fil in sicd_files:
reader = SICDReader(fil)
ortho_helper = NearestNeighborMethod(reader)
# create a temp directory
temp_directory = tempfile.mkdtemp()
sidd_files = []
# create a basic sidd detected image
with self.subTest(msg='Create version 1 detected image for file {}'.format(fil)):
create_detected_image_sidd(
ortho_helper, temp_directory, output_file='di_1.nitf', version=1)
sidd_files.append('di_1.nitf')
with self.subTest(msg='Create version 2 detected image for file {}'.format(fil)):
create_detected_image_sidd(
ortho_helper, temp_directory, output_file='di_2.nitf', version=2)
sidd_files.append('di_2.nitf')
# create a csi image
with self.subTest(msg='Create version 1 csi for file {}'.format(fil)):
create_csi_sidd(
ortho_helper, temp_directory, output_file='csi_1.nitf', version=1)
sidd_files.append('csi_1.nitf')
with self.subTest(msg='Create version 2 csi for file {}'.format(fil)):
create_csi_sidd(
ortho_helper, temp_directory, output_file='csi_2.nitf', version=2)
sidd_files.append('csi_2.nitf')
# create a dynamic image
with self.subTest(msg='Create version 1 subaperture stack for file {}'.format(fil)):
create_dynamic_image_sidd(
ortho_helper, temp_directory, output_file='sast_1.nitf', version=1, frame_count=3)
sidd_files.append('sast_1.nitf')
with self.subTest(msg='Create version 2 subaperture stack for file {}'.format(fil)):
create_dynamic_image_sidd(
ortho_helper, temp_directory, output_file='sast_2.nitf', version=2, frame_count=3)
sidd_files.append('sast_2.nitf')
            # check that each sidd structure is serialized according to the schema
if etree is not None:
for vers in [1, 2]:
schema = get_schema_path('urn:SIDD:{}.0.0'.format(vers))
the_fil = 'di_{}.nitf'.format(vers)
if the_fil in sidd_files:
self.assertTrue(
check_versus_schema(os.path.join(temp_directory, the_fil), schema),
'Detected image version {} structure not valid versus schema {}'.format(vers, schema))
the_fil = 'csi_{}.nitf'.format(vers)
if the_fil in sidd_files:
self.assertTrue(
check_versus_schema(os.path.join(temp_directory, the_fil), schema),
'csi version {} structure not valid versus schema {}'.format(vers, schema))
the_fil = 'sast_{}.nitf'.format(vers)
if the_fil in sidd_files:
self.assertTrue(
check_versus_schema(os.path.join(temp_directory, the_fil), schema),
'Dynamic image version {} structure not valid versus schema {}'.format(vers, schema))
# clean up the temporary directory
shutil.rmtree(temp_directory)
|
[
"lxml.etree.XMLSchema",
"sarpy.processing.ortho_rectify.NearestNeighborMethod",
"tests.parse_file_entry",
"os.path.join",
"os.path.split",
"os.path.isfile",
"json.load",
"sarpy.io.complex.sicd.SICDReader",
"sarpy.io.product.sidd.SIDDReader",
"tempfile.mkdtemp",
"sarpy.processing.sidd.sidd_product_creation.create_detected_image_sidd",
"lxml.etree.fromstring",
"shutil.rmtree",
"os.path.abspath",
"sarpy.processing.sidd.sidd_product_creation.create_dynamic_image_sidd",
"sarpy.processing.sidd.sidd_product_creation.create_csi_sidd"
] |
[((551, 576), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (566, 576), False, 'import os\n'), ((693, 723), 'os.path.isfile', 'os.path.isfile', (['file_reference'], {}), '(file_reference)\n', (707, 723), False, 'import os\n'), ((1226, 1248), 'sarpy.io.product.sidd.SIDDReader', 'SIDDReader', (['input_nitf'], {}), '(input_nitf)\n', (1236, 1248), False, 'from sarpy.io.product.sidd import SIDDReader\n'), ((1317, 1345), 'lxml.etree.fromstring', 'etree.fromstring', (['sidd_bytes'], {}), '(sidd_bytes)\n', (1333, 1345), False, 'from lxml import etree\n'), ((1363, 1395), 'lxml.etree.XMLSchema', 'etree.XMLSchema', ([], {'file': 'the_schema'}), '(file=the_schema)\n', (1378, 1395), False, 'from lxml import etree\n'), ((607, 630), 'os.path.split', 'os.path.split', (['this_loc'], {}), '(this_loc)\n', (620, 630), False, 'import os\n'), ((787, 800), 'json.load', 'json.load', (['fi'], {}), '(fi)\n', (796, 800), False, 'import json\n'), ((1633, 1648), 'sarpy.io.complex.sicd.SICDReader', 'SICDReader', (['fil'], {}), '(fil)\n', (1643, 1648), False, 'from sarpy.io.complex.sicd import SICDReader\n'), ((1676, 1705), 'sarpy.processing.ortho_rectify.NearestNeighborMethod', 'NearestNeighborMethod', (['reader'], {}), '(reader)\n', (1697, 1705), False, 'from sarpy.processing.ortho_rectify import NearestNeighborMethod\n'), ((1774, 1792), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1790, 1792), False, 'import tempfile\n'), ((4914, 4943), 'shutil.rmtree', 'shutil.rmtree', (['temp_directory'], {}), '(temp_directory)\n', (4927, 4943), False, 'import shutil\n'), ((940, 963), 'tests.parse_file_entry', 'parse_file_entry', (['entry'], {}), '(entry)\n', (956, 963), False, 'from tests import parse_file_entry\n'), ((1981, 2078), 'sarpy.processing.sidd.sidd_product_creation.create_detected_image_sidd', 'create_detected_image_sidd', (['ortho_helper', 'temp_directory'], {'output_file': '"""di_1.nitf"""', 'version': '(1)'}), "(ortho_helper, temp_directory, output_file=\n 'di_1.nitf', version=1)\n", (2007, 2078), False, 'from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd\n'), ((2252, 2349), 'sarpy.processing.sidd.sidd_product_creation.create_detected_image_sidd', 'create_detected_image_sidd', (['ortho_helper', 'temp_directory'], {'output_file': '"""di_2.nitf"""', 'version': '(2)'}), "(ortho_helper, temp_directory, output_file=\n 'di_2.nitf', version=2)\n", (2278, 2349), False, 'from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd\n'), ((2546, 2632), 'sarpy.processing.sidd.sidd_product_creation.create_csi_sidd', 'create_csi_sidd', (['ortho_helper', 'temp_directory'], {'output_file': '"""csi_1.nitf"""', 'version': '(1)'}), "(ortho_helper, temp_directory, output_file='csi_1.nitf',\n version=1)\n", (2561, 2632), False, 'from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd\n'), ((2797, 2883), 'sarpy.processing.sidd.sidd_product_creation.create_csi_sidd', 'create_csi_sidd', (['ortho_helper', 'temp_directory'], {'output_file': '"""csi_2.nitf"""', 'version': '(2)'}), "(ortho_helper, temp_directory, output_file='csi_2.nitf',\n version=2)\n", (2812, 2883), False, 'from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd\n'), ((3100, 3213), 'sarpy.processing.sidd.sidd_product_creation.create_dynamic_image_sidd', 'create_dynamic_image_sidd', (['ortho_helper', 'temp_directory'], {'output_file': '"""sast_1.nitf"""', 'version': '(1)', 'frame_count': '(3)'}), "(ortho_helper, temp_directory, output_file=\n 'sast_1.nitf', version=1, frame_count=3)\n", (3125, 3213), False, 'from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd\n'), ((3392, 3505), 'sarpy.processing.sidd.sidd_product_creation.create_dynamic_image_sidd', 'create_dynamic_image_sidd', (['ortho_helper', 'temp_directory'], {'output_file': '"""sast_2.nitf"""', 'version': '(2)', 'frame_count': '(3)'}), "(ortho_helper, temp_directory, output_file=\n 'sast_2.nitf', version=2, frame_count=3)\n", (3417, 3505), False, 'from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd\n'), ((3990, 4027), 'os.path.join', 'os.path.join', (['temp_directory', 'the_fil'], {}), '(temp_directory, the_fil)\n', (4002, 4027), False, 'import os\n'), ((4346, 4383), 'os.path.join', 'os.path.join', (['temp_directory', 'the_fil'], {}), '(temp_directory, the_fil)\n', (4358, 4383), False, 'import os\n'), ((4692, 4729), 'os.path.join', 'os.path.join', (['temp_directory', 'the_fil'], {}), '(temp_directory, the_fil)\n', (4704, 4729), False, 'import os\n')]
|
import random
# use of the random module
print(random.random()) # a float value >= 0.0 and < 1.0
print(random.random()*100) # a float value >= 0.0 and < 100.0
# use of the randint method
print(random.randint(1, 100)) # an int from 1 to 100
print(random.randint(101, 200)) # an int from 101 to 200
print(random.randint(0, 7))        # an int from 0 to 7
die1 = random.randint(1, 6)
die2 = random.randint(1, 6)
print("Your roll: ", die1, die2)
print(random.randrange(1, 100)) # an int from 1 to 99
print(random.randrange(100, 200, 2)) # an even int from 100 to 198
print(random.randrange(11, 250, 2)) # an odd int from 11 to 249
|
[
"random.random",
"random.randint",
"random.randrange"
] |
[((385, 405), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (399, 405), False, 'import random\n'), ((413, 433), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (427, 433), False, 'import random\n'), ((48, 63), 'random.random', 'random.random', ([], {}), '()\n', (61, 63), False, 'import random\n'), ((209, 231), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (223, 231), False, 'import random\n'), ((268, 292), 'random.randint', 'random.randint', (['(101)', '(200)'], {}), '(101, 200)\n', (282, 292), False, 'import random\n'), ((329, 349), 'random.randint', 'random.randint', (['(0)', '(7)'], {}), '(0, 7)\n', (343, 349), False, 'import random\n'), ((474, 498), 'random.randrange', 'random.randrange', (['(1)', '(100)'], {}), '(1, 100)\n', (490, 498), False, 'import random\n'), ((540, 569), 'random.randrange', 'random.randrange', (['(100)', '(200)', '(2)'], {}), '(100, 200, 2)\n', (556, 569), False, 'import random\n'), ((614, 642), 'random.randrange', 'random.randrange', (['(11)', '(250)', '(2)'], {}), '(11, 250, 2)\n', (630, 642), False, 'import random\n'), ((113, 128), 'random.random', 'random.random', ([], {}), '()\n', (126, 128), False, 'import random\n')]
|
from PyInquirer import style_from_dict, Token, prompt, Separator
from lnt.graphics.utils import vars_to_string
# Mark styles
prompt_style = style_from_dict({
Token.Separator: '#6C6C6C',
Token.QuestionMark: '#FF9D00 bold',
#Token.Selected: '', # default
Token.Selected: '#5F819D',
Token.Pointer: '#FF9D00 bold',
Token.Instruction: '', # default
Token.Answer: '#5F819D bold',
Token.Question: '',
})
# Mark prompt configurations
def get_channel_choice_from(channels):
choices = [ {'name' : vars_to_string(c_id, c['local_balance'], c['remote_balance'], nick=None) } for c_id, c in channels.items() ]
validate = lambda answer: 'You must choose at least one channel' if len(answer) == 0 else True
return {
"type" : "checkbox",
"qmark": "⚡️",
"message" : "CHOOSE FROM nick, channel id, local_balance, remote_balace, graphic",
"name" : "channel_choices_from",
"choices" : choices,
"validate" : validate,
}
def get_channel_choice_to(channels):
choices = [ {'name' : vars_to_string(c_id, c['local_balance'],
c['remote_balance'], nick=None) } for c_id, c in channels.items() ]
return {
'type': 'list',
        'message': 'CHOOSE TO nick, channel id, local_balance, remote_balance, graphic',
"name" : "channel_choices_to",
'choices': choices
}
|
[
"lnt.graphics.utils.vars_to_string",
"PyInquirer.style_from_dict"
] |
[((141, 365), 'PyInquirer.style_from_dict', 'style_from_dict', (["{Token.Separator: '#6C6C6C', Token.QuestionMark: '#FF9D00 bold', Token.\n Selected: '#5F819D', Token.Pointer: '#FF9D00 bold', Token.Instruction:\n '', Token.Answer: '#5F819D bold', Token.Question: ''}"], {}), "({Token.Separator: '#6C6C6C', Token.QuestionMark:\n '#FF9D00 bold', Token.Selected: '#5F819D', Token.Pointer:\n '#FF9D00 bold', Token.Instruction: '', Token.Answer: '#5F819D bold',\n Token.Question: ''})\n", (156, 365), False, 'from PyInquirer import style_from_dict, Token, prompt, Separator\n'), ((529, 601), 'lnt.graphics.utils.vars_to_string', 'vars_to_string', (['c_id', "c['local_balance']", "c['remote_balance']"], {'nick': 'None'}), "(c_id, c['local_balance'], c['remote_balance'], nick=None)\n", (543, 601), False, 'from lnt.graphics.utils import vars_to_string\n'), ((1069, 1141), 'lnt.graphics.utils.vars_to_string', 'vars_to_string', (['c_id', "c['local_balance']", "c['remote_balance']"], {'nick': 'None'}), "(c_id, c['local_balance'], c['remote_balance'], nick=None)\n", (1083, 1141), False, 'from lnt.graphics.utils import vars_to_string\n')]
|
# -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.orm import load_only
from sqlalchemy import func
from flask import abort
from markdown import Markdown,markdown
from app.models import db,fragment_tags_table
from app.models.tag import Tag
from app.whoosh import search_helper
class Fragment(db.Model):
    '''Knowledge fragment'''
__tablename__ = 'fragment'
__table_args__ = {
"mysql_engine": "InnoDB",
"mysql_charset": "utf8"
}
id = db.Column(db.Integer,nullable=False,primary_key=True,autoincrement=True)
title = db.Column(db.String(255),nullable=False,default="",index=True)
access = db.Column(db.Integer,nullable=False,default=1)
status = db.Column(db.Integer,nullable=False,default=0)
markdown = db.deferred(db.Column(LONGTEXT,default="",nullable=False))
html = db.deferred(db.Column(LONGTEXT,default="",nullable=False))
publish_markdown = db.deferred(db.Column(LONGTEXT,default="",nullable=False))
publish_html = db.deferred(db.Column(LONGTEXT,default="",nullable=False))
publish_timestamp = db.Column(db.DateTime,default=datetime.now,nullable=False)
updatetime = db.Column(db.DateTime,default=datetime.now,nullable=False)
user_id = db.Column(db.Integer,db.ForeignKey('user.id'))
tags = db.relationship('Tag',secondary=fragment_tags_table,backref=db.backref('fragments'))
# branch = db.relationship('Branch',back_populates='fragment',uselist=False)
branch_id = db.Column(db.Integer,db.ForeignKey('branch.id'))
# branch = db.relationship('Branch',foreign_keys=branch_id)
def get(self,id):
return Fragment.query.get(id)
@staticmethod
def get_or_404(id):
fragment = Fragment.query.get(id)
if fragment:
return fragment
abort(404)
def save(self):
self.html = self.markdown2html(self.markdown)
db.session.add(self)
db.session.commit()
search_helper.add_document(self.title,str(self.id),self.markdown)
def markdown2html(self,content):
# md = Markdown(['codehilite', 'fenced_code', 'meta', 'tables'])
# html = md.convert(content)
html = markdown(content,extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
])
return html
@staticmethod
def get_nearest_fragments(num=5):
fragments = Fragment.query.filter().order_by(Fragment.updatetime.desc()).limit(num)
res = []
from app.models.branch import Branch
for fragment in fragments:
fragment.branch = Branch.get(fragment.branch_id)
res.append(fragment)
return res
|
[
"markdown.markdown",
"app.models.db.ForeignKey",
"app.models.branch.Branch.get",
"app.models.db.String",
"app.models.db.Column",
"app.models.db.session.commit",
"app.models.db.session.add",
"flask.abort",
"app.models.db.backref"
] |
[((522, 597), 'app.models.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)', 'primary_key': '(True)', 'autoincrement': '(True)'}), '(db.Integer, nullable=False, primary_key=True, autoincrement=True)\n', (531, 597), False, 'from app.models import db, fragment_tags_table\n'), ((683, 731), 'app.models.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)', 'default': '(1)'}), '(db.Integer, nullable=False, default=1)\n', (692, 731), False, 'from app.models import db, fragment_tags_table\n'), ((743, 791), 'app.models.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)', 'default': '(0)'}), '(db.Integer, nullable=False, default=0)\n', (752, 791), False, 'from app.models import db, fragment_tags_table\n'), ((1118, 1178), 'app.models.db.Column', 'db.Column', (['db.DateTime'], {'default': 'datetime.now', 'nullable': '(False)'}), '(db.DateTime, default=datetime.now, nullable=False)\n', (1127, 1178), False, 'from app.models import db, fragment_tags_table\n'), ((1194, 1254), 'app.models.db.Column', 'db.Column', (['db.DateTime'], {'default': 'datetime.now', 'nullable': '(False)'}), '(db.DateTime, default=datetime.now, nullable=False)\n', (1203, 1254), False, 'from app.models import db, fragment_tags_table\n'), ((617, 631), 'app.models.db.String', 'db.String', (['(255)'], {}), '(255)\n', (626, 631), False, 'from app.models import db, fragment_tags_table\n'), ((817, 864), 'app.models.db.Column', 'db.Column', (['LONGTEXT'], {'default': '""""""', 'nullable': '(False)'}), "(LONGTEXT, default='', nullable=False)\n", (826, 864), False, 'from app.models import db, fragment_tags_table\n'), ((887, 934), 'app.models.db.Column', 'db.Column', (['LONGTEXT'], {'default': '""""""', 'nullable': '(False)'}), "(LONGTEXT, default='', nullable=False)\n", (896, 934), False, 'from app.models import db, fragment_tags_table\n'), ((969, 1016), 'app.models.db.Column', 'db.Column', (['LONGTEXT'], {'default': '""""""', 'nullable': '(False)'}), "(LONGTEXT, default='', nullable=False)\n", (978, 1016), False, 'from app.models import db, fragment_tags_table\n'), ((1047, 1094), 'app.models.db.Column', 'db.Column', (['LONGTEXT'], {'default': '""""""', 'nullable': '(False)'}), "(LONGTEXT, default='', nullable=False)\n", (1056, 1094), False, 'from app.models import db, fragment_tags_table\n'), ((1289, 1313), 'app.models.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (1302, 1313), False, 'from app.models import db, fragment_tags_table\n'), ((1529, 1555), 'app.models.db.ForeignKey', 'db.ForeignKey', (['"""branch.id"""'], {}), "('branch.id')\n", (1542, 1555), False, 'from app.models import db, fragment_tags_table\n'), ((1824, 1834), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (1829, 1834), False, 'from flask import abort\n'), ((1918, 1938), 'app.models.db.session.add', 'db.session.add', (['self'], {}), '(self)\n', (1932, 1938), False, 'from app.models import db, fragment_tags_table\n'), ((1947, 1966), 'app.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1964, 1966), False, 'from app.models import db, fragment_tags_table\n'), ((2204, 2328), 'markdown.markdown', 'markdown', (['content'], {'extensions': "['markdown.extensions.extra', 'markdown.extensions.codehilite',\n 'markdown.extensions.toc']"}), "(content, extensions=['markdown.extensions.extra',\n 'markdown.extensions.codehilite', 'markdown.extensions.toc'])\n", (2212, 2328), False, 'from markdown import Markdown, markdown\n'), ((1386, 1409), 'app.models.db.backref', 'db.backref', (['"""fragments"""'], {}), "('fragments')\n", (1396, 1409), False, 'from app.models import db, fragment_tags_table\n'), ((2767, 2797), 'app.models.branch.Branch.get', 'Branch.get', (['fragment.branch_id'], {}), '(fragment.branch_id)\n', (2777, 2797), False, 'from app.models.branch import Branch\n')]
|
from .constants import SUCCESS_KEY, MESSAGE_KEY, DATA_KEY
from cloudygram_api_server.scripts import CGMessage
from typing import List
class TtModels:
@staticmethod
def sing_in_failure(message) -> dict:
return {
SUCCESS_KEY : False,
MESSAGE_KEY : message
}
@staticmethod
def send_code_failure(message) -> dict:
return {
SUCCESS_KEY : False,
MESSAGE_KEY : message
}
@staticmethod
def message_list(messages) -> dict:
mapped_messages: List[str] = []
for m in messages:
mapped_messages.append(CGMessage.map_from_tt(m))
return {
SUCCESS_KEY: True,
DATA_KEY: mapped_messages
}
|
[
"cloudygram_api_server.scripts.CGMessage.map_from_tt"
] |
[((624, 648), 'cloudygram_api_server.scripts.CGMessage.map_from_tt', 'CGMessage.map_from_tt', (['m'], {}), '(m)\n', (645, 648), False, 'from cloudygram_api_server.scripts import CGMessage\n')]
|
from collections import namedtuple
class SSParser:
"""
Create a SS block from PDB data.
Written to be agnostic of PDB parser, but for now only has PyMOL.
.. code-block:: python
import pymol2
with pymol2.PyMOL() as pymol:
pymol.cmd.load('model.pdb', 'prot')
ss = SSParser().parse_pymol(pymol.cmd)
print(ss)
# or
SSParser.correct_file('model.pdb', True)
Do note that the lines seem offset because SHEET has a name parameter.
HELIX 1 HA GLY A 86 GLY A 94 1 9
SHEET 5 A 5 GLY A 52 PHE A 56 -1 N PHE A 56 O TRP A 71
SHEET 1 B 5 THR B 107 ARG B 110 0
"""
# faux pymol atom
Atom = namedtuple('Atom', ['ss', 'resi', 'resn', 'chain'])
def __init__(self):
# none of the attributes are actually public.
self.ss = []
self.start = self.Atom('L', 0, 'XXX', 'X')
self.previous = self.Atom('L', 0, 'XXX', 'X')
self.ss_count = {'H': 1, 'S': 1, 'L': 0}
def parse_pymol(self, cmd, selector: str = 'name ca') -> str:
atoms = list(cmd.get_model(selector).atom)
return self.parse(atoms)
def parse(self, atoms: list) -> str:
"""
atoms is a list of objects with 'ss', 'resi', 'resn'.
one per residue (CA).
        This does not collapse the list into a list of ranges, as resn is also required, etc.
:param atoms:
:return:
"""
for current in atoms:
if self.previous.ss != current.ss or self.previous.chain != current.chain: # different
self._store_ss() # the previous ss has come to an end.
# deal with current
if current.ss in ('S', 'H'): # start of a new
self.start = current
# move on
self.previous = current
self._store_ss()
return str(self)
def _store_ss(self):
"""
The SS sequence has come to an end: store it.
:return:
"""
if self.previous.ss == '':
return # not AA?
if int(self.previous.resi) == int(self.start.resi) + 1:
return # too short
cc = self.ss_count[self.previous.ss]
        if self.previous.ss == 'H':  # the run that just ended was a helix
self.ss.append(
f'HELIX {cc: >3} {cc: >3} ' +
f'{self.start.resn} {self.start.chain} {self.start.resi: >4} ' +
f'{self.previous.resn} {self.previous.chain} {self.previous.resi: >4} 1' +
' ' +
f'{int(self.previous.resi) - int(self.start.resi): >2}'
)
self.ss_count[self.previous.ss] += 1
        elif self.previous.ss == 'S':  # the run that just ended was a sheet
self.ss.append(
f'SHEET {cc: >3} {cc: >2}S 1 ' +
f'{self.start.resn} {self.start.chain}{self.start.resi: >4} ' +
f'{self.previous.resn} {self.previous.chain}{self.previous.resi: >4} 0')
self.ss_count[self.previous.ss] += 1
else:
# loop? Nothing.
pass
def __str__(self):
return '\n'.join(self.ss) +'\n'
@classmethod
def correct_file(cls, filename: str, write:bool=True):
import pymol2
with pymol2.PyMOL() as pymol:
pymol.cmd.load(filename, 'prot')
ss = cls().parse_pymol(pymol.cmd)
with open(filename, 'r') as fh:
block = fh.read()
if write:
with open(filename, 'w') as fh:
fh.write(ss + block)
return ss + block
@classmethod
def correct_block(cls, block: str):
import pymol2
with pymol2.PyMOL() as pymol:
pymol.cmd.read_pdbstr(block, 'prot')
ss = cls().parse_pymol(pymol.cmd)
return ss + block
|
[
"collections.namedtuple",
"pymol2.PyMOL"
] |
[((754, 805), 'collections.namedtuple', 'namedtuple', (['"""Atom"""', "['ss', 'resi', 'resn', 'chain']"], {}), "('Atom', ['ss', 'resi', 'resn', 'chain'])\n", (764, 805), False, 'from collections import namedtuple\n'), ((3384, 3398), 'pymol2.PyMOL', 'pymol2.PyMOL', ([], {}), '()\n', (3396, 3398), False, 'import pymol2\n'), ((3789, 3803), 'pymol2.PyMOL', 'pymol2.PyMOL', ([], {}), '()\n', (3801, 3803), False, 'import pymol2\n')]
|
from django.urls import reverse
from Net640.settings import FRONTEND_DATE_FORMAT
class AsDictMessageMixin:
"""
Mixin for representing user messages(post, comments) as dictionaries
"""
def as_dict(self, executor):
return {'content': self.content,
'user_has_like': self.has_like(executor),
'is_owner': self.user == executor,
'rating': round(self.get_rating(), 1),
'author': self.user.username,
'author_page': reverse('friends:user_view', kwargs={'user_id': self.user.id}),
'date': self.date.strftime(FRONTEND_DATE_FORMAT),
'id': self.id,
'author_thumbnail_url': self.user.get_thumbnail_url(), }
|
[
"django.urls.reverse"
] |
[((514, 576), 'django.urls.reverse', 'reverse', (['"""friends:user_view"""'], {'kwargs': "{'user_id': self.user.id}"}), "('friends:user_view', kwargs={'user_id': self.user.id})\n", (521, 576), False, 'from django.urls import reverse\n')]
|
"""A module for converting a data source to TFRecords."""
import os
import json
import copy
import csv
from pathlib import Path
from shutil import rmtree
import PIL.Image as Image
import tensorflow as tf
from tqdm import tqdm
from .feature import items_to_features
from .errors import DirNotFoundError, InvalidDatasetFormat
from ..config import IMAGE_WIDTH, IMAGE_HEIGHT, DATASET_DIR, TFRECORDS_SIZE
# ------------------------------------------------------------------------------
# CSV/COCO Dataset Detectors
# ------------------------------------------------------------------------------
def is_csv_input(input_dir: Path) -> bool:
"""
Tests if the input directory represents CSV dataset format.
Args:
input_dir (Path):
The input directory to test.
Returns:
status (bool):
Returns `True` if the input directory represents CSV dataset
format and `False` otherwise.
"""
return set(os.listdir(input_dir)) == set(
[
"images",
"instances_train.csv",
"instances_test.csv",
"instances_val.csv",
"categories.json",
]
)
def is_coco_input(input_dir: Path) -> bool:
"""
Tests if the input directory represents COCO dataset format.
Args:
input_dir (Path):
The input directory to test.
Returns:
status (bool):
Returns `True` if the input directory represents COCO dataset
format and `False` otherwise.
"""
root_artifacts = os.listdir(input_dir)
if "annotations" in root_artifacts:
annotations_artifacts = os.listdir(input_dir / "annotations")
stems_artifacts = [
Path(artifact).stem for artifact in annotations_artifacts
]
return set(stems_artifacts).issubset(set(root_artifacts))
return False
# ------------------------------------------------------------------------------
# CSV/COCO Dataset Iterators
# ------------------------------------------------------------------------------
class CategoriesMap:
"""
A dictionary-like object for intelligently mapping categories.
The goal of this class is to remap user-specified categories for the
compact one-hot encoding. Let's review a simple example. Assume, the
original data has images that include objects belonging to the two
categories: 15 and 20. If we do not remap these categories, then we
need to create one-hot with length `max(15,20) + 1 = 21` (plus one
is to allow one additional category "no object"). This creates
unnecessary overhead during the model training. The most intuitive
solution would be to remap the original categories to the following
`{15: 1, 20: 2}`. In this case, the one-hot encoding length would be
`max(1,2) + 1 = 3` .
To initiate remapping the `selected_categories` argument should be
defined. All selected category IDs will be sorted in ascending order
with the consequent re-assignment to the new IDs. For example, let's
assume the specified selected categories are `[12, 5, 3, 23]`, after
sorting this list will have the following `[3, 5, 12, 23]` and the
remapping `{3: 1, 5: 2, 12: 3, 23: 4}`.
    If the `selected_categories` argument is defined, the following
    operation `map[ORIGINAL_ID]` returns `NEW_ID` (remapped category ID).
    If the `selected_categories` argument is not defined, the following
    operation `map[ORIGINAL_ID]` returns `ORIGINAL_ID` (in other words
    it remaps the ID to itself).
Args:
selected_categories (list):
The list of categories to map.
"""
def __init__(self, selected_categories: list):
self.__categories_mapping = {}
if len(selected_categories) > 0:
for new_category_id, old_category_id in enumerate(
sorted(selected_categories)
):
self.__categories_mapping[old_category_id] = (
new_category_id + 1
)
def __getitem__(self, category_id):
"""Returns the remapped category ID."""
if self.__categories_mapping:
return self.__categories_mapping[category_id]
else:
return category_id
def __contains__(self, category_id):
"""Tests if the specified category ID in the map."""
if self.__categories_mapping:
return category_id in self.__categories_mapping
else:
return True
class DatasetIterator:
"""
The base class for dataset records iterator.
Args:
records (list):
The list with records to iterate.
image_dir (Path):
The base path for loading images.
"""
def __init__(self, records: list, image_dir: Path):
self.__records = records
self.__image_dir = image_dir
self.__size = len(self.__records)
self.__pointer = 0
def __iter__(self):
"""Returns the dataset records iterator."""
return self
def __len__(self):
"""Returns a number of records in the dataset."""
return self.__size
def __next__(self):
"""Returns the next record."""
if self.__pointer >= self.__size:
raise StopIteration
record = self.__records[self.__pointer]
record["image"]["data"] = Image.open(
self.__image_dir / record["image"]["file_name"]
)
self.__pointer += 1
return record
class CsvIterator(DatasetIterator):
"""
The CSV dataset iterator.
Args:
instance_file (Path):
The path to the `csv`-file with records to iterate.
selected_categories (list):
The list of category IDs on which iteration should take place.
            If an image within a record does not contain a selected category,
            it is skipped. If the selected category IDs are not defined, then
            iteration goes over all images.
"""
def __init__(self, instance_file: Path, selected_categories: list):
categories_map = CategoriesMap(selected_categories)
categories = dict()
with open(instance_file.parent / "categories.json") as fp:
for category in json.load(fp)["categories"]:
category_id = category["id"]
if category_id in categories_map:
                    # Remaps the old category ID to the new one.
new_category = copy.deepcopy(category)
new_category["id"] = categories_map[category["id"]]
categories[new_category["id"]] = new_category
records = []
with open(instance_file, newline="\n") as csv_fp:
csv_reader = csv.DictReader(csv_fp, delimiter=",", quotechar='"')
for row in csv_reader:
annotations = []
for bbox, segmentation, category_id in zip(
json.loads(row["bboxes"]),
json.loads(row["segmentations"]),
json.loads(row["category_ids"]),
):
if category_id in categories_map:
annotations.append(
{
"bbox": bbox,
"iscrowd": 0,
"segmentation": [segmentation],
"category_id": categories_map[category_id],
}
)
# Here we discard all images which do not have any
# annotations for the selected categories.
if len(annotations) > 0:
records.append(
{
"image": {
"id": int(row["image_id"]),
"file_name": row["file_name"],
},
"annotations": annotations,
"categories": categories,
}
)
super().__init__(records, instance_file.parent / "images")
class CocoIterator(DatasetIterator):
"""
The COCO dataset iterator.
Args:
instance_file (Path):
The path to the `json`-file with records to iterate.
selected_categories (list):
The list of category IDs on which iteration should take place.
            If an image within a record does not contain a selected category,
            it is skipped. If the selected category IDs are not defined, then
            iteration goes over all images.
"""
def __init__(self, instance_file: Path, selected_categories: list):
categories_map = CategoriesMap(selected_categories)
with open(instance_file) as f:
content = json.load(f)
annotations = dict()
for annotation in content["annotations"]:
category_id = annotation["category_id"]
if category_id in categories_map:
image_id = annotation["image_id"]
if image_id not in annotations:
annotations[image_id] = []
                # Remaps the old category ID to the new one.
new_annotation = copy.deepcopy(annotation)
new_annotation["category_id"] = categories_map[category_id]
annotations[image_id].append(new_annotation)
categories = dict()
for category in content["categories"]:
category_id = category["id"]
if category_id in categories_map:
                # Remaps the old category ID to the new one.
new_category = copy.deepcopy(category)
new_category["id"] = categories_map[category_id]
categories[new_category["id"]] = new_category
records = []
for image in content["images"]:
if image["id"] in annotations:
records.append(
{
"image": image,
"annotations": annotations[image["id"]],
"categories": categories,
}
)
super().__init__(
records, instance_file.parent.parent / instance_file.stem
)
# ------------------------------------------------------------------------------
# Dataset to TFRecords Transformer
# ------------------------------------------------------------------------------
def instances_to_tfrecords(
instance_file: Path,
output_dir: Path,
items: DatasetIterator,
size: int,
image_width: int,
image_height: int,
verbose: bool,
):
"""
    Convert instances to TFRecords.
Args:
instance_file (Path):
The path to the instance file to read data from.
output_dir (Path):
The path to the output directory to save generated TFRecords.
items (DatasetIterator):
The CSV or COCO dataset iterator.
size (int):
            The number of images per partition.
image_width (int):
The TFRecords image width resize to.
image_height (int):
The TFRecords image height resize to.
verbose (bool):
The flag to set verbose mode.
"""
def get_example(item):
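        # Converts one dataset record (image + annotations) into a tf.train.Example via items_to_features.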
image_id = item["image"]["id"]
img = item["image"]["data"]
annotations = item["annotations"]
categories = item["categories"]
category_max_id = max(list(categories.keys()))
bboxes = []
segmentations = []
category_ids = []
for annotation in annotations:
if annotation["iscrowd"] == 0:
bboxes.append(annotation["bbox"])
segmentations.append(annotation["segmentation"][0])
category_ids.append(annotation["category_id"])
feature = items_to_features(
image_id,
img,
image_width,
image_height,
bboxes,
segmentations,
category_ids,
category_max_id,
)
return tf.train.Example(features=tf.train.Features(feature=feature))
tfrecords_dir = output_dir / instance_file.stem
tfrecords_dir.mkdir(exist_ok=True)
# The TFRecords writer.
writer = None
# The index for the next TFRecords partition.
part_index = -1
    # The count of how many records are stored in the current TFRecords file. It
    # is set here to maximum capacity (as a trick) so that the "if"
    # condition in the loop evaluates to True and partition 0 is started.
part_count = size
    # Initializes the progress bar if verbose mode is on.
if verbose:
pbar = tqdm(total=len(items))
for item in items:
if item:
if part_count >= size:
                # The current partition has reached the maximum capacity,
# so we need to start a new one.
if writer is not None:
# Closes the existing TFRecords writer.
writer.close()
part_index += 1
writer = tf.io.TFRecordWriter(
str(tfrecords_dir / f"part-{part_index}.tfrecord")
)
part_count = 0
example = get_example(item)
if example:
writer.write(example.SerializeToString())
part_count += 1
            # Updates the progress bar if verbose mode is on.
if verbose:
pbar.update(1)
# Closes the existing TFRecords writer after the last row.
writer.close()
def create_tfrecords(
dataset_dir: str = DATASET_DIR,
tfrecords_dir: str = None,
size: int = TFRECORDS_SIZE,
image_width: int = IMAGE_WIDTH,
image_height: int = IMAGE_HEIGHT,
selected_categories: list = [],
verbose: bool = False,
):
"""
    This function transforms a CSV or COCO dataset to TFRecords.
Args:
dataset_dir (str):
The path to the data set directory to transform.
tfrecords_dir (str):
The path to the output directory to save generated TFRecords.
size (int):
            The number of images per partition.
image_width (int):
The TFRecords image width resize to.
image_height (int):
The TFRecords image height resize to.
selected_categories (list):
The list of selected category IDs.
verbose (bool):
The flag to set verbose mode.
Raises:
DirNotFoundError:
If input or output directories do not exist.
InvalidDatasetFormat:
If the input dataset has invalid CSV or COCO format.
"""
input_dir = Path(dataset_dir)
if not input_dir.exists():
raise DirNotFoundError("input dataset", input_dir)
if tfrecords_dir is None:
output_dir = input_dir.parent / (input_dir.name + "-tfrecords")
else:
output_dir = Path(tfrecords_dir)
if not output_dir.parent.exists():
raise DirNotFoundError("parent (to output)", output_dir.parent)
if output_dir.exists():
rmtree(output_dir)
output_dir.mkdir(exist_ok=True)
if is_csv_input(input_dir):
for instance_file in input_dir.rglob("*.csv"):
instances_to_tfrecords(
instance_file,
output_dir,
CsvIterator(instance_file, selected_categories),
size,
image_width,
image_height,
verbose,
)
elif is_coco_input(input_dir):
for instance_file in (input_dir / "annotations").rglob("*.json"):
instances_to_tfrecords(
instance_file,
output_dir,
CocoIterator(instance_file, selected_categories),
size,
image_width,
image_height,
verbose,
)
else:
raise InvalidDatasetFormat()
|
[
"json.loads",
"csv.DictReader",
"os.listdir",
"PIL.Image.open",
"copy.deepcopy",
"pathlib.Path",
"tensorflow.train.Features",
"shutil.rmtree",
"json.load"
] |
[((1557, 1578), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (1567, 1578), False, 'import os\n'), ((14752, 14769), 'pathlib.Path', 'Path', (['dataset_dir'], {}), '(dataset_dir)\n', (14756, 14769), False, 'from pathlib import Path\n'), ((1651, 1688), 'os.listdir', 'os.listdir', (["(input_dir / 'annotations')"], {}), "(input_dir / 'annotations')\n", (1661, 1688), False, 'import os\n'), ((5374, 5433), 'PIL.Image.open', 'Image.open', (["(self.__image_dir / record['image']['file_name'])"], {}), "(self.__image_dir / record['image']['file_name'])\n", (5384, 5433), True, 'import PIL.Image as Image\n'), ((14994, 15013), 'pathlib.Path', 'Path', (['tfrecords_dir'], {}), '(tfrecords_dir)\n', (14998, 15013), False, 'from pathlib import Path\n'), ((15170, 15188), 'shutil.rmtree', 'rmtree', (['output_dir'], {}), '(output_dir)\n', (15176, 15188), False, 'from shutil import rmtree\n'), ((966, 987), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (976, 987), False, 'import os\n'), ((6744, 6796), 'csv.DictReader', 'csv.DictReader', (['csv_fp'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(csv_fp, delimiter=\',\', quotechar=\'"\')\n', (6758, 6796), False, 'import csv\n'), ((8853, 8865), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8862, 8865), False, 'import json\n'), ((1729, 1743), 'pathlib.Path', 'Path', (['artifact'], {}), '(artifact)\n', (1733, 1743), False, 'from pathlib import Path\n'), ((6257, 6270), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (6266, 6270), False, 'import json\n'), ((9280, 9305), 'copy.deepcopy', 'copy.deepcopy', (['annotation'], {}), '(annotation)\n', (9293, 9305), False, 'import copy\n'), ((9694, 9717), 'copy.deepcopy', 'copy.deepcopy', (['category'], {}), '(category)\n', (9707, 9717), False, 'import copy\n'), ((12173, 12207), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (12190, 12207), True, 'import tensorflow as tf\n'), ((6477, 6500), 'copy.deepcopy', 'copy.deepcopy', (['category'], {}), '(category)\n', (6490, 6500), False, 'import copy\n'), ((6945, 6970), 'json.loads', 'json.loads', (["row['bboxes']"], {}), "(row['bboxes'])\n", (6955, 6970), False, 'import json\n'), ((6992, 7024), 'json.loads', 'json.loads', (["row['segmentations']"], {}), "(row['segmentations'])\n", (7002, 7024), False, 'import json\n'), ((7046, 7077), 'json.loads', 'json.loads', (["row['category_ids']"], {}), "(row['category_ids'])\n", (7056, 7077), False, 'import json\n')]
|
import numpy as np
import scipy.special as ss
import pathlib
from Particle import Particle
def ql_global(l, particles):
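    # Computes a global bond-orientational order parameter: per-particle qlm vectors are
    # averaged (weighted by neighbor count), then Ql = sqrt(4*pi/(2*l+1) * |Qlm|^2).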
# Keep only particles that have neighbors (this was changed 5/23/2020)
particles = [i for i in particles if len(Particle.data[i].neighs)>0]
neigh_total = sum([len(Particle.data[i].neighs) for i in particles])
if isinstance(l, int):
if len(particles)!=0:
# average slmbar weighted by the number of neighbors
Qlmbar = list(sum([np.array(Particle.data[p].qlmbar[l], dtype=complex)*len(Particle.data[p].neighs)/neigh_total for p in particles]))
Qlmtilde = list(sum([np.array(Particle.data[p].qlmtilde[l], dtype=complex)*len(Particle.data[p].neighs)/neigh_total for p in particles]))
if l in Particle.qlmbar_ideal:
Ql = np.abs(np.sqrt((4*np.pi/(2*l+1))*np.vdot(np.array(Qlmtilde, dtype=complex), np.array(Qlmtilde, dtype=complex))))
else:
Qlmbar_mag_sq = np.abs(np.vdot(np.array(Qlmbar, dtype=complex), np.array(Qlmbar, dtype=complex)))
Ql = np.abs(np.sqrt((4*np.pi/(2*l+1))*Qlmbar_mag_sq))
D = np.sqrt(Qlmbar_mag_sq)
else:
Qlmbar = [0]*(2*l+1)
Qlmtilde = [0]*(2*l+1)
Ql = 0.0
return [Ql, Qlmbar, Qlmtilde]
|
[
"numpy.array",
"numpy.sqrt"
] |
[((1087, 1109), 'numpy.sqrt', 'np.sqrt', (['Qlmbar_mag_sq'], {}), '(Qlmbar_mag_sq)\n', (1094, 1109), True, 'import numpy as np\n'), ((1036, 1084), 'numpy.sqrt', 'np.sqrt', (['(4 * np.pi / (2 * l + 1) * Qlmbar_mag_sq)'], {}), '(4 * np.pi / (2 * l + 1) * Qlmbar_mag_sq)\n', (1043, 1084), True, 'import numpy as np\n'), ((952, 983), 'numpy.array', 'np.array', (['Qlmbar'], {'dtype': 'complex'}), '(Qlmbar, dtype=complex)\n', (960, 983), True, 'import numpy as np\n'), ((985, 1016), 'numpy.array', 'np.array', (['Qlmbar'], {'dtype': 'complex'}), '(Qlmbar, dtype=complex)\n', (993, 1016), True, 'import numpy as np\n'), ((479, 530), 'numpy.array', 'np.array', (['Particle.data[p].qlmbar[l]'], {'dtype': 'complex'}), '(Particle.data[p].qlmbar[l], dtype=complex)\n', (487, 530), True, 'import numpy as np\n'), ((618, 671), 'numpy.array', 'np.array', (['Particle.data[p].qlmtilde[l]'], {'dtype': 'complex'}), '(Particle.data[p].qlmtilde[l], dtype=complex)\n', (626, 671), True, 'import numpy as np\n'), ((831, 864), 'numpy.array', 'np.array', (['Qlmtilde'], {'dtype': 'complex'}), '(Qlmtilde, dtype=complex)\n', (839, 864), True, 'import numpy as np\n'), ((866, 899), 'numpy.array', 'np.array', (['Qlmtilde'], {'dtype': 'complex'}), '(Qlmtilde, dtype=complex)\n', (874, 899), True, 'import numpy as np\n')]
|
import os
import time
import datetime
def get_current_epoch():
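    # Current UTC time as integer milliseconds since the Unix epoch.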
return int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
def get_sleep_parameter(event):
user_input = str(event.query["sleep"])
if not user_input or not user_input.isdigit() or int(user_input) < 0:
return {"error": "invalid sleep parameter"}
return int(user_input)
def run_test(sleep_time):
time.sleep(sleep_time / 1000.0)
def is_warm():
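    # Warm-start detection: the "warm" env var persists only while the same runtime container is reused.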
is_warm = os.environ.get("warm") == "true"
os.environ["warm"] = "true"
return is_warm
def handle(event):
start = get_current_epoch()
reused = is_warm()
sleep_time = get_sleep_parameter(event)
if type(sleep_time) != int:
return {
"statusCode": 200,
"body": sleep_time
}
run_test(sleep_time)
duration = (get_current_epoch() - start) * 1000000
return {
"statusCode": 200,
"body": {
"duration": duration,
"reused": reused
}
}
|
[
"datetime.datetime",
"os.environ.get",
"time.sleep",
"datetime.datetime.utcnow"
] |
[((424, 455), 'time.sleep', 'time.sleep', (['(sleep_time / 1000.0)'], {}), '(sleep_time / 1000.0)\n', (434, 455), False, 'import time\n'), ((486, 508), 'os.environ.get', 'os.environ.get', (['"""warm"""'], {}), "('warm')\n", (500, 508), False, 'import os\n'), ((80, 106), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (104, 106), False, 'import datetime\n'), ((109, 138), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (126, 138), False, 'import datetime\n')]
|
# This script loads the pre-trained scaler and models and contains the
# predict_smile() function to take in an image and return smile predictions
import joblib
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, array_to_img
from PIL import Image
import numpy as np
# Set new frame size dimensions
img_width, img_height = (100, 100)
# Scaler and model imports
scaler = joblib.load('./models/scaler.save')
model = load_model('./models/my_model.h5')
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
def predict_smile(gray_img, box, count):
"""Make prediction on a new image whether a person is smiling or not.
Parameters
----------
gray_img : numpy.ndarray of dtype int
Grayscale image in numpy.ndarray of current frame.
box : tuple
(left, top, right, bottom) locating face bounding box in pixel locations.
count : int
Number of faces detected in current frame.
Returns
-------
numpy.ndarray of dtype float
Probabilities of no smile (second number) and smile (first number).
i.e. array([[0.972528 , 0.02747207]], dtype=float32)
"""
# Save a copy of current frame
gray_img = gray_img.reshape(gray_img.shape+(1,)) # (height, width, 1)
array_to_img(gray_img).save(f'./images/temp/current_frame_{count}.jpg')
# Load image
gray_img = Image.open(f'./images/temp/current_frame_{count}.jpg')
# Crop face, resize to 100x100 pixels, and save a copy
face_crop = gray_img.resize((img_width, img_height), box=box)
face_crop.save(f'./images/temp/face_crop_current_frame_{count}.jpg')
# Load image and convert to np.array
face_crop = Image.open(f'./images/temp/face_crop_current_frame_{count}.jpg')
new_face_array = np.array(img_to_array(face_crop)) # (100, 100, 1)
# Reshape
new_face_array = new_face_array.reshape(1, img_width*img_height) # (1, 10_000)
# Transform with pre-trained scaler
new_face_array = scaler.transform(new_face_array)
new_face_array = new_face_array.reshape(1, img_width, img_height, 1) # (1, 100, 100, 1)
return model.predict(new_face_array)
|
[
"PIL.Image.open",
"tensorflow.keras.preprocessing.image.array_to_img",
"tensorflow.keras.models.load_model",
"joblib.load",
"tensorflow.keras.preprocessing.image.img_to_array"
] |
[((431, 466), 'joblib.load', 'joblib.load', (['"""./models/scaler.save"""'], {}), "('./models/scaler.save')\n", (442, 466), False, 'import joblib\n'), ((475, 509), 'tensorflow.keras.models.load_model', 'load_model', (['"""./models/my_model.h5"""'], {}), "('./models/my_model.h5')\n", (485, 509), False, 'from tensorflow.keras.models import load_model\n'), ((1445, 1499), 'PIL.Image.open', 'Image.open', (['f"""./images/temp/current_frame_{count}.jpg"""'], {}), "(f'./images/temp/current_frame_{count}.jpg')\n", (1455, 1499), False, 'from PIL import Image\n'), ((1757, 1821), 'PIL.Image.open', 'Image.open', (['f"""./images/temp/face_crop_current_frame_{count}.jpg"""'], {}), "(f'./images/temp/face_crop_current_frame_{count}.jpg')\n", (1767, 1821), False, 'from PIL import Image\n'), ((1852, 1875), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['face_crop'], {}), '(face_crop)\n', (1864, 1875), False, 'from tensorflow.keras.preprocessing.image import img_to_array, array_to_img\n'), ((1340, 1362), 'tensorflow.keras.preprocessing.image.array_to_img', 'array_to_img', (['gray_img'], {}), '(gray_img)\n', (1352, 1362), False, 'from tensorflow.keras.preprocessing.image import img_to_array, array_to_img\n')]
|
import sys
import webbrowser
import os
from comicstreamerlib.folders import AppFolders
from PyQt4 import QtGui,QtCore
class SystemTrayIcon(QtGui.QSystemTrayIcon):
def __init__(self, icon, app):
QtGui.QSystemTrayIcon.__init__(self, icon, None)
self.app = app
self.menu = QtGui.QMenu(None)
exitAction = self.menu.addAction("Exit")
self.setContextMenu(self.menu)
exitAction.triggered.connect( self.quit )
def quit(self):
QtCore.QCoreApplication.quit()
class QtBasedGui():
def __init__(self, apiServer):
self.apiServer = apiServer
self.app = QtGui.QApplication(sys.argv)
pixmap = QtGui.QPixmap(AppFolders.imagePath("trout.png"))
icon = QtGui.QIcon( pixmap.scaled(16,16))
self.trayIcon = SystemTrayIcon(icon,self)
self.trayIcon.show()
def run(self):
try:
self.app.exec_()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
QtGui().run()
|
[
"PyQt4.QtGui.QApplication",
"comicstreamerlib.folders.AppFolders.imagePath",
"PyQt4.QtCore.QCoreApplication.quit",
"PyQt4.QtGui.QSystemTrayIcon.__init__",
"PyQt4.QtGui.QMenu",
"PyQt4.QtGui"
] |
[((208, 256), 'PyQt4.QtGui.QSystemTrayIcon.__init__', 'QtGui.QSystemTrayIcon.__init__', (['self', 'icon', 'None'], {}), '(self, icon, None)\n', (238, 256), False, 'from PyQt4 import QtGui, QtCore\n'), ((300, 317), 'PyQt4.QtGui.QMenu', 'QtGui.QMenu', (['None'], {}), '(None)\n', (311, 317), False, 'from PyQt4 import QtGui, QtCore\n'), ((486, 516), 'PyQt4.QtCore.QCoreApplication.quit', 'QtCore.QCoreApplication.quit', ([], {}), '()\n', (514, 516), False, 'from PyQt4 import QtGui, QtCore\n'), ((646, 674), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (664, 674), False, 'from PyQt4 import QtGui, QtCore\n'), ((715, 748), 'comicstreamerlib.folders.AppFolders.imagePath', 'AppFolders.imagePath', (['"""trout.png"""'], {}), "('trout.png')\n", (735, 748), False, 'from comicstreamerlib.folders import AppFolders\n'), ((1040, 1047), 'PyQt4.QtGui', 'QtGui', ([], {}), '()\n', (1045, 1047), False, 'from PyQt4 import QtGui, QtCore\n')]
|
""" IO Handler for LAS (and compressed LAZ) file format """
import laspy
import numpy as np
from laserchicken import keys
from laserchicken.io.base_io_handler import IOHandler
from laserchicken.io.utils import convert_to_short_type, select_valid_attributes
DEFAULT_LAS_ATTRIBUTES = {
'x',
'y',
'z',
'intensity',
'gps_time',
'raw_classification',
}
class LASHandler(IOHandler):
""" Class for IO of point-cloud data in LAS file format """
def read(self, attributes=DEFAULT_LAS_ATTRIBUTES):
"""
Load the points from a LAS(LAZ) file into memory.
:param attributes: list of attributes to read ('all' for all attributes in file)
:return: point cloud data structure
"""
file = laspy.read(self.path)
dtype = file.header.point_format.dtype()
attributes_available = [el if el not in ['X', 'Y', 'Z'] else el.lower()
for el in dtype.fields.keys()]
attributes = select_valid_attributes(attributes_available, attributes)
points = {}
for name in attributes:
if hasattr(file, name):
file_data = getattr(file, name)
data = np.zeros_like(file_data)
data[:] = file_data
points[name] = _get_attribute(data, data.dtype.name)
return {keys.point: points}
def write(self, point_cloud, attributes='all', file_version='1.2', point_format=3):
"""
Write point cloud to a LAS(LAZ) file.
:param point_cloud:
:param attributes: list of attributes to write ('all' for all attributes in point_cloud)
:param file_version:
:param point_format:
:return:
"""
file = laspy.create(point_format=point_format,
file_version=file_version)
points = point_cloud[keys.point]
attributes = select_valid_attributes([attr for attr in points.keys()], attributes)
# NOTE: adding extra dims and assignment should be done in two steps,
# some fields (e.g. raw_classification) are otherwise overwritten
dtype = file.header.point_format.dtype()
for attribute in attributes:
data, type = _get_data_and_type(points[attribute])
type_short = convert_to_short_type(type)
if attribute not in 'xyz':
# x,y,z are not there but file methods can be used to convert coords to int4
if attribute not in dtype.fields:
param = laspy.ExtraBytesParams(name=attribute, type=type)
file.add_extra_dim(param)
file_type_short = convert_to_short_type(getattr(file, attribute).dtype.name)
if not file_type_short == type_short:
raise TypeError('Data type in file does not match the one in point cloud: '
'for {}, {} vs {}'.format(attribute, file_type_short, type_short))
for dim in 'xyz':
data, _ = _get_data_and_type(points[dim])
setattr(file.header, '{}_offset'.format(dim), data.min())
setattr(file.header, '{}_scale'.format(dim), 0.001)
for attribute in attributes:
data, _ = _get_data_and_type(points[attribute])
if data.size == 0:
raise ValueError('Cannot write empty point-cloud!')
else:
setattr(file, attribute, data)
try:
file.write(self.path)
except ValueError as err:
raise ValueError('Error in writing LAS file (file_version {}, point_format_id {}). '
'laspy error below:\n{}'.format(file_version, point_format, err))
def _get_attribute(data, data_type):
return {'type': data_type, 'data': data}
def _get_data_and_type(attribute):
return attribute['data'], attribute['type']
|
[
"laspy.create",
"laserchicken.io.utils.convert_to_short_type",
"laserchicken.io.utils.select_valid_attributes",
"laspy.ExtraBytesParams",
"laspy.read",
"numpy.zeros_like"
] |
[((757, 778), 'laspy.read', 'laspy.read', (['self.path'], {}), '(self.path)\n', (767, 778), False, 'import laspy\n'), ((993, 1050), 'laserchicken.io.utils.select_valid_attributes', 'select_valid_attributes', (['attributes_available', 'attributes'], {}), '(attributes_available, attributes)\n', (1016, 1050), False, 'from laserchicken.io.utils import convert_to_short_type, select_valid_attributes\n'), ((1753, 1819), 'laspy.create', 'laspy.create', ([], {'point_format': 'point_format', 'file_version': 'file_version'}), '(point_format=point_format, file_version=file_version)\n', (1765, 1819), False, 'import laspy\n'), ((2308, 2335), 'laserchicken.io.utils.convert_to_short_type', 'convert_to_short_type', (['type'], {}), '(type)\n', (2329, 2335), False, 'from laserchicken.io.utils import convert_to_short_type, select_valid_attributes\n'), ((1211, 1235), 'numpy.zeros_like', 'np.zeros_like', (['file_data'], {}), '(file_data)\n', (1224, 1235), True, 'import numpy as np\n'), ((2546, 2595), 'laspy.ExtraBytesParams', 'laspy.ExtraBytesParams', ([], {'name': 'attribute', 'type': 'type'}), '(name=attribute, type=type)\n', (2568, 2595), False, 'import laspy\n')]
|
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras import utils
import tensorflow as tf
import numpy as np
import argparse
import logging
import os
# Set Log Level
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Seed for Reproducibility
SEED = 123
np.random.seed(SEED)
tf.random.set_seed(SEED)
# Setup Logger
logger = logging.getLogger('sagemaker')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def parse_args():
parser = argparse.ArgumentParser()
# Hyperparameters sent by the client are passed as command-line arguments to the script
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--data', type=str, default=os.environ.get('SM_CHANNEL_DATA'))
parser.add_argument('--output', type=str, default=os.environ.get('SM_CHANNEL_OUTPUT'))
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
return parser.parse_known_args()
def get_train_data(train_dir):
X_train = np.load(os.path.join(train_dir, 'X_train.npy'))
y_train = np.load(os.path.join(train_dir, 'y_train.npy'))
logger.info(f'X_train: {X_train.shape} | y_train: {y_train.shape}')
return X_train, y_train
def get_validation_data(val_dir):
X_validation = np.load(os.path.join(val_dir, 'X_validation.npy'))
y_validation = np.load(os.path.join(val_dir, 'y_validation.npy'))
logger.info(f'X_validation: {X_validation.shape} | y_validation: {y_validation.shape}')
return X_validation, y_validation
def get_test_data(test_dir):
X_test = np.load(os.path.join(test_dir, 'X_test.npy'))
y_test = np.load(os.path.join(test_dir, 'y_test.npy'))
logger.info(f'X_test: {X_test.shape} | y_test: {y_test.shape}')
return X_test, y_test
if __name__ == '__main__':
logger.info(f'[Using TensorFlow version: {tf.__version__}]')
DEVICE = '/cpu:0'
args, _ = parse_args()
epochs = args.epochs
# Load train, validation and test sets from S3
X_train, y_train = get_train_data(args.train)
X_validation, y_validation = get_validation_data(args.val)
X_test, y_test = get_test_data(args.test)
with tf.device(DEVICE):
# Data Augmentation
TRAIN_BATCH_SIZE = 32
data_generator = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
train_iterator = data_generator.flow(X_train, y_train, batch_size=TRAIN_BATCH_SIZE)
# Define Model Architecture
model = Sequential()
# CONVOLUTIONAL LAYER 1
model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(32, 32, 3)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
        # CONVOLUTIONAL LAYER 2
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
# CONVOLUTIONAL LAYER 3
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
# FULLY CONNECTED LAYER
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
model.summary()
# Compile Model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train Model
BATCH_SIZE = 32
STEPS_PER_EPOCH = int(X_train.shape[0]/TRAIN_BATCH_SIZE)
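        # With a generator, Keras uses steps_per_epoch to decide how many batches
        # make up one epoch, so each epoch covers the training set roughly once.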
model.fit(train_iterator,
steps_per_epoch=STEPS_PER_EPOCH,
batch_size=BATCH_SIZE,
epochs=epochs,
validation_data=(X_validation, y_validation),
callbacks=[],
verbose=2,
shuffle=True)
# Evaluate on Test Set
result = model.evaluate(X_test, y_test, verbose=1)
print(f'Test Accuracy: {result[1]}')
# Save Model
model.save(f'{args.model_dir}/1')
|
[
"logging.getLogger",
"tensorflow.device",
"logging.StreamHandler",
"tensorflow.random.set_seed",
"argparse.ArgumentParser",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"os.path.join",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"os.environ.get",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"numpy.random.seed",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.models.Sequential"
] |
[((538, 558), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (552, 558), True, 'import numpy as np\n'), ((559, 583), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['SEED'], {}), '(SEED)\n', (577, 583), True, 'import tensorflow as tf\n'), ((609, 639), 'logging.getLogger', 'logging.getLogger', (['"""sagemaker"""'], {}), "('sagemaker')\n", (626, 639), False, 'import logging\n'), ((688, 711), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (709, 711), False, 'import logging\n'), ((746, 771), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (769, 771), False, 'import argparse\n'), ((1542, 1580), 'os.path.join', 'os.path.join', (['train_dir', '"""X_train.npy"""'], {}), "(train_dir, 'X_train.npy')\n", (1554, 1580), False, 'import os\n'), ((1604, 1642), 'os.path.join', 'os.path.join', (['train_dir', '"""y_train.npy"""'], {}), "(train_dir, 'y_train.npy')\n", (1616, 1642), False, 'import os\n'), ((1807, 1848), 'os.path.join', 'os.path.join', (['val_dir', '"""X_validation.npy"""'], {}), "(val_dir, 'X_validation.npy')\n", (1819, 1848), False, 'import os\n'), ((1877, 1918), 'os.path.join', 'os.path.join', (['val_dir', '"""y_validation.npy"""'], {}), "(val_dir, 'y_validation.npy')\n", (1889, 1918), False, 'import os\n'), ((2103, 2139), 'os.path.join', 'os.path.join', (['test_dir', '"""X_test.npy"""'], {}), "(test_dir, 'X_test.npy')\n", (2115, 2139), False, 'import os\n'), ((2162, 2198), 'os.path.join', 'os.path.join', (['test_dir', '"""y_test.npy"""'], {}), "(test_dir, 'y_test.npy')\n", (2174, 2198), False, 'import os\n'), ((2692, 2709), 'tensorflow.device', 'tf.device', (['DEVICE'], {}), '(DEVICE)\n', (2701, 2709), True, 'import tensorflow as tf\n'), ((2794, 2885), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'horizontal_flip': '(True)'}), '(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n', (2812, 2885), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3035, 3047), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3045, 3047), False, 'from tensorflow.keras.models import Sequential\n'), ((974, 1007), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_DATA"""'], {}), "('SM_CHANNEL_DATA')\n", (988, 1007), False, 'import os\n'), ((1063, 1098), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_OUTPUT"""'], {}), "('SM_CHANNEL_OUTPUT')\n", (1077, 1098), False, 'import os\n'), ((1153, 1187), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_TRAIN"""'], {}), "('SM_CHANNEL_TRAIN')\n", (1167, 1187), False, 'import os\n'), ((1240, 1272), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_VAL"""'], {}), "('SM_CHANNEL_VAL')\n", (1254, 1272), False, 'import os\n'), ((1326, 1359), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_TEST"""'], {}), "('SM_CHANNEL_TEST')\n", (1340, 1359), False, 'import os\n'), ((1418, 1448), 'os.environ.get', 'os.environ.get', (['"""SM_MODEL_DIR"""'], {}), "('SM_MODEL_DIR')\n", (1432, 1448), False, 'import os\n'), ((3107, 3204), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(32, 32, 3)'}), "(filters=16, kernel_size=2, padding='same', activation='relu',\n input_shape=(32, 32, 3))\n", (3113, 3204), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), 
((3220, 3240), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3238, 3240), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3260, 3285), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (3272, 3285), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3338, 3406), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=2, padding='same', activation='relu')\n", (3344, 3406), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3426, 3446), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3444, 3446), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3466, 3491), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (3478, 3491), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3544, 3612), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=2, padding='same', activation='relu')\n", (3550, 3612), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3632, 3652), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3650, 3652), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3672, 3697), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (3684, 3697), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3717, 3729), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (3724, 3729), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3783, 3792), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3790, 3792), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3812, 3841), 'tensorflow.keras.layers.Dense', 'Dense', (['(500)'], {'activation': '"""relu"""'}), "(500, activation='relu')\n", (3817, 3841), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3861, 3873), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (3868, 3873), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3893, 3924), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (3898, 3924), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n')]
|
"""
This library was written for the course `Các mô hình ngẫu nhiên và ứng dụng`
(Stochastic models and applications).
It uses the `networkx, pandas, numpy, matplotlib` libraries.
"""
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import imread
import pandas as pd
def _gcd(a, b):
if a == 0:
return b
return _gcd(b % a, a)
def gcd(arr):
if len(arr) == 0:
return 0
if (len(arr) == 1):
return arr[0]
t = arr[0]
for i in range(len(arr)):
t = _gcd(t, arr[i])
return t
class MarkovChain:
"""
Constructor function: Generate blank instance
    There are two ways to build the chain:
        - From a csv file:
            use from_file
        - From keyboard input:
            use from_stdin
"""
def __init__(self):
self.data = None
self.state = None
self.struct = None
def from_stdin(self, state=None, data=None, pi=None):
        if state is None or data is None:
return "Nothing is given"
else:
self.P = data
self.pi = pi
self.data = self.P
self.state = state
self.struct = self.__generate_struct__()
def from_file(self, path='input.csv'):
data = pd.read_csv(path)
matrix = pd.DataFrame(data)
data = matrix.values.tolist()
self.pi = data[0]
self.state = matrix.columns
self.P = data[1:]
self.data = self.P
self.struct = self.__generate_struct__()
"""
    Generate the structure of the graph.
    The current graph structure is a list of edges:
        ['vertex 1', 'vertex 2', {'label': label}]
"""
def __generate_struct__(self):
struct = []
for i in range(len(self.data)):
for j in range(len(self.data)):
if self.data[i][j] > 0:
struct.append([self.state[i], self.state[j],
{'label': self.data[i][j]}])
return struct
"""
    Generate the n-step transition probability matrix of the process
"""
def matrix_at(self, n):
self.data = np.matrix.round(np.linalg.matrix_power(self.P, n), 3)
self.struct = self.__generate_struct__()
"""
    Generate the graph; the output image is saved to the img directory
"""
def __get_state_vector__(self, n):
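        # Distribution after n steps: pi_n = pi_0 * P^n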
self.matrix_at(n)
self.state_vector = np.matmul(self.pi, self.data)
def __get_state_track__(self, n):
state = np.empty(shape=(len(self.pi), 1))
state = state.tolist()
steps = []
for i in range(n):
steps.append(i+1)
self.__get_state_vector__(i)
state.append(self.state_vector)
state = np.transpose(state)
return state.tolist(), steps
def generate_state_graph(self, n):
        if self.pi is None:
return "Not found origin state"
else:
state, steps = self.__get_state_track__(n)
legend = self.state
for i in range(len(self.pi)):
plt.plot(steps, state[i][1:])
plt.legend(legend, loc='best')
plt.title("Distribution state vector through time")
plt.xlabel("Steps")
plt.ylabel("Probability")
plt.savefig('img/state_vector.svg', format='svg', dpi=1200)
plt.show()
def generate_graph(self, n=1):
if self.state is None:
return "Graph is empty. \n Nothing to show"
else:
self.matrix_at(n)
self = nx.drawing.nx_agraph.to_agraph(nx.DiGraph(self.struct))
self.layout('dot')
self.node_attr.update(color='red', height=0.5,
width=0.5, fontname="Calibri", fontsize=10)
self.edge_attr.update(color='blue', fontsize=8,
fontname="Calibri", rotate=True)
self.draw('img/Graph.svg')
self.draw('img/Graph.png')
img = imread('img/Graph.png')
plt.axis("off")
plt.imshow(img)
def __convert_to_adjagecy__(self):
adjagecy_vector = {i: [] for i in self.state}
for i in range(len(self.P)):
for j in range(len(self.P)):
if self.P[i][j] != 0:
adjagecy_vector[self.state[i]].append(self.state[j])
return adjagecy_vector
def is_connected(self, source, target):
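        # Breadth-first search from `source`; returns True if `target` is reachable.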
vector = self.__convert_to_adjagecy__()
visit_status = {i: False for i in self.state}
queue = []
queue.append(source)
while queue != []:
current_state = queue[0]
visit_status[current_state] = True
queue.pop(0)
for s in vector[current_state]:
if target == s:
return True
if visit_status[s] == False:
queue.append(s)
return False
# This part is unused -> comment for later use
# ------------------------------------------
# def has_selfloop(self):
# for i in range(len(self.P)):
# if self.P[i][i] != 0:
# return True
# return False
# def rank_test(self):
# P = np.subtract(self.P, np.identity(len(self.P)))
# if np.linalg.matrix_rank(P) == len(self.P):
# return True
# return False
# -------------------------------------------
def is_regular(self):
# Check is irreducible
component = self.get_connected_component()
if len(component) > 1:
return False
tmp = self.get_period(self.state[0])
if tmp == 1:
return True
return False
# ----------------------------------------------------------
# Get period of a state
# ----------------------------------------------------------
def __cycle_length__(self, source):
vector = self.__convert_to_adjagecy__()
visit_status = {i: False for i in self.state}
step = 0
queue = [source]
while queue != []:
current_state = queue[0]
visit_status[current_state] = True
queue.pop(0)
step += 1
for s in vector[current_state]:
if s == source:
return step
if visit_status[s] == False:
queue.append(s)
return step
def get_connected_component(self):
connected_component = [[]]
status = {i: False for i in self.state}
while True:
counter = 0
for i in self.state:
for j in self.state:
if (self.is_connected(i, j) and self.is_connected(j, i)):
if status[i] == False:
connected_component[counter].append(i)
status[i] = True
if status[j] == False:
connected_component[counter].append(j)
status[j] = True
connected_component.append([])
counter += 1
if i == self.state[len(self.state) - 1] and j == self.state[len(self.state) - 1]:
break
connected_component = list(filter(None, connected_component))
return connected_component
def get_period(self, target):
component = self.get_connected_component()
for sl in component:
if target in sl:
break
t = []
if target not in sl:
return 0
else:
for i in sl:
t.append(self.__cycle_length__(i))
return gcd(t)
# ----------------------------------------------------
# Get steady state
# ----------------------------------------------------
def get_steady_state(self):
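        # Solve pi * P = pi together with sum(pi) = 1: build A = P^T - I, append a
        # row of ones, set b = (0, ..., 0, 1), and solve the normal equations
        # (A^T A) x = A^T b, i.e. a least-squares stationary distribution.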
A = np.transpose(self.P)
A = np.subtract(A, np.identity(len(A)))
A = np.ndarray.tolist(A)
A.append(np.ndarray.tolist(np.ones(len(A))))
b = np.ndarray.tolist(np.transpose(np.zeros(len(A))))
b[len(b)-1] = 1
# Calc
return np.matmul(np.linalg.inv(np.matmul(np.transpose(A), A)), np.matmul(np.transpose(A), b))
# ----------------------------------------------------
# Get mean time spent
# ----------------------------------------------------
def __get_index__(self, state_set):
idx_list = []
tmp = list(self.state)
try:
for state in state_set:
idx_list.append(tmp.index(state))
del tmp
return idx_list
except:
return "State is not in the state set"
def __get_absoring_state__(self):
abr_state = []
for i in range((len(self.state))):
if self.P[i][i] == 1:
abr_state.append(self.state[i])
return abr_state
def __get_mean_state_list__(self, state_set):
tmp = list(self.state)
        tmp = [state for state in tmp if state not in state_set]
return tmp
def __get_mean_time_absoring__(self):
try:
idx_list = self.__get_index__(self.__get_absoring_state__())
            state_list = self.__get_mean_state_list__(self.__get_absoring_state__())
P = self.data
P = np.delete(P, idx_list, 0)
P = np.delete(P, idx_list, 1)
P = np.transpose(P)
I = np.identity(len(P))
A = np.subtract(I, P)
b = np.transpose(np.ones(len(P)))
x = np.round(np.linalg.solve(A, b), 2)
del idx_list, P, I, A, b
mean_time = {"Mean time spent " +
state: x_val for (state, x_val) in zip(state_list, x)}
return mean_time
except:
return "Check your state or matrix"
def __get_mean_time_transient__(self, source=None, target=None):
idx_list = self.__get_index__(self.__get_absoring_state__())
P = self.data
P = np.delete(P, idx_list, 0)
P = np.delete(P, idx_list, 1)
P = np.transpose(P)
I = np.identity(len(P))
A = np.subtract(I, P)
A = A.tolist()
if source == None or target == None:
return A
|
[
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.image.imread",
"numpy.ndarray.tolist",
"matplotlib.pyplot.imshow",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"networkx.DiGraph",
"numpy.subtract",
"numpy.matmul",
"pandas.DataFrame",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.linalg.solve",
"numpy.linalg.matrix_power"
] |
[((1247, 1264), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1258, 1264), True, 'import pandas as pd\n'), ((1282, 1300), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1294, 1300), True, 'import pandas as pd\n'), ((2356, 2385), 'numpy.matmul', 'np.matmul', (['self.pi', 'self.data'], {}), '(self.pi, self.data)\n', (2365, 2385), True, 'import numpy as np\n'), ((2683, 2702), 'numpy.transpose', 'np.transpose', (['state'], {}), '(state)\n', (2695, 2702), True, 'import numpy as np\n'), ((7837, 7857), 'numpy.transpose', 'np.transpose', (['self.P'], {}), '(self.P)\n', (7849, 7857), True, 'import numpy as np\n'), ((7918, 7938), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['A'], {}), '(A)\n', (7935, 7938), True, 'import numpy as np\n'), ((9960, 9985), 'numpy.delete', 'np.delete', (['P', 'idx_list', '(0)'], {}), '(P, idx_list, 0)\n', (9969, 9985), True, 'import numpy as np\n'), ((9998, 10023), 'numpy.delete', 'np.delete', (['P', 'idx_list', '(1)'], {}), '(P, idx_list, 1)\n', (10007, 10023), True, 'import numpy as np\n'), ((10036, 10051), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (10048, 10051), True, 'import numpy as np\n'), ((10096, 10113), 'numpy.subtract', 'np.subtract', (['I', 'P'], {}), '(I, P)\n', (10107, 10113), True, 'import numpy as np\n'), ((2109, 2142), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['self.P', 'n'], {}), '(self.P, n)\n', (2131, 2142), True, 'import numpy as np\n'), ((3053, 3083), 'matplotlib.pyplot.legend', 'plt.legend', (['legend'], {'loc': '"""best"""'}), "(legend, loc='best')\n", (3063, 3083), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3147), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution state vector through time"""'], {}), "('Distribution state vector through time')\n", (3105, 3147), True, 'import matplotlib.pyplot as plt\n'), ((3160, 3179), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steps"""'], {}), "('Steps')\n", (3170, 3179), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3217), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (3202, 3217), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3289), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""img/state_vector.svg"""'], {'format': '"""svg"""', 'dpi': '(1200)'}), "('img/state_vector.svg', format='svg', dpi=1200)\n", (3241, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3310, 3312), True, 'import matplotlib.pyplot as plt\n'), ((3946, 3969), 'matplotlib.image.imread', 'imread', (['"""img/Graph.png"""'], {}), "('img/Graph.png')\n", (3952, 3969), False, 'from matplotlib.image import imread\n'), ((3982, 3997), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3990, 3997), True, 'import matplotlib.pyplot as plt\n'), ((4010, 4025), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4020, 4025), True, 'import matplotlib.pyplot as plt\n'), ((9264, 9289), 'numpy.delete', 'np.delete', (['P', 'idx_list', '(0)'], {}), '(P, idx_list, 0)\n', (9273, 9289), True, 'import numpy as np\n'), ((9306, 9331), 'numpy.delete', 'np.delete', (['P', 'idx_list', '(1)'], {}), '(P, idx_list, 1)\n', (9315, 9331), True, 'import numpy as np\n'), ((9348, 9363), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (9360, 9363), True, 'import numpy as np\n'), ((9416, 9433), 'numpy.subtract', 'np.subtract', (['I', 'P'], {}), '(I, P)\n', (9427, 9433), True, 'import numpy as np\n'), ((3011, 3040), 
'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'state[i][1:]'], {}), '(steps, state[i][1:])\n', (3019, 3040), True, 'import matplotlib.pyplot as plt\n'), ((3530, 3553), 'networkx.DiGraph', 'nx.DiGraph', (['self.struct'], {}), '(self.struct)\n', (3540, 3553), True, 'import networkx as nx\n'), ((8174, 8189), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (8186, 8189), True, 'import numpy as np\n'), ((9505, 9526), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (9520, 9526), True, 'import numpy as np\n'), ((8142, 8157), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (8154, 8157), True, 'import numpy as np\n')]
|
# URL routing for the views of the users sub-application
from django.urls import path
from users.views import RegisterView, ImageCodeView,SmsCodeView
urlpatterns = [
    # first argument of path: the route
    # second argument of path: the view function name
path('register/', RegisterView.as_view(),name='register'),
    # route for the image verification code (captcha)
path('imagecode/',ImageCodeView.as_view(),name='imagecode'),
    # SMS sending
path('smscode/',SmsCodeView.as_view(),name='smscode'),
]
|
[
"users.views.ImageCodeView.as_view",
"users.views.RegisterView.as_view",
"users.views.SmsCodeView.as_view"
] |
[((192, 214), 'users.views.RegisterView.as_view', 'RegisterView.as_view', ([], {}), '()\n', (212, 214), False, 'from users.views import RegisterView, ImageCodeView, SmsCodeView\n'), ((270, 293), 'users.views.ImageCodeView.as_view', 'ImageCodeView.as_view', ([], {}), '()\n', (291, 293), False, 'from users.views import RegisterView, ImageCodeView, SmsCodeView\n'), ((343, 364), 'users.views.SmsCodeView.as_view', 'SmsCodeView.as_view', ([], {}), '()\n', (362, 364), False, 'from users.views import RegisterView, ImageCodeView, SmsCodeView\n')]
|
#!/usr/bin/python
from builtins import object
from builtins import str
import sys
import traceback
sys.path.append("/opt/contrail/fabric_ansible_playbooks/module_utils") # noqa
from filter_utils import _task_done, _task_error_log, _task_log, FilterLog
from job_manager.job_utils import JobVncApi
class FilterModule(object):
def filters(self):
return {
'import_lldp_info': self.import_lldp_info,
}
# end filters
def _instantiate_filter_log_instance(self, device_name):
FilterLog.instance("Import_lldp_info_Filter", device_name)
# end _instantiate_filter_log_instance
def import_lldp_info(self, job_ctx, prouter_fqname,
prouter_vendor,
lldp_neighbors_payload):
"""Topology discovery.
:param job_ctx: Dictionary
# example:
# {
# "auth_token": "<PASSWORD>",
# "job_input": {
# "fabric_fq_name": [
# "default-global-system-config",
# "fab01"
# ],
# "device_auth": [{
# "username": "root",
# "password": "<PASSWORD>"
# }],
# "management_subnets": [
# {
# "cidr": "10.87.69.0/25",
# "gateway": "10.87.69.1"
# }
# ],
# "overlay_ibgp_asn": 64512,
# "node_profiles": [
# {
# "node_profile_name": "juniper-qfx5k"
# }
# ]
# }
# }
:param prouter_fqname: List
example:
# [
# "default-global-system-config",
# "5c3-qfx2"
# ]
:param prouter_vendor: String
example: "juniper"
:param lldp_neighbors_payload: Dictionary
# example:
# {
# "neighbor_info_list":
# [
# {
# "local_physical_interface_name": "xe-0/0/0",
# "remote_device_name": "5b5-qfx11",
# "remote_physical_interface_port_id": "536"
# },
# {
# "local_physical_interface_name": "xe-0/0/2",
# "remote_device_chassis_id": "00:1a:53:46:7b:9e",
# "remote_physical_interface_port_id": "538"
# }
# ]
# }
:return: Dictionary
# if success, returns
# {
# 'status': 'success',
# 'topology_discovery_log':
# <String: topology_discovery_log>,
# 'topology_discovery_resp':
# <Dictionary: topology_discovery_resp>
# }
# if failure, returns
# {
# 'status': 'failure',
# 'error_msg': <String: exception message>,
# 'topology_discovery_log':
# <String: topology_discovery_log>,
# 'topology_discovery_resp':
# <Dictionary: topology_discovery_resp>
# }
:param topology_discovery_resp: Dictionary
# example:
# {
# "lldp_neighbors_success_names":
# <List: <String: lldp_neighbors_success_pair_string>>,
# "lldp_neighbors_failed_info":
# <List: <Dictionary: lldp_neighbor_failed_obj> >
# }
# :param lldp_neighbors_success_names: List
# example:
# ["bng-contrail-qfx51-15 : ge-0/0/36 --> dhawan : ge-2/3/1"]
# :param lldp_neighbors_failed_info: List
# example:
# [
# {
# "lldp_neighbor":
# "bng-contrail-qfx51-15 : em0 --> sw174 : ge-1/0/46",
# "warning_message":
# "Unknown physical interface ng-contrail-qfx51-15:em0"
# }
# ]
"""
self._instantiate_filter_log_instance(prouter_fqname[-1])
_task_log("Starting Topology Discovery")
try:
_task_log("Creating neighboring links")
topology_discovery_resp = self._create_neighbor_links(
job_ctx,
lldp_neighbors_payload,
prouter_fqname,
prouter_vendor)
_task_done()
return {
'status': 'success',
'topology_discovery_log': FilterLog.instance().dump(),
'topology_discovery_resp': topology_discovery_resp
}
except Exception as ex:
_task_error_log(str(ex))
_task_error_log(traceback.format_exc())
return {'status': 'failure',
'error_msg': str(ex),
'topology_discovery_log': FilterLog.instance().dump()}
# end import_lldp_info
def get_vnc_payload(self, vnc_lib, prouter_fqname,
prouter_vendor,
lldp_neighbors_info):
vnc_payload = []
chassis_id_device_name_map = self.get_chassis_id_to_device_name(
vnc_lib, prouter_vendor)
for lldp_neighbor_info in lldp_neighbors_info or []:
local_phy_int = lldp_neighbor_info.get(
'local_physical_interface_name')
phy_int_fqname = []
phy_int_fqname.extend(prouter_fqname)
phy_int_fqname.append(local_phy_int.replace(":", "_"))
remote_device_chassis_id = lldp_neighbor_info.get(
'remote_device_chassis_id')
remote_device_name = chassis_id_device_name_map.get(
remote_device_chassis_id)
if not remote_device_name:
remote_device_name = lldp_neighbor_info.get(
'remote_device_name')
if remote_device_name:
remote_phy_int_fqname_str = \
remote_device_name.replace(
":", "_") + ":" +\
lldp_neighbor_info.get(
'remote_physical_interface_port_id')
vnc_payload.append((phy_int_fqname, remote_phy_int_fqname_str))
return vnc_payload
# end get_vnc_payload
# get chassis mac id to physical router name map
# for all the physical routers in the fabric
def get_chassis_id_to_device_name(self, vnc_lib, prouter_vendor):
chassis_id_to_device_name_map = {}
phy_routers_list = vnc_lib.physical_routers_list(
fields=['device_chassis_refs']).get('physical-routers')
for phy_router in phy_routers_list or []:
if phy_router.get('device_chassis_refs'):
device_chassis_id_info = phy_router.get(
'device_chassis_refs')
for chassis_id_info in device_chassis_id_info or []:
chassis_mac = chassis_id_info['to'][-1].split(
prouter_vendor + '_')[1].replace('_', ':')
chassis_id_to_device_name_map[chassis_mac] = \
phy_router['fq_name'][-1]
return chassis_id_to_device_name_map
# end get_chassis_id_to_device_name
# group vnc functions
def _create_neighbor_links(self, job_ctx,
lldp_neighbors_payload,
prouter_fqname,
prouter_vendor):
if not lldp_neighbors_payload.get('neighbor_info_list'):
_task_log("No neighbors found")
_task_done()
return {
'lldp_neighbors_success_names': [],
'lldp_neighbors_failed_info': []
}
vnc_lib = JobVncApi.vnc_init(job_ctx)
vnc_topology_disc_payload = self.get_vnc_payload(
vnc_lib,
prouter_fqname,
prouter_vendor,
lldp_neighbors_payload['neighbor_info_list'])
topology_disc_payload = self._do_further_parsing(
vnc_lib, vnc_topology_disc_payload)
_task_done("Parsed payload completely")
_task_log("Creating links between neighboring physical interfaces")
topology_discovery_resp = self._create_physical_interface_refs(
vnc_lib, topology_disc_payload)
return topology_discovery_resp
# end _create_neighbor_links
def _do_further_parsing(self, vnc_lib, neighbor_info_list):
topology_disc_payload = []
for neighbor_info in neighbor_info_list or []:
remote_neighbor_info = neighbor_info[1].split(":", 1)
list_resp = vnc_lib.physical_interfaces_list(
parent_fq_name=["default-global-system-config",
remote_neighbor_info[0]],
filters={"physical_interface_port_id":
remote_neighbor_info[1]}
)
if list_resp['physical-interfaces']:
topology_disc_payload.append([neighbor_info[0],
list_resp['physical-interfaces']
[0]['fq_name']])
return topology_disc_payload
# end _do_further_parsing
def _create_physical_interface_refs(self, vnc_lib, topology_disc_payload):
# create or update refs between physical interfaces
# on the local device to the remote device
object_type = "physical_interface"
lldp_neighbors_success_names = []
lldp_neighbors_failed_info = []
for topology_disc_info in topology_disc_payload or []:
try:
object_fqname = topology_disc_info[0]
ref_fqname = topology_disc_info[1]
pi_obj = vnc_lib.physical_interface_read(fq_name=object_fqname)
# Check ref already present or not
refs = pi_obj.get_physical_interface_refs()
is_link_found = False
if refs:
for ref in refs:
if ref['to'] == ref_fqname:
is_link_found = True
if not is_link_found:
ref_uuid = vnc_lib.fq_name_to_id(object_type, ref_fqname)
pi_obj.set_physical_interface_list([{"to": ref_fqname,
"uuid": ref_uuid}])
vnc_lib.physical_interface_update(pi_obj)
lldp_neighbors_success_names.append(object_fqname[-2] + " : " +
object_fqname[-1] +
" --> " +
ref_fqname[-2] + " : " +
ref_fqname[-1])
except Exception as ex:
_task_error_log(str(ex))
_task_error_log(traceback.format_exc())
lldp_neighbor_failed_obj = {
"lldp_neighbor": object_fqname[-2] + " : " +
object_fqname[-1] + " --> " +
ref_fqname[-2] + " : " +
ref_fqname[-1],
"warning_message": str(ex)
}
lldp_neighbors_failed_info.append(lldp_neighbor_failed_obj)
return {
'lldp_neighbors_success_names': lldp_neighbors_success_names,
'lldp_neighbors_failed_info': lldp_neighbors_failed_info
}
# end _create_physical_interface_refs
|
[
"filter_utils._task_done",
"traceback.format_exc",
"filter_utils._task_log",
"filter_utils.FilterLog.instance",
"builtins.str",
"sys.path.append",
"job_manager.job_utils.JobVncApi.vnc_init"
] |
[((101, 171), 'sys.path.append', 'sys.path.append', (['"""/opt/contrail/fabric_ansible_playbooks/module_utils"""'], {}), "('/opt/contrail/fabric_ansible_playbooks/module_utils')\n", (116, 171), False, 'import sys\n'), ((524, 582), 'filter_utils.FilterLog.instance', 'FilterLog.instance', (['"""Import_lldp_info_Filter"""', 'device_name'], {}), "('Import_lldp_info_Filter', device_name)\n", (542, 582), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n'), ((4394, 4434), 'filter_utils._task_log', '_task_log', (['"""Starting Topology Discovery"""'], {}), "('Starting Topology Discovery')\n", (4403, 4434), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n'), ((8064, 8091), 'job_manager.job_utils.JobVncApi.vnc_init', 'JobVncApi.vnc_init', (['job_ctx'], {}), '(job_ctx)\n', (8082, 8091), False, 'from job_manager.job_utils import JobVncApi\n'), ((8401, 8440), 'filter_utils._task_done', '_task_done', (['"""Parsed payload completely"""'], {}), "('Parsed payload completely')\n", (8411, 8440), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n'), ((8450, 8517), 'filter_utils._task_log', '_task_log', (['"""Creating links between neighboring physical interfaces"""'], {}), "('Creating links between neighboring physical interfaces')\n", (8459, 8517), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n'), ((4460, 4499), 'filter_utils._task_log', '_task_log', (['"""Creating neighboring links"""'], {}), "('Creating neighboring links')\n", (4469, 4499), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n'), ((4709, 4721), 'filter_utils._task_done', '_task_done', ([], {}), '()\n', (4719, 4721), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n'), ((7853, 7884), 'filter_utils._task_log', '_task_log', (['"""No neighbors found"""'], {}), "('No neighbors found')\n", (7862, 7884), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n'), ((7897, 7909), 'filter_utils._task_done', '_task_done', ([], {}), '()\n', (7907, 7909), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n'), ((4992, 4999), 'builtins.str', 'str', (['ex'], {}), '(ex)\n', (4995, 4999), False, 'from builtins import str\n'), ((5029, 5051), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5049, 5051), False, 'import traceback\n'), ((5127, 5134), 'builtins.str', 'str', (['ex'], {}), '(ex)\n', (5130, 5134), False, 'from builtins import str\n'), ((4822, 4842), 'filter_utils.FilterLog.instance', 'FilterLog.instance', ([], {}), '()\n', (4840, 4842), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n'), ((11201, 11208), 'builtins.str', 'str', (['ex'], {}), '(ex)\n', (11204, 11208), False, 'from builtins import str\n'), ((11242, 11264), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11262, 11264), False, 'import traceback\n'), ((11546, 11553), 'builtins.str', 'str', (['ex'], {}), '(ex)\n', (11549, 11553), False, 'from builtins import str\n'), ((5182, 5202), 'filter_utils.FilterLog.instance', 'FilterLog.instance', ([], {}), '()\n', (5200, 5202), False, 'from filter_utils import _task_done, _task_error_log, _task_log, FilterLog\n')]
|
"""Provides the MENU html string which is appended to all templates
Please note that the MENU only works in [Fast](https://www.fast.design/) based templates.
If you need some sort of custom MENU html string feel free to customize this code.
"""
from awesome_panel_extensions.frameworks.fast.fast_menu import to_menu
from src.shared import config
if config.applications:
MENU = to_menu(
config.applications.values(), accent_color=config.color_primary, expand=["Main"]
).replace("\n", "")
else:
MENU = ""
|
[
"src.shared.config.applications.values"
] |
[((402, 430), 'src.shared.config.applications.values', 'config.applications.values', ([], {}), '()\n', (428, 430), False, 'from src.shared import config\n')]
|
# <NAME> <EMAIL>
# The MIT License (MIT)
#
# Copyright (c) 2020
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
from tkinter import *
from tkinter import ttk
from tkinter.messagebox import *
import serial
root=Tk()
root.title("ESP Autostart Changer")
err=""
def serialOn():
global ser
for port in range(3,9):
comport="COM"+str(port)+":"
try:
ser = serial.Serial(port=comport,baudrate=115200)
serialopen=True
except Exception as e:
#print ("error open serial port: " + str(e))
serialopen=False
if serialopen == True:
#ESPsend(chr(4))
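            # chr(3) is Ctrl-C on the MicroPython REPL (interrupts a running
            # script); chr(4) would be Ctrl-D (soft reboot).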
ESPsend(chr(3))
time.sleep(1)
if ser.inWaiting() != 0:
ser.read()
return (comport)
else:
serialopen=False
return ("Error")
def ESPsend(out):
out+="\r\n"
out=out.encode("utf-8")
ser.write(out)
time.sleep(0.1)
def autooff():
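    # MicroPython runs main.py automatically at boot, so renaming it to
    # mainxxx.py disables autostart; autoon() renames it back.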
if ser.isOpen() == False:start()
ESPsend("import os")
ESPsend("os.rename('main.py','mainxxx.py')")
time.sleep(0.5)
res=""
while ser.inWaiting() != 0:
a=ser.read()
res+=a.decode("utf-8")
pos=res.find("OSError")
if pos==-1:
hinweistxt="Autostart is off"
else:
hinweistxt="Autostart already off"
hinweis.config(text=hinweistxt)
stop()
def autoon():
if ser.isOpen() == False:start()
ESPsend("import os")
ESPsend("os.rename('mainxxx.py','main.py')")
res=""
while ser.inWaiting() != 0:
a=ser.read()
res+=a.decode("utf-8")
pos=res.find("OSError")
if pos==-1:
hinweistxt="Autostart is on"
else:
hinweistxt="Autostart already on"
hinweis.config(text=hinweistxt)
stop()
def stop():
ser.close()
def start():
while True:
res=""
err=serialOn()
if err!="Error":
statustxt="ESP connectet on: "+err
status.config(text=statustxt)
ESPsend("import os")
ESPsend("os.listdir()")
while ser.inWaiting() != 0:
a=ser.read()
res+=a.decode("utf-8")
if "main.py" in res:
hinweistxt="Autostart is on"
else:
hinweistxt="Autostart is off"
hinweis.config(text=hinweistxt)
break
else:
if askyesno("No ESP found!!! Try again?"):
ser.close()
pass
else:
exit()
#----------------------------------------------------------------------------------
#---------- Load widgets
frameButton = Frame(root)
frameButton.pack(fill='both')
button2=Button(frameButton, text="Autostart ON ", command=autoon)
button2.pack(side="right",padx="5",pady="2")
button1=Button(frameButton, text="Autostart OFF ", command=autooff)
button1.pack(side="right",padx="5")
hinweis = Label(root, fg = "lightgreen",bg = "gray", font = "Verdana 10 bold" )
hinweis.pack(fill='both',padx="5",pady="2")
hinweistxt="Change Autostart "
hinweis.config(text=hinweistxt)
status = Label(root)
status.pack(fill='both',padx="5",pady="2")
statustxt=" "
status.config(text=statustxt)
#------------------------------------------------------------------------------------
start()
root.mainloop()
|
[
"serial.Serial",
"time.sleep"
] |
[((1999, 2014), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2009, 2014), False, 'import time\n'), ((2150, 2165), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2160, 2165), False, 'import time\n'), ((1412, 1456), 'serial.Serial', 'serial.Serial', ([], {'port': 'comport', 'baudrate': '(115200)'}), '(port=comport, baudrate=115200)\n', (1425, 1456), False, 'import serial\n'), ((1713, 1726), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1723, 1726), False, 'import time\n')]
|
import os
import urllib.request
os.makedirs('saved_models', exist_ok=True)
model_path = 'http://shape2prog.csail.mit.edu/repo/wrn_40_2_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/wrn_40_2_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/resnet56_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/resnet56_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/resnet110_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/resnet110_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/resnet32x4_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/resnet32x4_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/vgg13_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/vgg13_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/ResNet50_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/ResNet50_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
|
[
"os.makedirs"
] |
[((36, 78), 'os.makedirs', 'os.makedirs', (['"""saved_models"""'], {'exist_ok': '(True)'}), "('saved_models', exist_ok=True)\n", (47, 78), False, 'import os\n'), ((216, 253), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (227, 253), False, 'import os\n'), ((554, 591), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (565, 591), False, 'import os\n'), ((894, 931), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (905, 931), False, 'import os\n'), ((1236, 1273), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (1247, 1273), False, 'import os\n'), ((1568, 1605), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (1579, 1605), False, 'import os\n'), ((1906, 1943), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (1917, 1943), False, 'import os\n')]
|
#!/usr/bin/env python
from argparse import ArgumentParser
import sys
from comp_pi import compute_pi
def main():
arg_parser = ArgumentParser(description='compute pi using Fortran '
'function')
arg_parser.add_argument('n', default=1000, nargs='?',
help='number of random points')
options = arg_parser.parse_args()
print(compute_pi(options.n))
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
|
[
"comp_pi.compute_pi",
"argparse.ArgumentParser",
"sys.exit"
] |
[((132, 195), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""compute pi using Fortran function"""'}), "(description='compute pi using Fortran function')\n", (146, 195), False, 'from argparse import ArgumentParser\n'), ((485, 501), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (493, 501), False, 'import sys\n'), ((397, 418), 'comp_pi.compute_pi', 'compute_pi', (['options.n'], {}), '(options.n)\n', (407, 418), False, 'from comp_pi import compute_pi\n')]
|
#!/usr/bin/env python3
import unittest
import torch
from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor
from Lgpytorch.test.lazy_tensor_test_case import LazyTensorTestCase
class TestCatLazyTensor(LazyTensorTestCase, unittest.TestCase):
seed = 1
def create_lazy_tensor(self):
root = torch.randn(6, 7)
self.psd_mat = root.matmul(root.t())
slice1_mat = self.psd_mat[:2, :].requires_grad_()
slice2_mat = self.psd_mat[2:4, :].requires_grad_()
slice3_mat = self.psd_mat[4:6, :].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-2)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorColumn(LazyTensorTestCase, unittest.TestCase):
seed = 1
def create_lazy_tensor(self):
root = torch.randn(6, 7)
self.psd_mat = root.matmul(root.t())
slice1_mat = self.psd_mat[:, :2].requires_grad_()
slice2_mat = self.psd_mat[:, 2:4].requires_grad_()
slice3_mat = self.psd_mat[:, 4:6].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-1)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorBatch(LazyTensorTestCase, unittest.TestCase):
seed = 0
def create_lazy_tensor(self):
root = torch.randn(3, 6, 7)
self.psd_mat = root.matmul(root.transpose(-2, -1))
slice1_mat = self.psd_mat[..., :2, :].requires_grad_()
slice2_mat = self.psd_mat[..., 2:4, :].requires_grad_()
slice3_mat = self.psd_mat[..., 4:6, :].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-2)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorMultiBatch(LazyTensorTestCase, unittest.TestCase):
seed = 0
    # Because these LTs are large, we'll skip the big tests
skip_slq_tests = True
def create_lazy_tensor(self):
root = torch.randn(4, 3, 6, 7)
self.psd_mat = root.matmul(root.transpose(-2, -1))
slice1_mat = self.psd_mat[..., :2, :].requires_grad_()
slice2_mat = self.psd_mat[..., 2:4, :].requires_grad_()
slice3_mat = self.psd_mat[..., 4:6, :].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-2)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorBatchCat(LazyTensorTestCase, unittest.TestCase):
seed = 0
    # Because these LTs are large, we'll skip the big tests
skip_slq_tests = True
def create_lazy_tensor(self):
root = torch.randn(5, 3, 6, 7)
self.psd_mat = root.matmul(root.transpose(-2, -1))
slice1_mat = self.psd_mat[:2, ...].requires_grad_()
slice2_mat = self.psd_mat[2:3, ...].requires_grad_()
slice3_mat = self.psd_mat[3:, ...].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=0)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"Lgpytorch.lazy.CatLazyTensor",
"torch.randn",
"Lgpytorch.lazy.NonLazyTensor"
] |
[((3869, 3884), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3882, 3884), False, 'import unittest\n'), ((308, 325), 'torch.randn', 'torch.randn', (['(6)', '(7)'], {}), '(6, 7)\n', (319, 325), False, 'import torch\n'), ((566, 591), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice1_mat'], {}), '(slice1_mat)\n', (579, 591), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((609, 634), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice2_mat'], {}), '(slice2_mat)\n', (622, 634), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((652, 677), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice3_mat'], {}), '(slice3_mat)\n', (665, 677), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((694, 739), 'Lgpytorch.lazy.CatLazyTensor', 'CatLazyTensor', (['slice1', 'slice2', 'slice3'], {'dim': '(-2)'}), '(slice1, slice2, slice3, dim=-2)\n', (707, 739), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((987, 1004), 'torch.randn', 'torch.randn', (['(6)', '(7)'], {}), '(6, 7)\n', (998, 1004), False, 'import torch\n'), ((1245, 1270), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice1_mat'], {}), '(slice1_mat)\n', (1258, 1270), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((1288, 1313), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice2_mat'], {}), '(slice2_mat)\n', (1301, 1313), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((1331, 1356), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice3_mat'], {}), '(slice3_mat)\n', (1344, 1356), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((1373, 1418), 'Lgpytorch.lazy.CatLazyTensor', 'CatLazyTensor', (['slice1', 'slice2', 'slice3'], {'dim': '(-1)'}), '(slice1, slice2, slice3, dim=-1)\n', (1386, 1418), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((1665, 1685), 'torch.randn', 'torch.randn', (['(3)', '(6)', '(7)'], {}), '(3, 6, 7)\n', (1676, 1685), False, 'import torch\n'), ((1955, 1980), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice1_mat'], {}), '(slice1_mat)\n', (1968, 1980), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((1998, 2023), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice2_mat'], {}), '(slice2_mat)\n', (2011, 2023), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((2041, 2066), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice3_mat'], {}), '(slice3_mat)\n', (2054, 2066), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((2083, 2128), 'Lgpytorch.lazy.CatLazyTensor', 'CatLazyTensor', (['slice1', 'slice2', 'slice3'], {'dim': '(-2)'}), '(slice1, slice2, slice3, dim=-2)\n', (2096, 2128), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((2466, 2489), 'torch.randn', 'torch.randn', (['(4)', '(3)', '(6)', '(7)'], {}), '(4, 3, 6, 7)\n', (2477, 2489), False, 'import torch\n'), ((2759, 2784), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice1_mat'], {}), '(slice1_mat)\n', (2772, 2784), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((2802, 2827), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice2_mat'], {}), '(slice2_mat)\n', (2815, 2827), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((2845, 2870), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice3_mat'], {}), '(slice3_mat)\n', (2858, 2870), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((2887, 
2932), 'Lgpytorch.lazy.CatLazyTensor', 'CatLazyTensor', (['slice1', 'slice2', 'slice3'], {'dim': '(-2)'}), '(slice1, slice2, slice3, dim=-2)\n', (2900, 2932), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((3268, 3291), 'torch.randn', 'torch.randn', (['(5)', '(3)', '(6)', '(7)'], {}), '(5, 3, 6, 7)\n', (3279, 3291), False, 'import torch\n'), ((3551, 3576), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice1_mat'], {}), '(slice1_mat)\n', (3564, 3576), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((3594, 3619), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice2_mat'], {}), '(slice2_mat)\n', (3607, 3619), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((3637, 3662), 'Lgpytorch.lazy.NonLazyTensor', 'NonLazyTensor', (['slice3_mat'], {}), '(slice3_mat)\n', (3650, 3662), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n'), ((3679, 3723), 'Lgpytorch.lazy.CatLazyTensor', 'CatLazyTensor', (['slice1', 'slice2', 'slice3'], {'dim': '(0)'}), '(slice1, slice2, slice3, dim=0)\n', (3692, 3723), False, 'from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor\n')]
|
import re
from AFD import AFD
class AFN:
def __init__(self, nome=None, estados=[], simbolos=[], estado_inicial=None, estados_finais=[], funcoes_programa={}):
self.nome = nome
self.estados = estados
self.simbolos = simbolos
self.estado_inicial = estado_inicial
self.estados_finais = estados_finais
self.funcoes_programa = funcoes_programa
def __str__(self):
output = f'\nnome={self.nome}'
output += f'\nestados={self.estados}'
output += f'\nsimbolos={self.simbolos}'
output += f'\nestado_inicial={self.estado_inicial}'
output += f'\nestados_finais={self.estados_finais}'
output += f'\nfuncoes_programa='
output += str([str(fp) + ' -> ' + str(e) for fp,
e in self.funcoes_programa.items()])
return output
@staticmethod
def afn_de_arquivo(caminho_arquivo):
        '''Read the file at the given path and
        return a nondeterministic finite automaton (AFN)'''
with open(caminho_arquivo) as file:
first_line = file.readline().split("=", 1)
nome = first_line[0]
            # strip the parentheses
str_definicao = first_line[1][1:-1]
            # replace '{}' with '()'
str_definicao = str_definicao.replace(
'{', '(').replace('}', ')')
            # regex to find elements separated by ',' or sets of elements inside '{}'
            regex_exp = r"[^,()]*(?:\([^)]*\))*[^,]*"
definicao = re.findall(regex_exp, str_definicao)
            # remove the '()' and strip whitespace
definicao = [i.strip().replace('(', '').replace(')', '')
for i in definicao if i]
            # split each string on ','
definicao = [i.split(',') for i in definicao]
estados = definicao[0]
simbolos = definicao[1]
estado_inicial = definicao[2][0]
estados_finais = definicao[3]
            # discard the 'Prog' line
file.readline()
funcoes_programa = {}
for line in file.readlines():
                estado = re.search(r'^\((.*),', line)[0][1: -1]
                simbolo = re.search(r',(.*)\)=', line)[0][1: -2]
estado_resultante = re.search('=(.*)$', line)[0][1:]
if funcoes_programa.get((estado, simbolo)):
funcoes_programa[(estado, simbolo)].append(
estado_resultante)
else:
funcoes_programa[(estado, simbolo)] = [estado_resultante]
return AFN(nome, estados, simbolos, estado_inicial, estados_finais, funcoes_programa)
@staticmethod
def _saidas_novo_estado(estado, simbolo, funcoes_programa):
estados = estado.split('+')
saidas = []
for e in estados:
estado_resultante = funcoes_programa.get((e, simbolo))
if estado_resultante:
saidas.extend(estado_resultante)
if saidas == []:
return 'QM'
return '+'.join(sorted(list(set(saidas))))
@staticmethod
def _define_estados_finais(estados, estados_finais):
finais = []
for estado in estados:
for ef in estados_finais:
if ef in estado:
finais.append(estado)
return finais
def para_AFD(self):
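        # Subset construction: each AFD state is a '+'-joined, sorted set of AFN
        # states; 'QM' acts as the dead state for missing transitions.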
q = []
t = {}
q.append(self.estado_inicial)
estado_morto = 'QM'
for simbolo in self.simbolos:
estado_resultante = self.funcoes_programa.get(
(self.estado_inicial, simbolo))
if estado_resultante:
t[(self.estado_inicial, simbolo)] = '+'.join(estado_resultante)
else:
t[(self.estado_inicial, simbolo)] = estado_morto
while(set(q) != set(t.values())):
for er in list(t.values()):
if er not in q:
q.append(er)
for simbolo in self.simbolos:
if '+' in er:
t[(er, simbolo)] = AFN._saidas_novo_estado(
er, simbolo, self.funcoes_programa)
else:
estado_resultante = self.funcoes_programa.get(
(er, simbolo))
if estado_resultante:
t[(er, simbolo)] = '+'.join(estado_resultante)
else:
t[(er, simbolo)] = estado_morto
estados_finais = AFN._define_estados_finais(q, self.estados_finais)
return AFD(nome=self.nome, estados=q, simbolos=self.simbolos, estado_inicial=self.estado_inicial, estados_finais=estados_finais, funcoes_programa=t)
|
[
"re.findall",
"AFD.AFD",
"re.search"
] |
[((4692, 4838), 'AFD.AFD', 'AFD', ([], {'nome': 'self.nome', 'estados': 'q', 'simbolos': 'self.simbolos', 'estado_inicial': 'self.estado_inicial', 'estados_finais': 'estados_finais', 'funcoes_programa': 't'}), '(nome=self.nome, estados=q, simbolos=self.simbolos, estado_inicial=self.\n estado_inicial, estados_finais=estados_finais, funcoes_programa=t)\n', (4695, 4838), False, 'from AFD import AFD\n'), ((1533, 1569), 're.findall', 're.findall', (['regex_exp', 'str_definicao'], {}), '(regex_exp, str_definicao)\n', (1543, 1569), False, 'import re\n'), ((2164, 2192), 're.search', 're.search', (['"""^\\\\((.*),"""', 'line'], {}), "('^\\\\((.*),', line)\n", (2173, 2192), False, 'import re\n'), ((2228, 2256), 're.search', 're.search', (['""",(.*)\\\\)="""', 'line'], {}), "(',(.*)\\\\)=', line)\n", (2237, 2256), False, 'import re\n'), ((2302, 2327), 're.search', 're.search', (['"""=(.*)$"""', 'line'], {}), "('=(.*)$', line)\n", (2311, 2327), False, 'import re\n')]
|
from typing import List, Dict
import pathlib
import shutil
import enum
from typer import Option as O_
import typer
from cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand
from cs_tools.util.datetime import to_datetime
from cs_tools.tools.common import run_tql_command, run_tql_script, tsload
from cs_tools.util.algo import chunks
from cs_tools.settings import TSConfig
from cs_tools.const import FMT_TSLOAD_DATETIME
from cs_tools.thoughtspot import ThoughtSpot
from cs_tools.tools import common
from .util import FileQueue
HERE = pathlib.Path(__file__).parent
class SystemType(str, enum.Enum):
"""
Reversible mapping of system to friendly names.
"""
ONE_TO_ONE_LOGICAL = 'system table'
USER_DEFINED = 'imported data'
WORKSHEET = 'worksheet'
AGGR_WORKSHEET = 'view'
PINBOARD_ANSWER_BOOK = 'pinboard'
QUESTION_ANSWER_BOOK = 'saved answer'
MATERIALIZED_VIEW = 'materialized view'
CALENDAR_TABLE = 'custom calendar'
FORMULA = 'formula'
@classmethod
def to_friendly(cls, value) -> str:
return getattr(cls, value).value
@classmethod
def to_system(cls, value) -> str:
return getattr(cls, value).name
class ParentType(str, enum.Enum):
"""
Limits the type of objects passed on via CLI.
"""
SYSTEM_TABLE = 'system table'
IMPORTED_DATA = 'imported data'
WORKSHEET = 'worksheet'
VIEW = 'view'
def _format_metadata_objects(queue, metadata: List[Dict]):
"""
Standardize data in an expected format.
    This is a simple transformation layer; we are fitting our data to be
record-based and in the format that's expected for an eventual
tsload command.
"""
for parent in metadata:
queue.put({
'guid_': parent['id'],
'name': parent['name'],
'description': parent.get('description'),
'author_guid': parent['author'],
'author_name': parent['authorName'],
'author_display_name': parent['authorDisplayName'],
'created': to_datetime(parent['created'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
'modified': to_datetime(parent['modified'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
# 'modified_by': parent['modifiedBy'] # user.guid
'type': SystemType.to_friendly(parent['type']) if parent.get('type') else 'column',
'context': parent.get('owner')
})
def _format_dependency(queue, parent_guid, dependencies: Dict[str, Dict]):
"""
Standardize data in an expected format.
    This is a simple transformation layer; we are fitting our data to be
record-based and in the format that's expected for an eventual
tsload command.
"""
for dependency in dependencies:
queue.put({
'guid_': dependency['id'],
'parent_guid': parent_guid,
'name': dependency['name'],
'description': dependency.get('description'),
'author_guid': dependency['author'],
'author_name': dependency['authorName'],
'author_display_name': dependency['authorDisplayName'],
'created': to_datetime(dependency['created'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
'modified': to_datetime(dependency['modified'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
# 'modified_by': dependency['modifiedBy'] # user.guid
'type': SystemType.to_friendly(dependency['type'])
})
def _get_dependents(api: ThoughtSpot, queue, parent: str, metadata: List[Dict]):
for chunk in chunks(metadata, n=50):
r = api._dependency.list_dependents(
id=[_['id'] for _ in chunk],
type='LOGICAL_COLUMN' if parent in ('formula', 'column') else 'LOGICAL_TABLE',
batchsize=-1,
timeout=None if parent == 'column' else -1
)
for parent_guid, dependent_data in r.json().items():
for dependency_type, dependencies in dependent_data.items():
for dependency in dependencies:
dependency['type'] = dependency.get('type', dependency_type)
queue.put({
'guid_': dependency['id'],
'parent_guid': parent_guid,
'name': dependency['name'],
'description': dependency.get('description'),
'author_guid': dependency['author'],
'author_name': dependency['authorName'],
'author_display_name': dependency['authorDisplayName'],
'created': to_datetime(dependency['created'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
'modified': to_datetime(dependency['modified'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
# 'modified_by': dependency['modifiedBy'] # user.guid
'type': SystemType.to_friendly(dependency['type'])
})
def _get_recordset_metadata(api: ThoughtSpot) -> Dict[str, List]:
_seen = {}
metadata = {
'system table': [],
'imported data': [],
'worksheet': [],
'view': [],
'formula': [],
'column': [],
'other': []
}
active_users = common.batched(
api._metadata.list,
type='USER',
batchsize=5000,
transformer=lambda r: r.json()['headers']
)
r = [
*common.batched(
api._metadata.list,
type='LOGICAL_TABLE',
batchsize=5000,
transformer=lambda r: r.json()['headers']
),
*common.batched(
api._metadata.list,
type='LOGICAL_COLUMN',
batchsize=5000,
# NOTE: "True" = includes Custom Calendars & Materialized Views...
# auto_created=False,
transformer=lambda r: r.json()['headers']
)
]
for item in r:
try:
friendly = SystemType.to_friendly(item['type'])
except KeyError:
friendly = 'column'
except AttributeError:
friendly = 'other'
author = next((u for u in active_users if u['id'] == item['author']), None) or {}
parent = _seen.get(item['owner']) or {}
item = {
**item,
'friendly': friendly,
'owner': parent.get('name'),
'authorName': author.get('name') or item.get('authorName'),
'authorDisplayName': author.get('displayName') or item.get('authorDisplayName'),
}
_seen[item['id']] = item
metadata[friendly].append(item)
return metadata
app = typer.Typer(
help="""
Make Dependencies searchable in your platform.
[b][yellow]USE AT YOUR OWN RISK![/b] This tool uses private API calls which
could change on any version update and break the tool.[/]
Dependencies can be collected for various types of metadata. For example,
many tables are used within a worksheet, while many worksheets will have
answers and pinboards built on top of them.
\b
Metadata Object Metadata Dependent
- guid - guid
- name - parent guid
- description - name
- author guid - description
- author name - author guid
- author display name - author name
- created - author display name
- modified - created
- object type - modified
- context - object type
\f
Also available, but not developed for..
Tag / Stickers -> TAG
Embrace Connections -> DATA_SOURCE
""",
cls=CSToolsGroup,
options_metavar='[--version, --help]'
)
@app.command(cls=CSToolsCommand)
@frontend
def spotapp(
export: pathlib.Path = O_(None, help='directory to save the spot app to', file_okay=False, resolve_path=True),
    # maintained for backwards compatibility
backwards_compat: pathlib.Path = O_(None, '--save_path', help='backwards-compat if specified, directory to save data to', hidden=True),
**frontend_kw
):
"""
Exports the SpotApp associated with this tool.
"""
shutil.copy(HERE / 'static' / 'spotapps.zip', export)
console.print(f'moved the SpotApp to {export}')
@app.command(cls=CSToolsCommand)
@frontend
def gather(
export: pathlib.Path = O_(None, help='directory to save the spot app to', file_okay=False, resolve_path=True),
parent: ParentType=O_(None, help='type of object to find dependents for'),
include_columns: bool=O_(False, '--include-columns', help='whether or not to find column dependents', show_default=False),
    # maintained for backwards compatibility
backwards_compat: pathlib.Path = O_(None, '--save_path', help='backwards-compat if specified, directory to save data to', hidden=True),
**frontend_kw
):
"""
Gather and optionally, insert data into Falcon.
By default, data is automatically gathered and inserted into the
    platform. If the --export argument is used, data will not be inserted
and will instead be dumped to the location specified.
"""
cfg = TSConfig.from_cli_args(**frontend_kw, interactive=True)
export = export or backwards_compat
dir_ = cfg.temp_dir if export is None else export
dir_.parent.mkdir(exist_ok=True)
static = HERE / 'static'
parent_types = [e.value for e in ParentType] if parent is None else [parent]
if include_columns:
parent_types.extend(['formula', 'column'])
with ThoughtSpot(cfg) as ts:
with console.status('getting top level metadata'):
metadata = _get_recordset_metadata(ts.api)
parent_q = FileQueue(dir_ / 'introspect_metadata_object.csv')
children_q = FileQueue(dir_ / 'introspect_metadata_dependent.csv')
with parent_q as pq, children_q as cq:
for parent in parent_types:
with console.status(f'getting dependents of metadata: {parent}'):
_format_metadata_objects(pq, metadata[parent])
_get_dependents(ts.api, cq, parent, metadata[parent])
if export is not None:
return
try:
with console.status('creating tables with remote TQL'):
run_tql_command(ts, command='CREATE DATABASE cs_tools;')
run_tql_script(ts, fp=static / 'create_tables.tql', raise_errors=True)
except common.TableAlreadyExists:
with console.status('altering tables with remote TQL'):
run_tql_script(ts, fp=static / 'alter_tables.tql')
with console.status('loading data to Falcon with remote tsload'):
for stem in ('introspect_metadata_object', 'introspect_metadata_dependent'):
path = dir_ / f'{stem}.csv'
cycle_id = tsload(
ts,
fp=path,
target_database='cs_tools',
target_table=stem,
has_header_row=True
)
path.unlink()
r = ts.api.ts_dataservice.load_status(cycle_id).json()
m = ts.api.ts_dataservice._parse_tsload_status(r)
console.print(m)
|
[
"cs_tools.thoughtspot.ThoughtSpot",
"cs_tools.helpers.cli_ux.console.status",
"pathlib.Path",
"typer.Option",
"cs_tools.util.algo.chunks",
"typer.Typer",
"cs_tools.helpers.cli_ux.console.print",
"cs_tools.tools.common.run_tql_script",
"cs_tools.tools.common.tsload",
"cs_tools.util.datetime.to_datetime",
"shutil.copy",
"cs_tools.settings.TSConfig.from_cli_args",
"cs_tools.tools.common.run_tql_command"
] |
[((6713, 7834), 'typer.Typer', 'typer.Typer', ([], {'help': '"""\n Make Dependencies searchable in your platform.\n\n [b][yellow]USE AT YOUR OWN RISK![/b] This tool uses private API calls which\n could change on any version update and break the tool.[/]\n\n Dependencies can be collected for various types of metadata. For example,\n many tables are used within a worksheet, while many worksheets will have\n answers and pinboards built on top of them.\n\n \x08\n Metadata Object Metadata Dependent\n - guid - guid\n - name - parent guid\n - description - name\n - author guid - description\n - author name - author guid\n - author display name - author name\n - created - author display name\n - modified - created\n - object type - modified\n - context - object type\n\n \x0c\n Also available, but not developed for..\n\n Tag / Stickers -> TAG\n Embrace Connections -> DATA_SOURCE\n """', 'cls': 'CSToolsGroup', 'options_metavar': '"""[--version, --help]"""'}), '(help=\n """\n Make Dependencies searchable in your platform.\n\n [b][yellow]USE AT YOUR OWN RISK![/b] This tool uses private API calls which\n could change on any version update and break the tool.[/]\n\n Dependencies can be collected for various types of metadata. For example,\n many tables are used within a worksheet, while many worksheets will have\n answers and pinboards built on top of them.\n\n \x08\n Metadata Object Metadata Dependent\n - guid - guid\n - name - parent guid\n - description - name\n - author guid - description\n - author name - author guid\n - author display name - author name\n - created - author display name\n - modified - created\n - object type - modified\n - context - object type\n\n \x0c\n Also available, but not developed for..\n\n Tag / Stickers -> TAG\n Embrace Connections -> DATA_SOURCE\n """\n , cls=CSToolsGroup, options_metavar=\'[--version, --help]\')\n', (6724, 7834), False, 'import typer\n'), ((563, 585), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (575, 585), False, 'import pathlib\n'), ((3592, 3614), 'cs_tools.util.algo.chunks', 'chunks', (['metadata'], {'n': '(50)'}), '(metadata, n=50)\n', (3598, 3614), False, 'from cs_tools.util.algo import chunks\n'), ((7926, 8016), 'typer.Option', 'O_', (['None'], {'help': '"""directory to save the spot app to"""', 'file_okay': '(False)', 'resolve_path': '(True)'}), "(None, help='directory to save the spot app to', file_okay=False,\n resolve_path=True)\n", (7928, 8016), True, 'from typer import Option as O_\n'), ((8096, 8202), 'typer.Option', 'O_', (['None', '"""--save_path"""'], {'help': '"""backwards-compat if specified, directory to save data to"""', 'hidden': '(True)'}), "(None, '--save_path', help=\n 'backwards-compat if specified, directory to save data to', hidden=True)\n", (8098, 8202), True, 'from typer import Option as O_\n'), ((8291, 8344), 'shutil.copy', 'shutil.copy', (["(HERE / 'static' / 'spotapps.zip')", 'export'], {}), "(HERE / 'static' / 'spotapps.zip', export)\n", (8302, 8344), False, 'import shutil\n'), ((8349, 8396), 'cs_tools.helpers.cli_ux.console.print', 'console.print', (['f"""moved the SpotApp to {export}"""'], {}), "(f'moved the SpotApp to {export}')\n", (8362, 8396), False, 'from cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand\n'), ((8481, 8571), 'typer.Option', 'O_', (['None'], {'help': '"""directory to save the spot app to"""', 'file_okay': '(False)', 'resolve_path': '(True)'}), "(None, help='directory to save the spot app to', file_okay=False,\n resolve_path=True)\n", (8483, 8571), 
True, 'from typer import Option as O_\n'), ((8592, 8646), 'typer.Option', 'O_', (['None'], {'help': '"""type of object to find dependents for"""'}), "(None, help='type of object to find dependents for')\n", (8594, 8646), True, 'from typer import Option as O_\n'), ((8674, 8778), 'typer.Option', 'O_', (['(False)', '"""--include-columns"""'], {'help': '"""whether or not to find column dependents"""', 'show_default': '(False)'}), "(False, '--include-columns', help=\n 'whether or not to find column dependents', show_default=False)\n", (8676, 8778), True, 'from typer import Option as O_\n'), ((8857, 8963), 'typer.Option', 'O_', (['None', '"""--save_path"""'], {'help': '"""backwards-compat if specified, directory to save data to"""', 'hidden': '(True)'}), "(None, '--save_path', help=\n 'backwards-compat if specified, directory to save data to', hidden=True)\n", (8859, 8963), True, 'from typer import Option as O_\n'), ((9257, 9312), 'cs_tools.settings.TSConfig.from_cli_args', 'TSConfig.from_cli_args', ([], {'interactive': '(True)'}), '(**frontend_kw, interactive=True)\n', (9279, 9312), False, 'from cs_tools.settings import TSConfig\n'), ((9642, 9658), 'cs_tools.thoughtspot.ThoughtSpot', 'ThoughtSpot', (['cfg'], {}), '(cfg)\n', (9653, 9658), False, 'from cs_tools.thoughtspot import ThoughtSpot\n'), ((9679, 9723), 'cs_tools.helpers.cli_ux.console.status', 'console.status', (['"""getting top level metadata"""'], {}), "('getting top level metadata')\n", (9693, 9723), False, 'from cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand\n'), ((10721, 10780), 'cs_tools.helpers.cli_ux.console.status', 'console.status', (['"""loading data to Falcon with remote tsload"""'], {}), "('loading data to Falcon with remote tsload')\n", (10735, 10780), False, 'from cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand\n'), ((10319, 10368), 'cs_tools.helpers.cli_ux.console.status', 'console.status', (['"""creating tables with remote TQL"""'], {}), "('creating tables with remote TQL')\n", (10333, 10368), False, 'from cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand\n'), ((10386, 10442), 'cs_tools.tools.common.run_tql_command', 'run_tql_command', (['ts'], {'command': '"""CREATE DATABASE cs_tools;"""'}), "(ts, command='CREATE DATABASE cs_tools;')\n", (10401, 10442), False, 'from cs_tools.tools.common import run_tql_command, run_tql_script, tsload\n'), ((10459, 10529), 'cs_tools.tools.common.run_tql_script', 'run_tql_script', (['ts'], {'fp': "(static / 'create_tables.tql')", 'raise_errors': '(True)'}), "(ts, fp=static / 'create_tables.tql', raise_errors=True)\n", (10473, 10529), False, 'from cs_tools.tools.common import run_tql_command, run_tql_script, tsload\n'), ((10942, 11033), 'cs_tools.tools.common.tsload', 'tsload', (['ts'], {'fp': 'path', 'target_database': '"""cs_tools"""', 'target_table': 'stem', 'has_header_row': '(True)'}), "(ts, fp=path, target_database='cs_tools', target_table=stem,\n has_header_row=True)\n", (10948, 11033), False, 'from cs_tools.tools.common import run_tql_command, run_tql_script, tsload\n'), ((11331, 11347), 'cs_tools.helpers.cli_ux.console.print', 'console.print', (['m'], {}), '(m)\n', (11344, 11347), False, 'from cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand\n'), ((10035, 10094), 'cs_tools.helpers.cli_ux.console.status', 'console.status', (['f"""getting dependents of metadata: {parent}"""'], {}), "(f'getting dependents of metadata: {parent}')\n", (10049, 10094), False, 'from 
cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand\n'), ((10589, 10638), 'cs_tools.helpers.cli_ux.console.status', 'console.status', (['"""altering tables with remote TQL"""'], {}), "('altering tables with remote TQL')\n", (10603, 10638), False, 'from cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand\n'), ((10656, 10706), 'cs_tools.tools.common.run_tql_script', 'run_tql_script', (['ts'], {'fp': "(static / 'alter_tables.tql')"}), "(ts, fp=static / 'alter_tables.tql')\n", (10670, 10706), False, 'from cs_tools.tools.common import run_tql_command, run_tql_script, tsload\n'), ((2064, 2105), 'cs_tools.util.datetime.to_datetime', 'to_datetime', (["parent['created']"], {'unit': '"""ms"""'}), "(parent['created'], unit='ms')\n", (2075, 2105), False, 'from cs_tools.util.datetime import to_datetime\n'), ((2161, 2203), 'cs_tools.util.datetime.to_datetime', 'to_datetime', (["parent['modified']"], {'unit': '"""ms"""'}), "(parent['modified'], unit='ms')\n", (2172, 2203), False, 'from cs_tools.util.datetime import to_datetime\n'), ((3172, 3217), 'cs_tools.util.datetime.to_datetime', 'to_datetime', (["dependency['created']"], {'unit': '"""ms"""'}), "(dependency['created'], unit='ms')\n", (3183, 3217), False, 'from cs_tools.util.datetime import to_datetime\n'), ((3273, 3319), 'cs_tools.util.datetime.to_datetime', 'to_datetime', (["dependency['modified']"], {'unit': '"""ms"""'}), "(dependency['modified'], unit='ms')\n", (3284, 3319), False, 'from cs_tools.util.datetime import to_datetime\n'), ((4667, 4712), 'cs_tools.util.datetime.to_datetime', 'to_datetime', (["dependency['created']"], {'unit': '"""ms"""'}), "(dependency['created'], unit='ms')\n", (4678, 4712), False, 'from cs_tools.util.datetime import to_datetime\n'), ((4780, 4826), 'cs_tools.util.datetime.to_datetime', 'to_datetime', (["dependency['modified']"], {'unit': '"""ms"""'}), "(dependency['modified'], unit='ms')\n", (4791, 4826), False, 'from cs_tools.util.datetime import to_datetime\n')]
|
#!/usr/bin/python
import math
import random
from utils.log import log
from bots.simpleBots import BasicBot
def get_Chosen(num_cards, desired_score):
chosen = list(range(1,num_cards+1))
    add_back = 0
    while sum(chosen) > desired_score:
        # remove a random card until the running total drops to the target score
        last_removed = random.randint(0, len(chosen) - 1)
        add_back = chosen[last_removed]
        chosen.remove(add_back)
    if add_back:
        # put the last removed card back so the total is not trimmed below the target
        chosen.append(add_back)
    chosen.sort()
    return chosen
class shiftBot(BasicBot):
def __init__(self, player_num, num_players, num_cards, num_games):
#this bot is pretty dumb, and just plays bottom up
self.shift_hand = list(range(1, num_cards+1))
self.num_cards = num_cards
self.player_num = player_num #I can use this to cheat I think by asking the other bots what they are planning on playing
self.num_players = num_players
self.start_index = 1
def end_game(self, result):
#increment index
self.start_index += 1
if(self.start_index >= self.num_cards):
self.start_index = 0
def take_turn(self, game_state, verbose = False):
num_cards_remaining = len(game_state.current_prizes)
index = (self.start_index + self.num_cards - num_cards_remaining) % self.num_cards
return self.shift_hand[index]
class PhillipAdaptoBot(BasicBot):
def __init__(self, player_num, num_players, num_cards, num_games):
#Bot is initialized once at the beginning of the competition, and persists between games.
self.player_num = player_num #I can use this to cheat I think by asking the other bots what they are planning on playing
self.num_players = num_players #normally 2, but ideally, you should allow your bot to gracefully handle more
self.num_cards = num_cards
self.num_games = 50
self.current_record = 0
self.game_count = 0
self.state = 0 #I'll use this to cycle through strategies attempting to hard counter my opponent
self.implemented_strategies = 8 #can only cycle through strategies that I know
self.wobble = 0 #some secret sauce
self.staying_power = 2
self.desired_score = math.ceil((num_cards + 1) * num_cards / 4)
self.chosen = get_Chosen(self.num_cards, self.desired_score)
return
def end_game(self, result):
#Called by GameArena upon game end. Result is the number of the winning bot previous game, -1 if tie
#Likely want to reset any tracking variables that persist between rounds here.
self.game_count += 1
self.chosen = get_Chosen(self.num_cards, self.desired_score)
if result != self.player_num or self.wobble == 1:
            #I think that means I lost, and am not hard countering
self.state += 1
if self.state >= self.implemented_strategies:
self.state = 0 #You're probably sunk at this point
#if self.current_record > self.staying_power:
#self.wobble = 1
self.current_record = 0
else:
self.current_record += 1 # a little ugly, but who cares
#this means I won, and should not change strategy
#want to detect a winning streak
return
def take_turn(self, game_state, verbose = False):
        #a completed bot should wrap all log statements in verbosity checks, so we don't get a flooded console if running 1000 iterations
if verbose:
log(self,"This is a verbose print statment!")
        #the goal is to beat the opponent by one when possible (most efficient)
num_cards_remaining = len(game_state.current_prizes)
my_score = game_state.current_scores[self.player_num]
my_current_hand = game_state.current_hands[self.player_num]
if self.state == 0:#default case should be obvious bot
play = game_state.prize_this_round
elif self.state == 1: #bidding fairly didn't win the first round, could be playing a random bot or literally anything...
if len(my_current_hand) > 1:
play = self.num_cards - len(my_current_hand) + 2
else:
play = min(my_current_hand)
elif self.state == 2:
play = max(my_current_hand)
elif self.state == 3:
if game_state.prize_this_round < self.num_cards:
play = game_state.prize_this_round + 1
else:
play = 1
elif self.state == 4:
if game_state.prize_this_round < self.num_cards - 1:
play = game_state.prize_this_round + 2
else:
play = min(my_current_hand)
elif self.state == 5:
if game_state.prize_this_round > self.num_cards:
play = game_state.prize_this_round - 1
else:
play = max(my_current_hand)
elif self.state == 6:
if game_state.prize_this_round > self.num_cards + 1:
play = game_state.prize_this_round - 2
else:
play = max(my_current_hand)
elif self.state == 7:
if game_state.prize_this_round in self.chosen:
play = my_current_hand[-(len(self.chosen) - self.chosen.index(game_state.prize_this_round)):][0]
#play = max(my_current_hand)
self.chosen.remove(game_state.prize_this_round)
else:
play = min(my_current_hand)
return play # return a card to play
class PhillipBotUpBot(BasicBot):
def take_turn(self, game, verbose = False):
"""
Called by GameArena when it's time to take your turn. You are passed a "game" object with this info to work with:
card = (int) value 1 thru num_cards
variables available to your bot:
self.player_num = your player number
self.num_players = normally 2, but ideally, you should allow your bot to gracefully handle more
self.num_cards = normally 13, but ideally, you should allow your bot to gracefully handle any amount
game_state.current_won_cards[player_num][cards] = list of cards each player has won so far
game_state.current_scores[player_num] = current score of each each player
game_state.current_hands[player][cards] = list of cards currently in each player's hand
game_state.current_prizes[cards] = list of prizes remaining
game_state.prize_this_round (int) = current prize showing for this round
"""
num_cards_remaining = len(game.current_prizes)
my_score = game.current_scores[self.player_num]
my_current_hand = game.current_hands[self.player_num]
if (my_score > 0) or (game.prize_this_round == 12):
play = max(my_current_hand)
else:
play = min(my_current_hand) #base strategy, need to add tweaks later
return play
|
[
"utils.log.log",
"math.ceil"
] |
[((1997, 2039), 'math.ceil', 'math.ceil', (['((num_cards + 1) * num_cards / 4)'], {}), '((num_cards + 1) * num_cards / 4)\n', (2006, 2039), False, 'import math\n'), ((3112, 3158), 'utils.log.log', 'log', (['self', '"""This is a verbose print statment!"""'], {}), "(self, 'This is a verbose print statment!')\n", (3115, 3158), False, 'from utils.log import log\n')]
|
import uuid
from typing import Dict, List
from nehushtan.ws.NehushtanWebsocketConnectionEntity import NehushtanWebsocketConnectionEntity
class TestWebsocketRegisterAgent:
def __init__(self):
self.__map: Dict[str, NehushtanWebsocketConnectionEntity] = {}
self.agent_identity = str(uuid.uuid4())
def register(self, websocket):
entity = NehushtanWebsocketConnectionEntity(websocket)
self.__map[entity.get_key()] = entity
print(f"TestWebsocketRegisterAgent[{self.agent_identity}] registered [{entity.get_key()}]")
return entity
def unregister(self, key: str):
if self.__map.get(key):
del self.__map[key]
print(f"TestWebsocketRegisterAgent[{self.agent_identity}] unregistered [{key}]")
def read(self, key: str):
print(f"TestWebsocketRegisterAgent[{self.agent_identity}] reading [{key}]")
return self.__map.get(key)
def list_for_server(self, local_key: str) -> List[NehushtanWebsocketConnectionEntity]:
print(f"TestWebsocketRegisterAgent[{self.agent_identity}] listing for [{local_key}]")
        entities = []
        for k, v in self.__map.items():
            if v.get_local_key() == local_key:
                entities.append(v)
        return entities
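
# --------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module). It assumes a live `websocket` object produced by the surrounding
# nehushtan server loop; only the methods defined in this file are relied on.
#
#   agent = TestWebsocketRegisterAgent()
#   entity = agent.register(websocket)           # wrap and store the connection
#   same_entity = agent.read(entity.get_key())   # later lookup by key
#   peers = agent.list_for_server(entity.get_local_key())
#   agent.unregister(entity.get_key())           # drop it when the socket closes
# --------------------------------------------------------------------------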
|
[
"nehushtan.ws.NehushtanWebsocketConnectionEntity.NehushtanWebsocketConnectionEntity",
"uuid.uuid4"
] |
[((371, 416), 'nehushtan.ws.NehushtanWebsocketConnectionEntity.NehushtanWebsocketConnectionEntity', 'NehushtanWebsocketConnectionEntity', (['websocket'], {}), '(websocket)\n', (405, 416), False, 'from nehushtan.ws.NehushtanWebsocketConnectionEntity import NehushtanWebsocketConnectionEntity\n'), ((304, 316), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (314, 316), False, 'import uuid\n')]
|
# Generated by Django 4.0.3 on 2022-04-02 17:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='cocktail',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_updated', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('id_api', models.PositiveIntegerField()),
],
),
migrations.CreateModel(
name='party',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('paypal', models.URLField()),
('name', models.CharField(max_length=50)),
('resume', models.CharField(max_length=500)),
('place', models.CharField(max_length=150)),
('datehour', models.DateTimeField()),
('last_updated', models.DateTimeField(auto_now=True)),
('price', models.FloatField()),
('drink', models.ManyToManyField(to='partycipe.cocktail')),
('organisate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='participate',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_updated', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('etat', models.BooleanField()),
('party', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='partycipe.party')),
('utilisateur', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"django.db.models.FloatField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.BooleanField",
"django.db.models.PositiveIntegerField",
"django.db.models.BigAutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.URLField",
"django.db.models.CharField"
] |
[((259, 316), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (290, 316), False, 'from django.db import migrations, models\n'), ((456, 552), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (475, 552), False, 'from django.db import migrations, models\n'), ((585, 620), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (605, 620), False, 'from django.db import migrations, models\n'), ((652, 691), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (672, 691), False, 'from django.db import migrations, models\n'), ((722, 751), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (749, 751), False, 'from django.db import migrations, models\n'), ((888, 984), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (907, 984), False, 'from django.db import migrations, models\n'), ((1012, 1051), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1032, 1051), False, 'from django.db import migrations, models\n'), ((1082, 1099), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (1097, 1099), False, 'from django.db import migrations, models\n'), ((1128, 1159), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1144, 1159), False, 'from django.db import migrations, models\n'), ((1190, 1222), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1206, 1222), False, 'from django.db import migrations, models\n'), ((1252, 1284), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (1268, 1284), False, 'from django.db import migrations, models\n'), ((1317, 1339), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1337, 1339), False, 'from django.db import migrations, models\n'), ((1376, 1411), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1396, 1411), False, 'from django.db import migrations, models\n'), ((1441, 1460), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1458, 1460), False, 'from django.db import migrations, models\n'), ((1490, 1537), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""partycipe.cocktail"""'}), "(to='partycipe.cocktail')\n", (1512, 1537), False, 'from django.db import migrations, models\n'), ((1572, 1668), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1589, 1668), False, 'from django.db import migrations, models\n'), ((1806, 1902), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), 
"(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1825, 1902), False, 'from django.db import migrations, models\n'), ((1935, 1970), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1955, 1970), False, 'from django.db import migrations, models\n'), ((2002, 2041), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2022, 2041), False, 'from django.db import migrations, models\n'), ((2070, 2091), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (2089, 2091), False, 'from django.db import migrations, models\n'), ((2121, 2210), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""partycipe.party"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'partycipe.party')\n", (2138, 2210), False, 'from django.db import migrations, models\n'), ((2241, 2337), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (2258, 2337), False, 'from django.db import migrations, models\n')]
|
"""
TutorialMirror
A simple mirror object to experiment with.
"""
from evennia import DefaultObject
from evennia.utils import make_iter, is_iter
from evennia import logger
class TutorialMirror(DefaultObject):
"""
A simple mirror object that
- echoes back the description of the object looking at it
- echoes back whatever is being sent to its .msg - to the
sender, if given, otherwise to the location of the mirror.
"""
def return_appearance(self, looker, **kwargs):
"""
This formats the description of this object. Called by the 'look' command.
Args:
looker (Object): Object doing the looking.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
if isinstance(looker, self.__class__):
# avoid infinite recursion by having two mirrors look at each other
return "The image of yourself stretches into infinity."
return f"{self.key} shows your reflection:\n{looker.db.desc}"
def msg(self, text=None, from_obj=None, **kwargs):
"""
Simply override .msg to echo back to the messenger or to the current
location.
Args:
text (str or tuple, optional): The message to send. This
is treated internally like any send-command, so its
value can be a tuple if sending multiple arguments to
the `text` oob command.
            from_obj (obj or iterable, optional): The object sending the message. If
                given, at_msg_send will be called. This value will be
                passed on to the protocol. If iterable, will execute hook
                on all entities in it.
"""
if not text:
text = "<silence>"
text = text[0] if is_iter(text) else text
if from_obj:
for obj in make_iter(from_obj):
obj.msg(f'{self.key} echoes back to you:\n"{text}".')
elif self.location:
self.location.msg_contents(f'{self.key} echoes back:\n"{text}".', exclude=[self])
else:
# no from_obj and no location, just log
logger.log_msg(f"{self.key}.msg was called without from_obj and .location is None.")
|
[
"evennia.logger.log_msg",
"evennia.utils.make_iter",
"evennia.utils.is_iter"
] |
[((1810, 1823), 'evennia.utils.is_iter', 'is_iter', (['text'], {}), '(text)\n', (1817, 1823), False, 'from evennia.utils import make_iter, is_iter\n'), ((1878, 1897), 'evennia.utils.make_iter', 'make_iter', (['from_obj'], {}), '(from_obj)\n', (1887, 1897), False, 'from evennia.utils import make_iter, is_iter\n'), ((2169, 2258), 'evennia.logger.log_msg', 'logger.log_msg', (['f"""{self.key}.msg was called without from_obj and .location is None."""'], {}), "(\n f'{self.key}.msg was called without from_obj and .location is None.')\n", (2183, 2258), False, 'from evennia import logger\n')]
|
# -----------------------------------------------------------------------
# Author: <NAME>
#
# Purpose: Determines the fire season for each window. The fire season is
# defined as the minimum number of consecutive months that contain more
# than 80% of the burned area (Archibald ett al 2013; Abatzoglou et al.
# 2018).
#
# References:
# * <NAME>., <NAME>., <NAME>., & Bradstock,
# <NAME>. (2013). Defining pyromes and global syndromes of fire regimes.
# Proceedings of the National Academy of Sciences of the United States
# of America, 110(16), 6442–6447.
#
# * <NAME>., <NAME>., <NAME>., <NAME>., &
# <NAME>. (2018). Global patterns of interannual climate–fire
# relationships. Global Change Biology, 24(11), 5164–5175.
# -----------------------------------------------------------------------
import os
from calendar import month_abbr
import pandas as pd
from src.utils.constants import REGIONS, BURNED_AREA_THRESHOLD
if __name__ == "__main__":
# Project's root
os.chdir("../..")
output_folder = "results/csv"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
df = pd.DataFrame(columns=["window", "months"])
for region in REGIONS:
month_groups = pd.read_excel(
f"results/xlsx/{region['name']}/fire_groups.xlsx", sheet_name="Month"
)
# Compute 80% threshold.
threshold = month_groups["area"].sum() * BURNED_AREA_THRESHOLD
    # Sort months from largest to smallest burned area and compute the
# cumulative sum.
sorted_groups = month_groups.sort_values(by="area", ascending=False)
sorted_groups = sorted_groups.reset_index(drop=True)
sorted_groups["cumulative_area"] = sorted_groups["area"].cumsum()
# Get the months with the largest burned area that compose more
# than 80% of the total burned area and change from month int to
# month abbreviation.
above_threshold = sorted_groups["cumulative_area"] >= threshold
fire_season_months = sorted_groups["month"].loc[:above_threshold.idxmax()]
fire_season_months = fire_season_months.sort_values()
fire_season_months = fire_season_months.apply(lambda x: month_abbr[x])
months = fire_season_months.str.cat(sep="-")
df = df.append({"window": region["name"], "months": months}, ignore_index=True)
save_to = os.path.join(output_folder, "fire_season_months.csv")
df.to_csv(save_to, index=False)
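
# --------------------------------------------------------------------------
# Hedged toy illustration of the 80% cumulative-area selection performed
# above (comments only, added for clarity; the column names mirror the ones
# used in the loop and the data below is invented):
#
#   toy = pd.DataFrame({"month": range(1, 13),
#                       "area": [5, 2, 1, 1, 30, 40, 10, 4, 3, 2, 1, 1]})
#   threshold = toy["area"].sum() * 0.8                     # 80 out of 100
#   ordered = toy.sort_values("area", ascending=False).reset_index(drop=True)
#   ordered["cumulative_area"] = ordered["area"].cumsum()
#   above = ordered["cumulative_area"] >= threshold
#   season = ordered["month"].loc[:above.idxmax()].sort_values()
#   # season -> months 5, 6 and 7: the smallest set of months whose combined
#   # area first reaches 80% of the total burned area.
# --------------------------------------------------------------------------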
|
[
"os.path.exists",
"os.makedirs",
"os.path.join",
"os.chdir",
"pandas.read_excel",
"pandas.DataFrame"
] |
[((989, 1006), 'os.chdir', 'os.chdir', (['"""../.."""'], {}), "('../..')\n", (997, 1006), False, 'import os\n'), ((1129, 1171), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['window', 'months']"}), "(columns=['window', 'months'])\n", (1141, 1171), True, 'import pandas as pd\n'), ((2378, 2431), 'os.path.join', 'os.path.join', (['output_folder', '"""fire_season_months.csv"""'], {}), "(output_folder, 'fire_season_months.csv')\n", (2390, 2431), False, 'import os\n'), ((1053, 1082), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (1067, 1082), False, 'import os\n'), ((1092, 1118), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (1103, 1118), False, 'import os\n'), ((1224, 1313), 'pandas.read_excel', 'pd.read_excel', (['f"""results/xlsx/{region[\'name\']}/fire_groups.xlsx"""'], {'sheet_name': '"""Month"""'}), '(f"results/xlsx/{region[\'name\']}/fire_groups.xlsx", sheet_name\n =\'Month\')\n', (1237, 1313), True, 'import pandas as pd\n')]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow_probability import distributions as tfd
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from ncp import tools
def network(inputs, config):
hidden = inputs
for size in config.layer_sizes:
hidden = tf.layers.dense(hidden, size, tf.nn.leaky_relu)
mean = tf.layers.dense(hidden, 1)
noise = tf.layers.dense(hidden, 1, tf.nn.softplus) + 1e-6
uncertainty = tf.layers.dense(hidden, 1, None)
return mean, noise, uncertainty
def define_graph(config):
network_tpl = tf.make_template('network', network, config=config)
inputs = tf.placeholder(tf.float32, [None, config.num_inputs])
targets = tf.placeholder(tf.float32, [None, 1])
num_visible = tf.placeholder(tf.int32, [])
batch_size = tf.to_float(tf.shape(inputs)[0])
data_mean, data_noise, data_uncertainty = network_tpl(inputs)
ood_inputs = inputs + tf.random_normal(
tf.shape(inputs), 0.0, config.noise_std)
ood_mean, ood_noise, ood_uncertainty = network_tpl(ood_inputs)
losses = [
-tfd.Normal(data_mean, data_noise).log_prob(targets),
-tfd.Bernoulli(data_uncertainty).log_prob(0),
-tfd.Bernoulli(ood_uncertainty).log_prob(1),
]
if config.center_at_target:
losses.append(-tfd.Normal(ood_mean, ood_noise).log_prob(targets))
loss = sum(tf.reduce_sum(loss) for loss in losses) / batch_size
optimizer = tf.train.AdamOptimizer(config.learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(
loss, colocate_gradients_with_ops=True))
if config.clip_gradient:
gradients, _ = tf.clip_by_global_norm(gradients, config.clip_gradient)
optimize = optimizer.apply_gradients(zip(gradients, variables))
data_uncertainty = tf.sigmoid(data_uncertainty)
if not config.center_at_target:
data_mean = (1 - data_uncertainty) * data_mean + data_uncertainty * 0
data_noise = (1 - data_uncertainty) * data_noise + data_uncertainty * 0.1
return tools.AttrDict(locals())
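
# --------------------------------------------------------------------------
# Hedged usage sketch (added for illustration). The hyperparameter names
# simply mirror the attributes read by network()/define_graph() above; the
# values are invented and tools.AttrDict is only assumed to wrap a plain dict.
#
#   config = tools.AttrDict(dict(
#       num_inputs=1, layer_sizes=[50, 50], noise_std=0.1,
#       center_at_target=True, learning_rate=1e-3, clip_gradient=10.0))
#   graph = define_graph(config)
#   # graph.inputs, graph.targets, graph.loss and graph.optimize can then be
#   # fed to a standard tf.Session run loop.
# --------------------------------------------------------------------------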
|
[
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.clip_by_global_norm",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.reduce_sum",
"tensorflow_probability.distributions.Normal",
"tensorflow_probability.distributions.Bernoulli",
"tensorflow.compat.v1.layers.dense",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.make_template",
"tensorflow.compat.v1.sigmoid"
] |
[((666, 690), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (688, 690), True, 'import tensorflow.compat.v1 as tf\n'), ((867, 893), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['hidden', '(1)'], {}), '(hidden, 1)\n', (882, 893), True, 'import tensorflow.compat.v1 as tf\n'), ((970, 1002), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['hidden', '(1)', 'None'], {}), '(hidden, 1, None)\n', (985, 1002), True, 'import tensorflow.compat.v1 as tf\n'), ((1081, 1132), 'tensorflow.compat.v1.make_template', 'tf.make_template', (['"""network"""', 'network'], {'config': 'config'}), "('network', network, config=config)\n", (1097, 1132), True, 'import tensorflow.compat.v1 as tf\n'), ((1144, 1197), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32', '[None, config.num_inputs]'], {}), '(tf.float32, [None, config.num_inputs])\n', (1158, 1197), True, 'import tensorflow.compat.v1 as tf\n'), ((1210, 1247), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]'], {}), '(tf.float32, [None, 1])\n', (1224, 1247), True, 'import tensorflow.compat.v1 as tf\n'), ((1264, 1292), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[]'], {}), '(tf.int32, [])\n', (1278, 1292), True, 'import tensorflow.compat.v1 as tf\n'), ((1919, 1963), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['config.learning_rate'], {}), '(config.learning_rate)\n', (1941, 1963), True, 'import tensorflow.compat.v1 as tf\n'), ((2259, 2287), 'tensorflow.compat.v1.sigmoid', 'tf.sigmoid', (['data_uncertainty'], {}), '(data_uncertainty)\n', (2269, 2287), True, 'import tensorflow.compat.v1 as tf\n'), ((810, 857), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['hidden', 'size', 'tf.nn.leaky_relu'], {}), '(hidden, size, tf.nn.leaky_relu)\n', (825, 857), True, 'import tensorflow.compat.v1 as tf\n'), ((904, 946), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['hidden', '(1)', 'tf.nn.softplus'], {}), '(hidden, 1, tf.nn.softplus)\n', (919, 946), True, 'import tensorflow.compat.v1 as tf\n'), ((2116, 2171), 'tensorflow.compat.v1.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', 'config.clip_gradient'], {}), '(gradients, config.clip_gradient)\n', (2138, 2171), True, 'import tensorflow.compat.v1 as tf\n'), ((1320, 1336), 'tensorflow.compat.v1.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (1328, 1336), True, 'import tensorflow.compat.v1 as tf\n'), ((1453, 1469), 'tensorflow.compat.v1.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (1461, 1469), True, 'import tensorflow.compat.v1 as tf\n'), ((1852, 1871), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (1865, 1871), True, 'import tensorflow.compat.v1 as tf\n'), ((1579, 1612), 'tensorflow_probability.distributions.Normal', 'tfd.Normal', (['data_mean', 'data_noise'], {}), '(data_mean, data_noise)\n', (1589, 1612), True, 'from tensorflow_probability import distributions as tfd\n'), ((1639, 1670), 'tensorflow_probability.distributions.Bernoulli', 'tfd.Bernoulli', (['data_uncertainty'], {}), '(data_uncertainty)\n', (1652, 1670), True, 'from tensorflow_probability import distributions as tfd\n'), ((1691, 1721), 'tensorflow_probability.distributions.Bernoulli', 'tfd.Bernoulli', (['ood_uncertainty'], {}), '(ood_uncertainty)\n', (1704, 1721), True, 'from tensorflow_probability import distributions as tfd\n'), ((1788, 1819), 'tensorflow_probability.distributions.Normal', 'tfd.Normal', (['ood_mean', 
'ood_noise'], {}), '(ood_mean, ood_noise)\n', (1798, 1819), True, 'from tensorflow_probability import distributions as tfd\n')]
|
from setuptools import setup, find_packages
setup(
name='SBIExperiments',
version='0.0.1',
url='https://github.com/astrodeepnet/sbi_experiments',
author='<NAME> and friends',
description='Package for numerical experiments of SBI tools',
packages=find_packages(),
install_requires=[
'numpy>=1.19.2',
'jax>=0.2.0',
'tensorflow_probability>=0.14.1',
'scikit-learn>=0.21',
'jaxopt>=0.2'
],
)
|
[
"setuptools.find_packages"
] |
[((259, 274), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (272, 274), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/env python3
"""
Author : antoniog1
Date : 2019-02-21
Purpose: Rock the Casbah
"""
import argparse
import sys
import os
# --------------------------------------------------
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Argparse Python script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('positional', metavar='DIR', type = str, help='A positional argument', nargs="+")
parser.add_argument('-w', '--width', help='A named integer argument', metavar='int', type=int, default=50)
return parser.parse_args()
# --------------------------------------------------
def warn(msg):
"""Print a message to STDERR"""
print(msg, file=sys.stderr)
# --------------------------------------------------
def die(msg='Something bad happened'):
"""warn() and exit with error"""
warn(msg)
sys.exit(1)
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
width = args.width
directory = args.positional
for dir_name in directory:
dir_dict = {}
if not os.path.isdir(dir_name):
warn('"{}" is not a directory'.format(dir_name))
continue
print(dir_name)
for filename in os.listdir(dir_name):
path = os.path.join(dir_name,filename)
with open(path) as f:
first_line = f.readline().rstrip()
dir_dict[first_line] = filename
for line, file in sorted(dir_dict.items()):
num_per = width - len(line) - len(file)
ellipses = "." * num_per
print('{} {} {}'.format(line,ellipses,file))
# --------------------------------------------------
if __name__ == '__main__':
main()
|
[
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"os.path.isdir",
"sys.exit"
] |
[((254, 375), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Argparse Python script"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Argparse Python script',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (277, 375), False, 'import argparse\n'), ((927, 938), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (935, 938), False, 'import sys\n'), ((1345, 1365), 'os.listdir', 'os.listdir', (['dir_name'], {}), '(dir_name)\n', (1355, 1365), False, 'import os\n'), ((1189, 1212), 'os.path.isdir', 'os.path.isdir', (['dir_name'], {}), '(dir_name)\n', (1202, 1212), False, 'import os\n'), ((1398, 1430), 'os.path.join', 'os.path.join', (['dir_name', 'filename'], {}), '(dir_name, filename)\n', (1410, 1430), False, 'import os\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
import google.auth
from google.cloud import bigquery
import pytest
import export_to_bigquery
GCLOUD_TESTS_PREFIX = "python_samples_tests"
@pytest.fixture
def project_id():
_, project_id = google.auth.default()
return project_id
@pytest.fixture
def unique_id():
uuid_hex = uuid.uuid4().hex[:8]
return f"{GCLOUD_TESTS_PREFIX}_{uuid_hex}"
@pytest.fixture
def bigquery_resources(project_id, unique_id):
# Create a BigQuery dataset.
bigquery_client = bigquery.Client()
dataset_id = unique_id
table_id = unique_id
dataset = bigquery.Dataset(f"{project_id}.{dataset_id}")
dataset.location = "US"
bigquery_client.create_dataset(dataset, timeout=30)
# Create a BigQuery table under the created dataset.
table = bigquery.Table(f"{project_id}.{dataset_id}.{table_id}")
bigquery_client.create_table(table)
yield dataset_id, table_id
# Delete the BigQuery dataset and table.
bigquery_client.delete_dataset(dataset_id, delete_contents=True)
def test_export_data_to_bigquery(capsys, project_id, bigquery_resources):
dataset_id, table_id = bigquery_resources
export_to_bigquery.export_to_bigquery(project_id, project_id, dataset_id, table_id)
out, err = capsys.readouterr()
assert "Exported data to BigQuery" in out
|
[
"uuid.uuid4",
"google.cloud.bigquery.Dataset",
"google.cloud.bigquery.Table",
"export_to_bigquery.export_to_bigquery",
"google.cloud.bigquery.Client"
] |
[((1069, 1086), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (1084, 1086), False, 'from google.cloud import bigquery\n'), ((1154, 1200), 'google.cloud.bigquery.Dataset', 'bigquery.Dataset', (['f"""{project_id}.{dataset_id}"""'], {}), "(f'{project_id}.{dataset_id}')\n", (1170, 1200), False, 'from google.cloud import bigquery\n'), ((1355, 1410), 'google.cloud.bigquery.Table', 'bigquery.Table', (['f"""{project_id}.{dataset_id}.{table_id}"""'], {}), "(f'{project_id}.{dataset_id}.{table_id}')\n", (1369, 1410), False, 'from google.cloud import bigquery\n'), ((1724, 1811), 'export_to_bigquery.export_to_bigquery', 'export_to_bigquery.export_to_bigquery', (['project_id', 'project_id', 'dataset_id', 'table_id'], {}), '(project_id, project_id, dataset_id,\n table_id)\n', (1761, 1811), False, 'import export_to_bigquery\n'), ((881, 893), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (891, 893), False, 'import uuid\n')]
|
import serial
import RPi.GPIO as GPIO
import time
ser=serial.Serial("/dev/ttyACM0",9600)
start_time = time.time()
imu = open("IMU.txt","w")
while time.time() - start_time <= 1:
ser.readline()
while time.time() - start_time <= 8:
read_ser=ser.readline()
if float(read_ser) == 0.00:
pass
else:
read = read_ser.strip('\n')
imu.write(read)
imu.write('\n')
imu.close()
|
[
"serial.Serial",
"time.time"
] |
[((59, 94), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyACM0"""', '(9600)'], {}), "('/dev/ttyACM0', 9600)\n", (72, 94), False, 'import serial\n'), ((108, 119), 'time.time', 'time.time', ([], {}), '()\n', (117, 119), False, 'import time\n'), ((156, 167), 'time.time', 'time.time', ([], {}), '()\n', (165, 167), False, 'import time\n'), ((216, 227), 'time.time', 'time.time', ([], {}), '()\n', (225, 227), False, 'import time\n')]
|
from pyg_base._types import is_iterable
from pyg_base._loop import len0
__all__ = ['zipper', 'lens']
def lens(*values):
"""
measures (and enforces) a common length across all values
:Parameters:
----------------
*values : lists
Raises
------
ValueError
if you have values with multi lengths.
:Returns:
-------
int
common length.
:Example:
--------------
>>> assert lens() == 0
>>> assert lens([1,2,3], [2,4,5]) == 3
>>> assert lens([1,2,3], [2,4,5], [6]) == 3
"""
if len0(values) == 0:
return 0
all_lens = [len0(value) for value in values]
lens = set(all_lens) - {1}
if len(lens)>1:
raise ValueError('found multiple lengths %s '%lens)
return list(lens)[0] if lens else 1
def zipper(*values):
"""
a safer version of zip
    :Examples: zipper works with single values as well as full lists:
---------------
>>> assert list(zipper([1,2,3], 4)) == [(1, 4), (2, 4), (3, 4)]
>>> assert list(zipper([1,2,3], [4,5,6])) == [(1, 4), (2, 5), (3, 6)]
>>> assert list(zipper([1,2,3], [4,5,6], [7])) == [(1, 4, 7), (2, 5, 7), (3, 6, 7)]
>>> assert list(zipper([1,2,3], [4,5,6], None)) == [(1, 4, None), (2, 5, None), (3, 6, None)]
>>> assert list(zipper((1,2,3), np.array([4,5,6]), None)) == [(1, 4, None), (2, 5, None), (3, 6, None)]
:Examples: zipper rejects multi-length lists
---------------
>>> import pytest
>>> with pytest.raises(ValueError):
>>> zipper([1,2,3], [4,5])
:Parameters:
----------------
*values : lists
values to be zipped
:Returns:
-------
zipped values
"""
values = [list(value) if isinstance(value, zip) else value if is_iterable(value) else [value] for value in values]
n = lens(*values)
values = [value * n if len(value) == 1 else value for value in values]
return zip(*values)
|
[
"pyg_base._types.is_iterable",
"pyg_base._loop.len0"
] |
[((569, 581), 'pyg_base._loop.len0', 'len0', (['values'], {}), '(values)\n', (573, 581), False, 'from pyg_base._loop import len0\n'), ((621, 632), 'pyg_base._loop.len0', 'len0', (['value'], {}), '(value)\n', (625, 632), False, 'from pyg_base._loop import len0\n'), ((1775, 1793), 'pyg_base._types.is_iterable', 'is_iterable', (['value'], {}), '(value)\n', (1786, 1793), False, 'from pyg_base._types import is_iterable\n')]
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running tests for custom eslint checks."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import subprocess
import sys
from core import python_utils
from scripts import common
def main():
"""Run the tests."""
node_path = os.path.join(common.NODE_PATH, 'bin', 'node')
nyc_path = os.path.join('node_modules', 'nyc', 'bin', 'nyc.js')
mocha_path = os.path.join('node_modules', 'mocha', 'bin', 'mocha')
filepath = 'scripts/linters/custom_eslint_checks/rules/'
proc_args = [node_path, nyc_path, mocha_path, filepath]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_tests_stdout, encoded_tests_stderr = proc.communicate()
# Standard and error output is in bytes, we need to decode the line to
# print it.
tests_stdout = encoded_tests_stdout.decode('utf-8')
tests_stderr = encoded_tests_stderr.decode('utf-8')
if tests_stderr:
python_utils.PRINT(tests_stderr)
sys.exit(1)
python_utils.PRINT(tests_stdout)
if 'failing' in tests_stdout:
python_utils.PRINT('---------------------------')
python_utils.PRINT('Tests not passed')
python_utils.PRINT('---------------------------')
sys.exit(1)
else:
python_utils.PRINT('---------------------------')
python_utils.PRINT('All tests passed')
python_utils.PRINT('---------------------------')
    coverage_result = re.search(
r'All files\s*\|\s*(?P<stmts>\S+)\s*\|\s*(?P<branch>\S+)\s*\|\s*'
r'(?P<funcs>\S+)\s*\|\s*(?P<lines>\S+)\s*\|\s*', tests_stdout)
if (coverage_result.group('stmts') != '100' or
coverage_result.group('branch') != '100' or
coverage_result.group('funcs') != '100' or
coverage_result.group('lines') != '100'):
raise Exception('Eslint test coverage is not 100%')
if __name__ == '__main__':
main()
|
[
"subprocess.Popen",
"core.python_utils.PRINT",
"os.path.join",
"sys.exit",
"re.search"
] |
[((923, 968), 'os.path.join', 'os.path.join', (['common.NODE_PATH', '"""bin"""', '"""node"""'], {}), "(common.NODE_PATH, 'bin', 'node')\n", (935, 968), False, 'import os\n'), ((984, 1036), 'os.path.join', 'os.path.join', (['"""node_modules"""', '"""nyc"""', '"""bin"""', '"""nyc.js"""'], {}), "('node_modules', 'nyc', 'bin', 'nyc.js')\n", (996, 1036), False, 'import os\n'), ((1054, 1107), 'os.path.join', 'os.path.join', (['"""node_modules"""', '"""mocha"""', '"""bin"""', '"""mocha"""'], {}), "('node_modules', 'mocha', 'bin', 'mocha')\n", (1066, 1107), False, 'import os\n'), ((1241, 1316), 'subprocess.Popen', 'subprocess.Popen', (['proc_args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (1257, 1316), False, 'import subprocess\n'), ((1684, 1716), 'core.python_utils.PRINT', 'python_utils.PRINT', (['tests_stdout'], {}), '(tests_stdout)\n', (1702, 1716), False, 'from core import python_utils\n'), ((2142, 2304), 're.search', 're.search', (['"""All files\\\\s*\\\\|\\\\s*(?P<stmts>\\\\S+)\\\\s*\\\\|\\\\s*(?P<branch>\\\\S+)\\\\s*\\\\|\\\\s*(?P<funcs>\\\\S+)\\\\s*\\\\|\\\\s*(?P<lines>\\\\S+)\\\\s*\\\\|\\\\s*"""', 'tests_stdout'], {}), "(\n 'All files\\\\s*\\\\|\\\\s*(?P<stmts>\\\\S+)\\\\s*\\\\|\\\\s*(?P<branch>\\\\S+)\\\\s*\\\\|\\\\s*(?P<funcs>\\\\S+)\\\\s*\\\\|\\\\s*(?P<lines>\\\\S+)\\\\s*\\\\|\\\\s*'\n , tests_stdout)\n", (2151, 2304), False, 'import re\n'), ((1627, 1659), 'core.python_utils.PRINT', 'python_utils.PRINT', (['tests_stderr'], {}), '(tests_stderr)\n', (1645, 1659), False, 'from core import python_utils\n'), ((1668, 1679), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1676, 1679), False, 'import sys\n'), ((1759, 1808), 'core.python_utils.PRINT', 'python_utils.PRINT', (['"""---------------------------"""'], {}), "('---------------------------')\n", (1777, 1808), False, 'from core import python_utils\n'), ((1817, 1855), 'core.python_utils.PRINT', 'python_utils.PRINT', (['"""Tests not passed"""'], {}), "('Tests not passed')\n", (1835, 1855), False, 'from core import python_utils\n'), ((1864, 1913), 'core.python_utils.PRINT', 'python_utils.PRINT', (['"""---------------------------"""'], {}), "('---------------------------')\n", (1882, 1913), False, 'from core import python_utils\n'), ((1922, 1933), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1930, 1933), False, 'import sys\n'), ((1952, 2001), 'core.python_utils.PRINT', 'python_utils.PRINT', (['"""---------------------------"""'], {}), "('---------------------------')\n", (1970, 2001), False, 'from core import python_utils\n'), ((2010, 2048), 'core.python_utils.PRINT', 'python_utils.PRINT', (['"""All tests passed"""'], {}), "('All tests passed')\n", (2028, 2048), False, 'from core import python_utils\n'), ((2057, 2106), 'core.python_utils.PRINT', 'python_utils.PRINT', (['"""---------------------------"""'], {}), "('---------------------------')\n", (2075, 2106), False, 'from core import python_utils\n')]
|
from __future__ import print_function
from pyomo.environ import *
from pyomo.core.base import Constraint, Objective, Suffix, minimize
from pyomo.opt import ProblemFormat, SolverFactory
from nmpc_mhe.dync.NMPCGenv2 import NmpcGen
from nmpc_mhe.mods.bfb.nob5_hi_t import bfb_dae
from snap_shot import snap
import sys, os
import itertools, sys
from numpy.random import normal as npm
# SWITCH TO JUST ONE COLLOCATION POINT AND FINITE ELEMENT
states = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
# x_noisy = ["Ngb", "Hgb", "Ngc", "Hgc", "Nsc", "Hsc", "Nge", "Hge", "Nse", "Hse", "mom"]
# x_noisy = ["Hse"]
x_noisy = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
u = ["u1"]
u_bounds = {"u1":(162.183495794 * 0.0005, 162.183495794 * 10000)}
ref_state = {("c_capture", ((),)): 0.63}
# Known targets 0.38, 0.4, 0.5
nfe_mhe = 10
y = ["Tgb", "vg"]
nfet = 10
ncpx = 3
nfex = 5
tfe = [i for i in range(1, nfe_mhe + 1)]
lfe = [i for i in range(1, nfex + 1)]
lcp = [i for i in range(1, ncpx + 1)]
lc = ['c', 'h', 'n']
y_vars = {
"Tgb": [i for i in itertools.product(lfe, lcp)],
"vg": [i for i in itertools.product(lfe, lcp)]
}
# x_vars = dict()
x_vars = {
# "Nge": [i for i in itertools.product(lfe, lcp, lc)],
# "Hge": [i for i in itertools.product(lfe, lcp)],
"Nsc": [i for i in itertools.product(lfe, lcp, lc)],
"Hsc": [i for i in itertools.product(lfe, lcp)],
"Nse": [i for i in itertools.product(lfe, lcp, lc)],
"Hse": [i for i in itertools.product(lfe, lcp)],
"Hgc": [i for i in itertools.product(lfe, lcp)],
"Hge": [i for i in itertools.product(lfe, lcp)],
# "mom": [i for i in itertools.product(lfe, lcp)]
}
# States -- (5 * 3 + 6) * fe_x * cp_x.
# For fe_x = 5 and cp_x = 3 we will have 315 differential-states.
e = NmpcGen(bfb_dae, 400/nfe_mhe, states, u,
ref_state=ref_state, u_bounds=u_bounds,
nfe_tnmpc=nfe_mhe, ncp_tnmpc=1,
nfe_t=5, ncp_t=1)
# 10 fe & _t=1000 definitely degenerate
# 10 fe & _t=900 definitely degenerate
# 10 fe & _t=120 sort-of degenerate
# 10 fe & _t=50 sort-of degenerate
# 10 fe & _t=50 eventually sort-of degenerate
# 10 fe & _t=1 eventually sort-of degenerate
e.SteadyRef.dref = snap
e.load_iguess_steady()
e.SteadyRef.create_bounds()
e.solve_steady_ref()
e.SteadyRef.report_zL(filename="mult_ss")
e.load_d_s(e.PlantSample)
e.PlantSample.create_bounds()
e.solve_dyn(e.PlantSample)
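# Covariance-style weights for the states (q_cov), the input u1 (u_cov), and the measurements (m_cov);
# each entry is set to 0.5% of a nominal magnitude.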
q_cov = {}
for i in tfe:
for j in itertools.product(lfe, lcp, lc):
q_cov[("Nse", j), ("Nse", j), i] = 7525.81478168 * 0.005
q_cov[("Nsc", j), ("Nsc", j), i] = 117.650089456 * 0.005
# q_cov[("Nse", j), ("Nse", j), i] = 735.706082714 * 0.005
for i in tfe:
for j in itertools.product(lfe, lcp):
# q_cov[("Hge", j), ("Hge", j), i] = 2194.25390583 * 0.005
q_cov[("Hse", j), ("Hse", j), i] = 731143.716603 * 0.005
q_cov[("Hsc", j), ("Hsc", j), i] = 16668.3312216 * 0.005
q_cov[("Hge", j), ("Hge", j), i] = 2166.86838591 * 0.005
q_cov[("Hgc", j), ("Hgc", j), i] = 47.7911012193 * 0.005
# q_cov[("mom", j), ("mom", j), i] = 1.14042251669 * 0.005
# for i in lfe:
# for j in [(1,1, 'c'), (5,3, 'c')]:
# m_cov[("yb", j), ("yb", j), i] = 1e-04
u_cov = {}
for i in [i for i in range(1, nfe_mhe+1)]:
u_cov["u1", i] = 162.183495794 * 0.005
m_cov = {}
for i in tfe:
for j in itertools.product(lfe, lcp):
m_cov[("Tgb", j), ("Tgb", j), i] = 40 * 0.005
m_cov[("vg", j), ("vg", j), i] = 0.902649386907 * 0.005
e.find_target_ss() #: Compute target-steady state (beforehand)
#: Create NMPC
e.create_nmpc()
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+08)
e.solve_dyn(e.PlantSample, stop_if_nopt=True)
ipsr = SolverFactory('ipopt', executable="/home/dav0/Apps/IpoptSR/Ipopt/build/bin/ipoptSR")
ref_state = {("c_capture", ((),)): 0.50}
e.find_target_ss(ref_state=ref_state) #: Compute target-steady state (beforehand)
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+08)
for i in range(1, 1000):
ps = e.solve_dyn(e.PlantSample, stop_if_nopt=False)
e.PlantSample.write_nl(name="baddie.nl")
e.PlantSample.pprint(filename="baddie.txt")
e.PlantSample.snap_shot(filename="baddie.py")
e.PlantSample.report_zL(filename="bad_bounds")
if ps != 0:
e.PlantSample.write_nl(name="baddie.nl")
e.PlantSample.pprint(filename="baddie.txt")
e.PlantSample.snap_shot(filename="baddie.py")
e.PlantSample.report_zL(filename="bad_bounds")
e.solve_dyn(e.PlantSample, stop_if_nopt=True)
e.update_state_real() # update the current state
e.update_soi_sp_nmpc()
#
e.initialize_olnmpc(e.PlantSample, "real")
e.load_init_state_nmpc(src_kind="state_dict", state_dict="real")
stat_nmpc = e.solve_dyn(e.olnmpc, skip_update=False, max_cpu_time=300)
# if stat_nmpc != 0:
# stat_nmpc = e.solve_dyn(e.olnmpc,
# stop_if_nopt=True,
# skip_update=False,
# iter_max=300, ma57_pivtol=1e-12)
if stat_nmpc != 0:
strategy = 1
if strategy == 1:
if e.nfe_tnmpc == 1:
pass
else:
e.create_nmpc(newnfe=e.ncp_tnmpc-1, newncp=1)
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+02)
e.initialize_olnmpc(e.PlantSample, "real")
e.load_init_state_nmpc(src_kind="state_dict", state_dict="real")
stat_nmpc = e.solve_dyn(e.olnmpc, skip_update=False, max_cpu_time=300)
else:
e.olnmpc.write_nl(name="bad.nl")
# e.olnmpc.pprint(filename="bad_" + str(i))
with open("ipopt.opt", "w") as f:
f.write("linear_solver ma57\n"
"ma57_dep_tol 1e-8\nbig_M 1e30\n")
f.close()
ipsr.solve(e.olnmpc, tee=True)
e.update_u(e.olnmpc)
e.print_r_nmpc()
e.cycleSamPlant(plant_step=True)
e.plant_uinject(e.PlantSample, src_kind="dict", nsteps=10, skip_homotopy=True)
# e.plant_input_gen(e.PlantSample, "mod", src=e.ss2)
|
[
"nmpc_mhe.dync.NMPCGenv2.NmpcGen",
"itertools.product",
"pyomo.opt.SolverFactory"
] |
[((1825, 1962), 'nmpc_mhe.dync.NMPCGenv2.NmpcGen', 'NmpcGen', (['bfb_dae', '(400 / nfe_mhe)', 'states', 'u'], {'ref_state': 'ref_state', 'u_bounds': 'u_bounds', 'nfe_tnmpc': 'nfe_mhe', 'ncp_tnmpc': '(1)', 'nfe_t': '(5)', 'ncp_t': '(1)'}), '(bfb_dae, 400 / nfe_mhe, states, u, ref_state=ref_state, u_bounds=\n u_bounds, nfe_tnmpc=nfe_mhe, ncp_tnmpc=1, nfe_t=5, ncp_t=1)\n', (1832, 1962), False, 'from nmpc_mhe.dync.NMPCGenv2 import NmpcGen\n'), ((3806, 3895), 'pyomo.opt.SolverFactory', 'SolverFactory', (['"""ipopt"""'], {'executable': '"""/home/dav0/Apps/IpoptSR/Ipopt/build/bin/ipoptSR"""'}), "('ipopt', executable=\n '/home/dav0/Apps/IpoptSR/Ipopt/build/bin/ipoptSR')\n", (3819, 3895), False, 'from pyomo.opt import ProblemFormat, SolverFactory\n'), ((2497, 2528), 'itertools.product', 'itertools.product', (['lfe', 'lcp', 'lc'], {}), '(lfe, lcp, lc)\n', (2514, 2528), False, 'import itertools, sys\n'), ((2758, 2785), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (2775, 2785), False, 'import itertools, sys\n'), ((3425, 3452), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (3442, 3452), False, 'import itertools, sys\n'), ((1038, 1065), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1055, 1065), False, 'import itertools, sys\n'), ((1090, 1117), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1107, 1117), False, 'import itertools, sys\n'), ((1309, 1340), 'itertools.product', 'itertools.product', (['lfe', 'lcp', 'lc'], {}), '(lfe, lcp, lc)\n', (1326, 1340), False, 'import itertools, sys\n'), ((1372, 1399), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1389, 1399), False, 'import itertools, sys\n'), ((1431, 1462), 'itertools.product', 'itertools.product', (['lfe', 'lcp', 'lc'], {}), '(lfe, lcp, lc)\n', (1448, 1462), False, 'import itertools, sys\n'), ((1494, 1521), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1511, 1521), False, 'import itertools, sys\n'), ((1553, 1580), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1570, 1580), False, 'import itertools, sys\n'), ((1612, 1639), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1629, 1639), False, 'import itertools, sys\n')]
|
from urllib.parse import urlparse
from quart import current_app as app, request, jsonify
def filter_referrers():
filters = app.config.get('REFERRERS_FILTER')
if not filters:
return None
referrer = request.referrer
if referrer:
parsed = urlparse(referrer)
for filter in filters:
if parsed.hostname.endswith(filter):
return None
return jsonify({
'ok': False,
'error': 'Unauthorized',
}), 403
|
[
"urllib.parse.urlparse",
"quart.jsonify",
"quart.current_app.config.get"
] |
[((130, 164), 'quart.current_app.config.get', 'app.config.get', (['"""REFERRERS_FILTER"""'], {}), "('REFERRERS_FILTER')\n", (144, 164), True, 'from quart import current_app as app, request, jsonify\n'), ((271, 289), 'urllib.parse.urlparse', 'urlparse', (['referrer'], {}), '(referrer)\n', (279, 289), False, 'from urllib.parse import urlparse\n'), ((409, 456), 'quart.jsonify', 'jsonify', (["{'ok': False, 'error': 'Unauthorized'}"], {}), "({'ok': False, 'error': 'Unauthorized'})\n", (416, 456), False, 'from quart import current_app as app, request, jsonify\n')]
|
from django.db import models
from filer.fields.file import FilerFileField
class FakeLink(models.Model):
"""
    In our widget we need to manually render an AdminFileFormField. Basically for every other Field type this is not
    a problem at all, but Filer needs a rel attribute which consists of a reverse relationship. We fake it
with this model.
"""
fake_file = FilerFileField(blank=True, null=True, on_delete=models.CASCADE)
|
[
"filer.fields.file.FilerFileField"
] |
[((383, 446), 'filer.fields.file.FilerFileField', 'FilerFileField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'models.CASCADE'}), '(blank=True, null=True, on_delete=models.CASCADE)\n', (397, 446), False, 'from filer.fields.file import FilerFileField\n')]
|
from __future__ import print_function
import atexit
import errno
import logging
import os
import select
import signal
import sys
import time
from process_tests import setup_coverage
TIMEOUT = int(os.getenv('MANHOLE_TEST_TIMEOUT', 10))
SOCKET_PATH = '/tmp/manhole-socket'
OUTPUT = sys.__stdout__
def handle_sigterm(signo, _frame):
# Simulate real termination
print("Terminated", file=OUTPUT)
sys.exit(128 + signo)
# Handling sigterm ensures that atexit functions are called, and we do not leave
# leftover /tmp/manhole-pid sockets.
signal.signal(signal.SIGTERM, handle_sigterm)
@atexit.register
def log_exit():
print("In atexit handler.", file=OUTPUT)
def setup_greenthreads(patch_threads=False):
try:
from gevent import monkey
monkey.patch_all(thread=False)
except (ImportError, SyntaxError):
pass
try:
import eventlet
eventlet.monkey_patch(thread=False)
except (ImportError, SyntaxError):
pass
def do_fork():
pid = os.fork()
if pid:
@atexit.register
def cleanup():
try:
os.kill(pid, signal.SIGINT)
time.sleep(0.2)
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
os.waitpid(pid, 0)
else:
time.sleep(TIMEOUT * 10)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='[pid=%(process)d - %(asctime)s]: %(name)s - %(levelname)s - %(message)s',
)
test_name = sys.argv[1]
try:
setup_coverage()
if os.getenv('PATCH_THREAD', False):
import manhole
setup_greenthreads(True)
else:
setup_greenthreads(True)
import manhole
if test_name == 'test_activate_on_usr2':
manhole.install(activate_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_install_once':
manhole.install()
try:
manhole.install()
except manhole.AlreadyInstalled:
print('ALREADY_INSTALLED')
else:
raise AssertionError("Did not raise AlreadyInstalled")
elif test_name == 'test_stderr_doesnt_deadlock':
import subprocess
manhole.install()
for i in range(50):
print('running iteration', i)
p = subprocess.Popen(['true'])
print('waiting for process', p.pid)
p.wait()
print('process ended')
path = '/tmp/manhole-%d' % p.pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
print('SUCCESS')
elif test_name == 'test_fork_exec':
manhole.install(reinstall_delay=5)
print("Installed.")
time.sleep(0.2)
pid = os.fork()
print("Forked, pid =", pid)
if pid:
os.waitpid(pid, 0)
path = '/tmp/manhole-%d' % pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
else:
try:
time.sleep(1)
print("Exec-ing `true`")
os.execvp('true', ['true'])
finally:
os._exit(1)
print('SUCCESS')
elif test_name == 'test_activate_on_with_oneshot_on':
manhole.install(activate_on='USR2', oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_interrupt_on_accept':
def handle_usr2(_sig, _frame):
print('Got USR2')
signal.signal(signal.SIGUSR2, handle_usr2)
import ctypes
import ctypes.util
libpthread_path = ctypes.util.find_library("pthread")
if not libpthread_path:
raise ImportError
libpthread = ctypes.CDLL(libpthread_path)
if not hasattr(libpthread, "pthread_setname_np"):
raise ImportError
pthread_kill = libpthread.pthread_kill
pthread_kill.argtypes = [ctypes.c_void_p, ctypes.c_int]
pthread_kill.restype = ctypes.c_int
manhole.install(sigmask=None)
for i in range(15):
time.sleep(0.1)
print("Sending signal to manhole thread ...")
pthread_kill(manhole._INST.ident, signal.SIGUSR2)
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_oneshot_on_usr2':
manhole.install(oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name.startswith('test_signalfd_weirdness'):
if 'negative' in test_name:
manhole.install(sigmask=None)
else:
manhole.install(sigmask=[signal.SIGCHLD])
time.sleep(0.3) # give the manhole a bit enough time to start
print('Starting ...')
import signalfd
signalfd.sigprocmask(signalfd.SIG_BLOCK, [signal.SIGCHLD])
fd = signalfd.signalfd(0, [signal.SIGCHLD], signalfd.SFD_NONBLOCK|signalfd.SFD_CLOEXEC)
for i in range(200):
print('Forking %s:' % i)
pid = os.fork()
print(' - [%s/%s] forked' % (i, pid))
if pid:
while 1:
print(' - [%s/%s] selecting on: %s' % (i, pid, [fd]))
read_ready, _, errors = select.select([fd], [], [fd], 1)
if read_ready:
try:
print(' - [%s/%s] reading from signalfd ...' % (i, pid))
print(' - [%s] read from signalfd: %r ' % (i, os.read(fd, 128)))
break
except OSError as exc:
print(' - [%s/%s] reading from signalfd failed with errno %s' % (i, pid, exc.errno))
else:
print(' - [%s/%s] reading from signalfd failed - not ready !' % (i, pid))
if 'negative' in test_name:
time.sleep(1)
if errors:
raise RuntimeError("fd has error")
else:
print(' - [%s/%s] exiting' % (i, pid))
os._exit(0)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_auth_fail':
manhole.get_peercred = lambda _: (-1, -1, -1)
manhole.install()
time.sleep(TIMEOUT * 10)
elif test_name == 'test_socket_path':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_daemon_connection':
manhole.install(daemon_connection=True)
time.sleep(TIMEOUT)
elif test_name == 'test_socket_path_with_fork':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
do_fork()
elif test_name == 'test_locals':
manhole.install(socket_path=SOCKET_PATH,
locals={'k1': 'v1', 'k2': 'v2'})
time.sleep(TIMEOUT)
elif test_name == 'test_locals_after_fork':
manhole.install(locals={'k1': 'v1', 'k2': 'v2'})
do_fork()
elif test_name == 'test_redirect_stderr_default':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
elif test_name == 'test_redirect_stderr_disabled':
manhole.install(socket_path=SOCKET_PATH, redirect_stderr=False)
time.sleep(TIMEOUT)
elif test_name == 'test_sigmask':
manhole.install(socket_path=SOCKET_PATH, sigmask=[signal.SIGUSR1])
time.sleep(TIMEOUT)
else:
manhole.install()
time.sleep(0.3) # give the manhole a bit enough time to start
if test_name == 'test_simple':
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_forkpty':
time.sleep(1)
pid, masterfd = os.forkpty()
if pid:
@atexit.register
def cleanup():
try:
os.kill(pid, signal.SIGINT)
time.sleep(0.2)
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
while not os.waitpid(pid, os.WNOHANG)[0]:
try:
os.write(2, os.read(masterfd, 1024))
except OSError as e:
print("Error while reading from masterfd:", e)
else:
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_fork':
time.sleep(1)
do_fork()
else:
raise RuntimeError('Invalid test spec.')
except: # pylint: disable=W0702
print('Died with %s.' % sys.exc_info()[0].__name__, file=OUTPUT)
import traceback
traceback.print_exc(file=OUTPUT)
print('DIED.', file=OUTPUT)
|
[
"time.sleep",
"sys.exc_info",
"manhole.install",
"eventlet.monkey_patch",
"sys.exit",
"os.fork",
"ctypes.CDLL",
"signalfd.signalfd",
"os.read",
"os.forkpty",
"os.path.exists",
"os.kill",
"gevent.monkey.patch_all",
"subprocess.Popen",
"os.unlink",
"traceback.print_exc",
"select.select",
"os.waitpid",
"signalfd.sigprocmask",
"logging.basicConfig",
"os.execvp",
"signal.signal",
"process_tests.setup_coverage",
"ctypes.util.find_library",
"os.getenv",
"os._exit"
] |
[((548, 593), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'handle_sigterm'], {}), '(signal.SIGTERM, handle_sigterm)\n', (561, 593), False, 'import signal\n'), ((199, 236), 'os.getenv', 'os.getenv', (['"""MANHOLE_TEST_TIMEOUT"""', '(10)'], {}), "('MANHOLE_TEST_TIMEOUT', 10)\n", (208, 236), False, 'import os\n'), ((408, 429), 'sys.exit', 'sys.exit', (['(128 + signo)'], {}), '(128 + signo)\n', (416, 429), False, 'import sys\n'), ((1012, 1021), 'os.fork', 'os.fork', ([], {}), '()\n', (1019, 1021), False, 'import os\n'), ((1425, 1552), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""[pid=%(process)d - %(asctime)s]: %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.DEBUG, format=\n '[pid=%(process)d - %(asctime)s]: %(name)s - %(levelname)s - %(message)s')\n", (1444, 1552), False, 'import logging\n'), ((772, 802), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {'thread': '(False)'}), '(thread=False)\n', (788, 802), False, 'from gevent import monkey\n'), ((897, 932), 'eventlet.monkey_patch', 'eventlet.monkey_patch', ([], {'thread': '(False)'}), '(thread=False)\n', (918, 932), False, 'import eventlet\n'), ((1330, 1348), 'os.waitpid', 'os.waitpid', (['pid', '(0)'], {}), '(pid, 0)\n', (1340, 1348), False, 'import os\n'), ((1367, 1391), 'time.sleep', 'time.sleep', (['(TIMEOUT * 10)'], {}), '(TIMEOUT * 10)\n', (1377, 1391), False, 'import time\n'), ((1617, 1633), 'process_tests.setup_coverage', 'setup_coverage', ([], {}), '()\n', (1631, 1633), False, 'from process_tests import setup_coverage\n'), ((1646, 1678), 'os.getenv', 'os.getenv', (['"""PATCH_THREAD"""', '(False)'], {}), "('PATCH_THREAD', False)\n", (1655, 1678), False, 'import os\n'), ((1884, 1919), 'manhole.install', 'manhole.install', ([], {'activate_on': '"""USR2"""'}), "(activate_on='USR2')\n", (1899, 1919), False, 'import manhole\n'), ((9614, 9646), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'OUTPUT'}), '(file=OUTPUT)\n', (9633, 9646), False, 'import traceback\n'), ((1115, 1142), 'os.kill', 'os.kill', (['pid', 'signal.SIGINT'], {}), '(pid, signal.SIGINT)\n', (1122, 1142), False, 'import os\n'), ((1159, 1174), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1169, 1174), False, 'import time\n'), ((1191, 1219), 'os.kill', 'os.kill', (['pid', 'signal.SIGTERM'], {}), '(pid, signal.SIGTERM)\n', (1198, 1219), False, 'import os\n'), ((1979, 1994), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1989, 1994), False, 'import time\n'), ((2054, 2071), 'manhole.install', 'manhole.install', ([], {}), '()\n', (2069, 2071), False, 'import manhole\n'), ((2105, 2122), 'manhole.install', 'manhole.install', ([], {}), '()\n', (2120, 2122), False, 'import manhole\n'), ((2399, 2416), 'manhole.install', 'manhole.install', ([], {}), '()\n', (2414, 2416), False, 'import manhole\n'), ((2516, 2542), 'subprocess.Popen', 'subprocess.Popen', (["['true']"], {}), "(['true'])\n", (2532, 2542), False, 'import subprocess\n'), ((2727, 2747), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2741, 2747), False, 'import os\n'), ((2931, 2965), 'manhole.install', 'manhole.install', ([], {'reinstall_delay': '(5)'}), '(reinstall_delay=5)\n', (2946, 2965), False, 'import manhole\n'), ((3010, 3025), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (3020, 3025), False, 'import time\n'), ((3044, 3053), 'os.fork', 'os.fork', ([], {}), '()\n', (3051, 3053), False, 'import os\n'), ((2769, 2784), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (2778, 2784), 
False, 'import os\n'), ((3130, 3148), 'os.waitpid', 'os.waitpid', (['pid', '(0)'], {}), '(pid, 0)\n', (3140, 3148), False, 'import os\n'), ((3215, 3235), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3229, 3235), False, 'import os\n'), ((3660, 3714), 'manhole.install', 'manhole.install', ([], {'activate_on': '"""USR2"""', 'oneshot_on': '"""USR2"""'}), "(activate_on='USR2', oneshot_on='USR2')\n", (3675, 3714), False, 'import manhole\n'), ((9540, 9554), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9552, 9554), False, 'import sys\n'), ((3257, 3272), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (3266, 3272), False, 'import os\n'), ((3393, 3406), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3403, 3406), False, 'import time\n'), ((3472, 3499), 'os.execvp', 'os.execvp', (['"""true"""', "['true']"], {}), "('true', ['true'])\n", (3481, 3499), False, 'import os\n'), ((3545, 3556), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (3553, 3556), False, 'import os\n'), ((3774, 3789), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3784, 3789), False, 'import time\n'), ((3933, 3975), 'signal.signal', 'signal.signal', (['signal.SIGUSR2', 'handle_usr2'], {}), '(signal.SIGUSR2, handle_usr2)\n', (3946, 3975), False, 'import signal\n'), ((4064, 4099), 'ctypes.util.find_library', 'ctypes.util.find_library', (['"""pthread"""'], {}), "('pthread')\n", (4088, 4099), False, 'import ctypes\n'), ((4195, 4223), 'ctypes.CDLL', 'ctypes.CDLL', (['libpthread_path'], {}), '(libpthread_path)\n', (4206, 4223), False, 'import ctypes\n'), ((4499, 4528), 'manhole.install', 'manhole.install', ([], {'sigmask': 'None'}), '(sigmask=None)\n', (4514, 4528), False, 'import manhole\n'), ((4577, 4592), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4587, 4592), False, 'import time\n'), ((4772, 4787), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4782, 4787), False, 'import time\n'), ((4850, 4884), 'manhole.install', 'manhole.install', ([], {'oneshot_on': '"""USR2"""'}), "(oneshot_on='USR2')\n", (4865, 4884), False, 'import manhole\n'), ((4945, 4960), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4955, 4960), False, 'import time\n'), ((5197, 5212), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (5207, 5212), False, 'import time\n'), ((5334, 5392), 'signalfd.sigprocmask', 'signalfd.sigprocmask', (['signalfd.SIG_BLOCK', '[signal.SIGCHLD]'], {}), '(signalfd.SIG_BLOCK, [signal.SIGCHLD])\n', (5354, 5392), False, 'import signalfd\n'), ((5410, 5499), 'signalfd.signalfd', 'signalfd.signalfd', (['(0)', '[signal.SIGCHLD]', '(signalfd.SFD_NONBLOCK | signalfd.SFD_CLOEXEC)'], {}), '(0, [signal.SIGCHLD], signalfd.SFD_NONBLOCK | signalfd.\n SFD_CLOEXEC)\n', (5427, 5499), False, 'import signalfd\n'), ((6786, 6810), 'time.sleep', 'time.sleep', (['(TIMEOUT * 10)'], {}), '(TIMEOUT * 10)\n', (6796, 6810), False, 'import time\n'), ((5079, 5108), 'manhole.install', 'manhole.install', ([], {'sigmask': 'None'}), '(sigmask=None)\n', (5094, 5108), False, 'import manhole\n'), ((5143, 5184), 'manhole.install', 'manhole.install', ([], {'sigmask': '[signal.SIGCHLD]'}), '(sigmask=[signal.SIGCHLD])\n', (5158, 5184), False, 'import manhole\n'), ((5589, 5598), 'os.fork', 'os.fork', ([], {}), '()\n', (5596, 5598), False, 'import os\n'), ((6925, 6942), 'manhole.install', 'manhole.install', ([], {}), '()\n', (6940, 6942), False, 'import manhole\n'), ((6955, 6979), 'time.sleep', 'time.sleep', (['(TIMEOUT * 10)'], {}), '(TIMEOUT * 10)\n', (6965, 6979), False, 'import time\n'), ((6762, 
6773), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (6770, 6773), False, 'import os\n'), ((7038, 7078), 'manhole.install', 'manhole.install', ([], {'socket_path': 'SOCKET_PATH'}), '(socket_path=SOCKET_PATH)\n', (7053, 7078), False, 'import manhole\n'), ((7091, 7115), 'time.sleep', 'time.sleep', (['(TIMEOUT * 10)'], {}), '(TIMEOUT * 10)\n', (7101, 7115), False, 'import time\n'), ((5832, 5864), 'select.select', 'select.select', (['[fd]', '[]', '[fd]', '(1)'], {}), '([fd], [], [fd], 1)\n', (5845, 5864), False, 'import select\n'), ((7180, 7219), 'manhole.install', 'manhole.install', ([], {'daemon_connection': '(True)'}), '(daemon_connection=True)\n', (7195, 7219), False, 'import manhole\n'), ((7232, 7251), 'time.sleep', 'time.sleep', (['TIMEOUT'], {}), '(TIMEOUT)\n', (7242, 7251), False, 'import time\n'), ((7320, 7360), 'manhole.install', 'manhole.install', ([], {'socket_path': 'SOCKET_PATH'}), '(socket_path=SOCKET_PATH)\n', (7335, 7360), False, 'import manhole\n'), ((7373, 7392), 'time.sleep', 'time.sleep', (['TIMEOUT'], {}), '(TIMEOUT)\n', (7383, 7392), False, 'import time\n'), ((6549, 6562), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6559, 6562), False, 'import time\n'), ((7468, 7541), 'manhole.install', 'manhole.install', ([], {'socket_path': 'SOCKET_PATH', 'locals': "{'k1': 'v1', 'k2': 'v2'}"}), "(socket_path=SOCKET_PATH, locals={'k1': 'v1', 'k2': 'v2'})\n", (7483, 7541), False, 'import manhole\n'), ((7582, 7601), 'time.sleep', 'time.sleep', (['TIMEOUT'], {}), '(TIMEOUT)\n', (7592, 7601), False, 'import time\n'), ((7666, 7714), 'manhole.install', 'manhole.install', ([], {'locals': "{'k1': 'v1', 'k2': 'v2'}"}), "(locals={'k1': 'v1', 'k2': 'v2'})\n", (7681, 7714), False, 'import manhole\n'), ((7807, 7847), 'manhole.install', 'manhole.install', ([], {'socket_path': 'SOCKET_PATH'}), '(socket_path=SOCKET_PATH)\n', (7822, 7847), False, 'import manhole\n'), ((7860, 7879), 'time.sleep', 'time.sleep', (['TIMEOUT'], {}), '(TIMEOUT)\n', (7870, 7879), False, 'import time\n'), ((6104, 6120), 'os.read', 'os.read', (['fd', '(128)'], {}), '(fd, 128)\n', (6111, 6120), False, 'import os\n'), ((7951, 8014), 'manhole.install', 'manhole.install', ([], {'socket_path': 'SOCKET_PATH', 'redirect_stderr': '(False)'}), '(socket_path=SOCKET_PATH, redirect_stderr=False)\n', (7966, 8014), False, 'import manhole\n'), ((8027, 8046), 'time.sleep', 'time.sleep', (['TIMEOUT'], {}), '(TIMEOUT)\n', (8037, 8046), False, 'import time\n'), ((8101, 8167), 'manhole.install', 'manhole.install', ([], {'socket_path': 'SOCKET_PATH', 'sigmask': '[signal.SIGUSR1]'}), '(socket_path=SOCKET_PATH, sigmask=[signal.SIGUSR1])\n', (8116, 8167), False, 'import manhole\n'), ((8180, 8199), 'time.sleep', 'time.sleep', (['TIMEOUT'], {}), '(TIMEOUT)\n', (8190, 8199), False, 'import time\n'), ((8226, 8243), 'manhole.install', 'manhole.install', ([], {}), '()\n', (8241, 8243), False, 'import manhole\n'), ((8256, 8271), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (8266, 8271), False, 'import time\n'), ((8378, 8402), 'time.sleep', 'time.sleep', (['(TIMEOUT * 10)'], {}), '(TIMEOUT * 10)\n', (8388, 8402), False, 'import time\n'), ((8470, 8483), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8480, 8483), False, 'import time\n'), ((8516, 8528), 'os.forkpty', 'os.forkpty', ([], {}), '()\n', (8526, 8528), False, 'import os\n'), ((9267, 9291), 'time.sleep', 'time.sleep', (['(TIMEOUT * 10)'], {}), '(TIMEOUT * 10)\n', (9277, 9291), False, 'import time\n'), ((9356, 9369), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9366, 
9369), False, 'import time\n'), ((8682, 8709), 'os.kill', 'os.kill', (['pid', 'signal.SIGINT'], {}), '(pid, signal.SIGINT)\n', (8689, 8709), False, 'import os\n'), ((8738, 8753), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (8748, 8753), False, 'import time\n'), ((8782, 8810), 'os.kill', 'os.kill', (['pid', 'signal.SIGTERM'], {}), '(pid, signal.SIGTERM)\n', (8789, 8810), False, 'import os\n'), ((8979, 9006), 'os.waitpid', 'os.waitpid', (['pid', 'os.WNOHANG'], {}), '(pid, os.WNOHANG)\n', (8989, 9006), False, 'import os\n'), ((9080, 9103), 'os.read', 'os.read', (['masterfd', '(1024)'], {}), '(masterfd, 1024)\n', (9087, 9103), False, 'import os\n')]
|
from django.contrib import admin
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from . import models
if settings.HAS_ADDITIONAL_USER_DATA:
try:
class UserProfileInline(admin.TabularInline):
model = models.UserProfile
extra = 0
    except Exception:
raise ImproperlyConfigured("User/admin.py:: Multi Vendor is turned on.")
class UserAdmin(admin.ModelAdmin):
list_display = ['get_full_name', 'email', 'is_verified']
search_fields = ['get_full_name', 'email', 'date_joined', 'username']
list_filter = ('groups',)
if settings.HAS_ADDITIONAL_USER_DATA:
inlines = [ UserProfileInline, ]
def save_model(self, request, obj, form, change):
if 'password' in form.changed_data:
obj.set_password(request.POST['password'])
obj.save()
admin.site.register(models.User, UserAdmin)
admin.site.register(models.IpAddress)
admin.site.register(models.CityFromIpAddress)
admin.site.register(models.Marketing)
|
[
"django.contrib.admin.site.register",
"django.core.exceptions.ImproperlyConfigured"
] |
[((889, 932), 'django.contrib.admin.site.register', 'admin.site.register', (['models.User', 'UserAdmin'], {}), '(models.User, UserAdmin)\n', (908, 932), False, 'from django.contrib import admin\n'), ((933, 970), 'django.contrib.admin.site.register', 'admin.site.register', (['models.IpAddress'], {}), '(models.IpAddress)\n', (952, 970), False, 'from django.contrib import admin\n'), ((971, 1016), 'django.contrib.admin.site.register', 'admin.site.register', (['models.CityFromIpAddress'], {}), '(models.CityFromIpAddress)\n', (990, 1016), False, 'from django.contrib import admin\n'), ((1017, 1054), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Marketing'], {}), '(models.Marketing)\n', (1036, 1054), False, 'from django.contrib import admin\n'), ((361, 427), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""User/admin.py:: Multi Vendor is turned on."""'], {}), "('User/admin.py:: Multi Vendor is turned on.')\n", (381, 427), False, 'from django.core.exceptions import ImproperlyConfigured\n')]
|
from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer
from client.config import config as c, language as l
from discord.ext import commands, tasks
from client.external.hiscores import hiscores_xp
from PIL import Image, ImageDraw, ImageFont
import discord, locale
class xp_tracker(commands.Cog):
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
name = 'xp_tracker'
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
@staticmethod
async def fun_xptracker(ctx):
try:
path = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
guild_l = await origin.get_language(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
if ctx.message.content == '.xptracker':
path3 = c.ORIGIN_PATH['embed.tracker.json']
json_string = await json_manager.get_json(path3)
new_json_string = {'data': []}
for key, value in json_string[guild_l]['xp_tracker']['tracker'].items():
if int(key) == 1:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
else:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, c.CLIENT_ICON, l.xp_tracker[guild_l]['embed_1'].format(ctx.guild.name), new_json_string['data'], False)
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
except Exception as error:
await exception.error(error)
async def fun_addxpevent(self, ctx):
try:
path1 = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
guild_l = await origin.get_language(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path1, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
STRING = str(ctx.message.content).split(' ')
if len(STRING) >= 9:
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
server_config = await json_manager.get_json(path)
LIST1 = self.PRE
LIST2 = self.NAME
LIST3 = self.ICON
DATA1 = await json_manager.get_data(path2)
ID = await origin.randomize()
STATUS = True
STATUS2 = False
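                    # Re-draw random ids until one that does not collide with an existing event id is found.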
while STATUS:
for data in DATA1:
if data['id'] == ID:
STATUS2 = True
if not STATUS2:
STATUS = False
else:
                            ID = await origin.randomize()
                            STATUS2 = False  # reset so the new id is re-checked on the next pass
EXTRA = ''
NAME = ''
for value in LIST2:
if str(value).lower() == STRING[2].lower():
NAME = str(value)
for index, event in enumerate(LIST1):
if STRING[2] == event:
RUSH = None
if STRING[1].isdigit() and int(STRING[1]) > 1:
RUSH = l.xp_tracker[guild_l]['configuration']['rush_point'].format(locale.format_string('%d', int(STRING[1]), grouping=True))
path4 = c.ORIGIN_PATH['embed.tracker.json']
DESCRIPTION = l.xp_tracker[guild_l]['description_1'].format(
ctx.author.mention,
STRING[4], STRING[6], NAME, STRING[5] if not RUSH else l.xp_tracker[guild_l]['extra_4'], STRING[7] if not RUSH else l.xp_tracker[guild_l]['extra_4'], RUSH if RUSH else ''
)
if len(STRING) >= 8:
for value in STRING[8:]:
EXTRA += '{} '.format(value)
json_string = await json_manager.get_json(path4)
new_json_string = {'data': []}
for key, value in json_string[guild_l]['xp_tracker']['addevent'].items():
if int(key) == 1:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): str(value['value']).format(EXTRA)
})
if int(key) == 2:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
if STRING[1].isdigit():
mode_type = 0
if int(STRING[1]) == c.EVENT_MODE[0]:
mode_type = 1
elif int(STRING[1]) >= c.EVENT_MODE[1]:
mode_type = 2
EVENT_CHANNEL = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['events'])
embed = await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, LIST3[index], l.xp_tracker[guild_l]['embed_2'].format(ctx.guild.name), new_json_string['data'], False, False, EVENT_CHANNEL, DESCRIPTION)
json_string = {'id': ID, 'user_id': ctx.author.id, 'message_id': embed.id, 'event_name': STRING[2], 'xp_target': int(STRING[1]), 'prize_count': int(STRING[3]), 'date_start': STRING[4], 'date_end': STRING[5], 'time_start': int(STRING[6]), 'time_end': int(STRING[7]), 'participants': 0, 'status': 0, 'type': mode_type, 'win_message': 0}
await json_manager.create(path2, json_string)
await ctx.author.send(l.xp_tracker[guild_l]['msg_success_1'])
CHANNEL1 = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['chat0'])
CHANNEL2 = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['chat1'])
if CHANNEL1:
await CHANNEL1.send(l.xp_tracker[guild_l]['msg_post_1'].format(NAME, server_config['events']))
if CHANNEL2:
await CHANNEL2.send(l.xp_tracker[guild_l]['msg_post_1'].format(NAME, server_config['events']))
else:
await ctx.author.send(l.xp_tracker[guild_l]['msg_badformat_1'])
else:
await ctx.author.send(l.xp_tracker[guild_l]['msg_badformat_1'])
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
except Exception as error:
await exception.error(error)
@staticmethod
async def fun_removeallxp(ctx, system=None):
try:
guild_l = await origin.get_language(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path1 = c.GUILD_PATH['special_member.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path2 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path3 = c.GUILD_PATH['event.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
LIST1 = await json_manager.get_data(path3)
NEW_LIST1 = {'data': []}
NEW_LIST2 = {'data': []}
if hasattr(ctx, 'guild'):
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path1, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
for data in LIST1:
if data['type'] == 0 and data['status'] == 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 3 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 4 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
await json_manager.clear_and_update(path2, NEW_LIST1)
await json_manager.clear_and_update(path3, NEW_LIST2)
await ctx.author.send(l.xp_tracker[guild_l]['msg_success_2'])
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
elif system == 1:
if LIST1:
for data in LIST1:
if data['type'] == 0 and data['status'] == 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 3 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 4 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
await json_manager.clear_and_update(path2, NEW_LIST1)
await json_manager.clear_and_update(path3, NEW_LIST2)
except Exception as error:
await exception.error(error)
async def fun_axp(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
STRING = str(ctx.message.content).split(' ')
if len(STRING) >= 2:
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path1 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
server_config = await json_manager.get_json(path)
LIST1 = await json_manager.get_data(path1)
LIST2 = await json_manager.get_data(path2)
CHECK = True
user = self.client.get_user(ctx.author.id)
STATUS1 = False
STATUS2 = False
EVENT_NAME = []
EVENT_LIST_DATA = []
SAFE_CHECK = 0
userName = ''
for name in STRING[1:]:
userName += '{} '.format(name)
userName = userName.replace('_', ' ')
userName = userName.rstrip()
for value in LIST1:
if value['user_id'] == ctx.author.id or value['user_rsn'] == userName:
STATUS1 = True
if not STATUS1:
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
STATUS2 = True
EVENT_NAME.append(value2['event_name'])
SUM = value2['participants'] + 1
EVENT_LIST_DATA.append({'id': value2['id'], 'type': value2['type'], 'sum': SUM})
if STATUS2:
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
USERNAME = USERNAME.replace('%20', ' ')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
json_string = {'user_id': ctx.author.id, 'user_username': ctx.author.mention, 'user_rsn': userName}
for value in EVENT_NAME:
json_string.update({value: USER.stats[value]['experience']})
json_string.update({'{}_current'.format(value): USER.stats[value]['experience']})
await json_manager.create(path1, json_string)
for event_data in EVENT_LIST_DATA:
await json_manager.update(path2, 'id', event_data['id'], 'participants', event_data['sum'])
path4 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
role_id = await ini_manager.get_data('SECTION1', 'EVENT_ROLE', path4)
role = await discord_manager.get_role(self.client, ctx.guild.id, int(role_id))
if role:
user = await discord_manager.get_member(self.client, ctx.guild.id, ctx.author.id)
await user.add_roles(role, reason='{}'.format(c.DISCORD_MESSAGES['event_role_added']), atomic=True)
await ctx.send(l.xp_tracker[guild_l]['msg_1'].format(USERNAME, server_config['events']))
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
await user.send(l.xp_tracker[guild_l]['msg_2'])
else:
EVENT_STATUS = False
MEMBER_DATA = None
for MEMBER in LIST1:
if ctx.author.id == MEMBER['user_id']:
MEMBER_DATA = MEMBER
for EVENT in LIST2:
for key, value in MEMBER_DATA.items():
if (EVENT['type'] == 1 or EVENT['type'] == 2) and key == EVENT['event_name']:
EVENT_STATUS = True
if not EVENT_STATUS and (EVENT['type'] == 1 or EVENT['type'] == 2):
EVENT_STATUS = False
CHECK = True
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
target_keys = ['{}'.format(EVENT['event_name']), '{}_current'.format(EVENT['event_name'])]
target_values = [USER.stats[EVENT['event_name']]['experience'], USER.stats[EVENT['event_name']]['experience']]
await json_manager.update(path1, 'user_id', ctx.author.id, target_keys, target_values)
await user.send(l.xp_tracker[guild_l]['msg_6'].format(str(EVENT['event_name']).capitalize()))
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
EVENT_NAME.append(value2['event_name'])
SUM = value2['participants'] + 1
EVENT_LIST_DATA.append({'id': value2['id'], 'type': value2['type'], 'sum': SUM})
for event_data in EVENT_LIST_DATA:
await json_manager.update(path2, 'id', event_data['id'], 'participants', event_data['sum'])
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
EVENT_STATUS = False
await user.send(l.xp_tracker[guild_l]['msg_7'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_badformat_2'].format(ctx.author.mention))
except Exception as error:
await exception.error(error)
async def fun_xpupdate(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
guild_t = await origin.get_region(ctx.guild.id)
path1 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
LIST1 = await json_manager.get_data(path1)
LIST2 = await json_manager.get_data(path2)
CHECK = True
user = self.client.get_user(ctx.author.id)
guild_current = await server_timer.get_current_time(guild_t)
STATUS1 = False
STATUS2 = False
EVENT_NAME = []
SAFE_CHECK = 0
MEMBER = None
userName = ''
for value in LIST1:
if value['user_id'] == ctx.author.id:
STATUS1 = True
userName = value['user_rsn']
MEMBER = value
if STATUS1:
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
STATUS2 = True
EVENT_NAME.append(value2['event_name'])
if STATUS2:
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
for value in EVENT_NAME:
await json_manager.update(path1, 'user_id', ctx.author.id, '{}_current'.format(value), USER.stats[value]['experience'])
client_message = 'Guild id: {} | Event: {} | RSN: {} | Registration XP: {} | Current XP: {} | Guild time: {} | Status: {}'.format(ctx.guild.id, value, userName, MEMBER[value], USER.stats[value]['experience'], guild_current.strftime('%H:%M'), 'XP self update')
await console_interface.console_message('XP self update', client_message)
await user.send(l.xp_tracker[guild_l]['msg_success_4'])
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_3'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_4'].format(userName))
else:
await user.send(l.xp_tracker[guild_l]['msg_2'])
else:
await user.send(l.xp_tracker[guild_l]['msg_error_5'])
except Exception as error:
await exception.error(error)
async def fun_xprank(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path1 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
path3 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
ini = await ini_manager.get_ini(path1)
LIST1 = self.PNG
LIST2 = self.PRE
INFO_PANEL_IMAGE = self.INFO_PANEL_IMAGE
INFO_PANEL_FIRST_IMAGE = self.INFO_PANEL_FIRST_IMAGE
INFO_PANEL_SECOND_IMAGE = self.INFO_PANEL_SECOND_IMAGE
INFO_PANEL_THIRD_IMAGE = self.INFO_PANEL_THIRD_IMAGE
COLOR_PLACE_FIRST = (255, 30, 215)
COLOR_PLACE_SECOND = (0, 174, 255)
COLOR_PLACE_THIRD = (255, 31, 31)
COLOR_PLACE_DEFAULT = (0, 239, 0)
FONT_PATH = self.FONT_PATH
INFO_PANEL_OBJECT = None
RANK = 0
sum = None
CHANNEL_PERMISSIONS = int(ini['CHANNEL_PERMISSIONS']['STATUS'])
server_config = await json_manager.get_json(path)
CHANNEL_STATUS = True
if CHANNEL_PERMISSIONS == 1:
pass
else:
if ctx.message.channel.id == server_config['chat0']:
CHANNEL_STATUS = False
if CHANNEL_STATUS:
STRING = str(ctx.message.content).split(' ')
def get_id(data_value):
return int(data_value.get('sum'))
if len(STRING) == 1:
user = self.client.get_user(ctx.author.id)
else:
DCID = await origin.find_and_replace(STRING[1])
user = self.client.get_user(DCID)
TEMP_DATA = await json_manager.get_data(path2)
DATA1 = []
DATA2 = await json_manager.get_data(path3)
DATA3 = []
STATUS = None
for value in TEMP_DATA:
if value['type'] == 1 or value['type'] == 2:
DATA1.append(value)
if DATA1:
for index, data in enumerate(DATA1):
if DATA2:
for index2, data2 in enumerate(DATA2):
for key, value in data2.items():
if str(data['event_name']) == str(key):
sum = data2['{}_current'.format(key)] - data2[key]
DATA3.append({'user_rsn': data2['user_rsn'], 'user_id': data2['user_id'], 'sum': sum})
for index3, value3 in enumerate(LIST2):
if str(value3) == str(key):
INFO_PANEL_OBJECT = LIST1[index3]
DATA3.sort(key=get_id, reverse=True)
for index3, data3 in enumerate(DATA3):
RANK += 1
if RANK == 1:
PLACE_IMAGE = INFO_PANEL_FIRST_IMAGE
PLACE_COLOR = COLOR_PLACE_FIRST
elif RANK == 2:
PLACE_IMAGE = INFO_PANEL_SECOND_IMAGE
PLACE_COLOR = COLOR_PLACE_SECOND
elif RANK == 3:
PLACE_IMAGE = INFO_PANEL_THIRD_IMAGE
PLACE_COLOR = COLOR_PLACE_THIRD
else:
PLACE_IMAGE = INFO_PANEL_IMAGE
PLACE_COLOR = COLOR_PLACE_DEFAULT
if hasattr(user, 'id'):
if user.id == data3['user_id']:
with Image.open(PLACE_IMAGE).convert('RGBA') as im:
with Image.open(INFO_PANEL_OBJECT).convert('RGBA') as im2:
size1 = im.size
size2 = im2.size
y = int(size1[1] / 2) - int(size2[1] / 2)
im.paste(im2, (18, y), im2)
draw = ImageDraw.Draw(im)
font = ImageFont.truetype(FONT_PATH, 16)
draw.text((50, y - 12), l.xp_tracker[guild_l]['configuration']['rsn'], PLACE_COLOR, font=font)
draw.text((50, y + 2), l.xp_tracker[guild_l]['configuration']['rank'], PLACE_COLOR, font=font)
draw.text((50, y + 18), l.xp_tracker[guild_l]['configuration']['xp'], PLACE_COLOR, font=font)
draw.text((110 if guild_l == 'LT' else 95, y - 12), '{}'.format(data3['user_rsn']), (255, 255, 255), font=font)
draw.text((130 if guild_l == 'LT' else 100, y + 2), '{}'.format(RANK), (255, 255, 255), font=font)
draw.text((98 if guild_l == 'LT' else 70, y + 18), '{} XP'.format(locale.format_string('%d', data3['sum'], grouping=True)), (255, 255, 255), font=font)
TEMP_FILE = '{}_{}_{}.png'.format(data3['user_rsn'], data['event_name'], sum)
im.save(TEMP_FILE, 'PNG')
rank = open(TEMP_FILE, 'rb')
await ctx.send(file=discord.File(rank))
rank.close()
await file_manager.delete_file(TEMP_FILE)
STATUS = True
if not STATUS:
await ctx.send(l.xp_tracker[guild_l]['msg_error_6'].format(ctx.author.mention))
RANK = 0
DATA3.clear()
else:
await ctx.send(l.xp_tracker[guild_l]['msg_4'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_5'])
else:
await ctx.send(l.module_permissions[guild_l]['msg_restricted'])
except Exception as error:
await exception.error(error)
async def fun_xpstats(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
server_config = await json_manager.get_json(path)
path1 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
ini = await ini_manager.get_ini(path1)
CHANNEL_PERMISSIONS = int(ini['CHANNEL_PERMISSIONS']['STATUS'])
CHANNEL_STATUS = True
if CHANNEL_PERMISSIONS == 1:
pass
else:
if ctx.message.channel.id == server_config['chat0']:
CHANNEL_STATUS = False
if CHANNEL_STATUS:
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
path3 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
LIST1 = self.ICON
LIST2 = self.PRE
IMAGE = None
EVENT_NAME = None
await origin.get_locale()
TEMP_DATA = await json_manager.get_data(path2)
DATA1 = []
DATA2 = await json_manager.get_data(path3)
DATA3 = []
for value in TEMP_DATA:
if value['type'] == 1 or value['type'] == 2:
DATA1.append(value)
def get_id(INFO):
return int(INFO.get('sum'))
if DATA1:
for data1 in DATA1:
if DATA2:
for data2 in DATA2:
for key, value in data2.items():
if str(key) == str(data1['event_name']):
sum = data2['{}_current'.format(key)]-data2[key]
DATA3.append({'user_username': data2['user_username'], 'user_rsn': data2['user_rsn'], 'sum': sum})
if data1['type'] == 1:
EVENT_NAME = '{} [ S ]'.format(str(data1['event_name']).capitalize())
if data1['type'] == 2:
EVENT_NAME = '{} [ R ]'.format(str(data1['event_name']).capitalize())
for index, value3 in enumerate(LIST2):
if str(value3) == str(key):
IMAGE = LIST1[index]
DATA3.sort(key=get_id, reverse=True)
path4 = c.ORIGIN_PATH['embed.tracker.json']
json_string = await json_manager.get_json(path4)
new_json_string = {'data': []}
STRING = ''
SUM = 0
for key, value in json_string[guild_l]['xp_tracker']['stats'].items():
if DATA3:
if int(key) == 1:
for index, data in enumerate(DATA3):
index = index + 1
if index <= 10:
if index == 1:
title = ':first_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
elif index == 2:
title = ':second_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
elif index == 3:
title = ':third_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
else:
title = '{}'.format(l.DISCORD_TOP[guild_l][index - 1])
STRING += l.xp_tracker[guild_l]['configuration']['current_xp'].format(title, data['user_username'], data['user_rsn'], locale.format_string('%d', data['sum'], grouping=True))
SUM += data['sum']
STRING += l.xp_tracker[guild_l]['configuration']['total_xp'].format(locale.format_string('%d', SUM, grouping=True))
new_json_string['data'].append({
'name{}'.format(key): value['name'].format('\u200D'),
'value{}'.format(key): str(value['value']).format(STRING)
})
else:
STRING += l.xp_tracker[guild_l]['configuration']['total_xp'].format(locale.format_string('%d', SUM, grouping=True))
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): str(value['value']).format(ctx.guild.name)
})
await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, IMAGE, l.xp_tracker[guild_l]['embed_3'].format(ctx.guild.name, EVENT_NAME), new_json_string['data'], False)
DATA3.clear()
else:
await ctx.send(l.xp_tracker[guild_l]['msg_4'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_5'])
else:
await ctx.send(l.module_permissions[guild_l]['msg_restricted'])
except Exception as error:
await exception.error(error)
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
@tasks.loop(count=1)
async def variable_init(self):
try:
path_global = c.GUILD_PATH['{}_g.ini'.format(self.name)]
ini = await ini_manager.get_ini(path_global)
self.PRE = await json_manager.get_ini_list(path_global, 'CONSTANT2', 'PRE')
self.NAME = await json_manager.get_ini_list(path_global, 'CONSTANT1', 'NAME')
self.ICON = await json_manager.get_ini_list(path_global, 'CONSTANT3', 'ICON')
self.PNG = await json_manager.get_ini_list(path_global, 'CONSTANT5', 'PNG')
self.INFO_PANEL_IMAGE = ini['CONSTANT5']['INFO_PANEL']
self.INFO_PANEL_FIRST_IMAGE = ini['CONSTANT5']['INFO_PANEL_FIRST']
self.INFO_PANEL_SECOND_IMAGE = ini['CONSTANT5']['INFO_PANEL_SECOND']
self.INFO_PANEL_THIRD_IMAGE = ini['CONSTANT5']['INFO_PANEL_THIRD']
self.FONT_PATH = ini['CONSTANT5']['FONT']
await console_interface.console_message(c.CLIENT_MESSAGES['variable_init'].format(self.name))
except Exception as error:
await exception.error(error)
def __init__(self, client):
self.PRE = None
self.NAME = None
self.ICON = None
self.PNG = None
self.INFO_PANEL_IMAGE = None
self.INFO_PANEL_FIRST_IMAGE = None
self.INFO_PANEL_SECOND_IMAGE = None
self.INFO_PANEL_THIRD_IMAGE = None
self.FONT_PATH = None
self.variable_init.start()
self.client = client
@commands.command()
async def xptracker(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xptracker)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def addxpevent(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_addxpevent)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def removeallxp(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_removeallxp)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def axp(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_axp)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xpupdate(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xpupdate)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xprank(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xprank)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xpstats(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xpstats)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
def setup(client):
client.add_cog(xp_tracker(client))
|
[
"locale.format_string",
"client.ini_manager.get_ini",
"client.external.hiscores.hiscores_xp.Hiscores",
"PIL.ImageDraw.Draw",
"client.json_manager.get_data",
"discord.ext.commands.command",
"client.exception.error",
"PIL.ImageFont.truetype",
"client.console_interface.console_message",
"client.json_manager.get_json",
"client.origin.find_and_replace",
"discord.ext.tasks.loop",
"client.origin.get_locale",
"client.origin.get_region",
"discord.File",
"client.server_timer.get_current_time",
"client.permissions.get_user_permission",
"client.discord_manager.get_channel",
"client.json_manager.create",
"client.origin.randomize",
"client.json_manager.update",
"client.json_manager.get_ini_list",
"client.discord_manager.get_member",
"client.json_manager.clear_and_update",
"discord.Color.dark_green",
"PIL.Image.open",
"client.origin.get_language",
"client.file_manager.delete_file",
"client.ini_manager.get_data"
] |
[((33610, 33629), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'count': '(1)'}), '(count=1)\n', (33620, 33629), False, 'from discord.ext import commands, tasks\n'), ((35103, 35121), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (35119, 35121), False, 'from discord.ext import commands, tasks\n'), ((35539, 35557), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (35555, 35557), False, 'from discord.ext import commands, tasks\n'), ((35977, 35995), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (35993, 35995), False, 'from discord.ext import commands, tasks\n'), ((36417, 36435), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (36433, 36435), False, 'from discord.ext import commands, tasks\n'), ((36841, 36859), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (36857, 36859), False, 'from discord.ext import commands, tasks\n'), ((37275, 37293), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (37291, 37293), False, 'from discord.ext import commands, tasks\n'), ((37705, 37723), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (37721, 37723), False, 'from discord.ext import commands, tasks\n'), ((654, 687), 'client.origin.get_language', 'origin.get_language', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (673, 687), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((2421, 2454), 'client.origin.get_language', 'origin.get_language', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (2440, 2454), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((8680, 8708), 'client.json_manager.get_data', 'json_manager.get_data', (['path3'], {}), '(path3)\n', (8701, 8708), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((10694, 10727), 'client.origin.get_language', 'origin.get_language', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (10713, 10727), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((17956, 17989), 'client.origin.get_language', 'origin.get_language', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (17975, 17989), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((18018, 18049), 'client.origin.get_region', 'origin.get_region', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (18035, 18049), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((18214, 18242), 'client.json_manager.get_data', 'json_manager.get_data', (['path1'], {}), '(path1)\n', (18235, 18242), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((18269, 18297), 'client.json_manager.get_data', 'json_manager.get_data', (['path2'], {}), '(path2)\n', (18290, 18297), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, 
json_manager, origin, permissions, server_timer\n'), ((18413, 18451), 'client.server_timer.get_current_time', 'server_timer.get_current_time', (['guild_t'], {}), '(guild_t)\n', (18442, 18451), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((20884, 20917), 'client.origin.get_language', 'origin.get_language', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (20903, 20917), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((21250, 21276), 'client.ini_manager.get_ini', 'ini_manager.get_ini', (['path1'], {}), '(path1)\n', (21269, 21276), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((22002, 22029), 'client.json_manager.get_json', 'json_manager.get_json', (['path'], {}), '(path)\n', (22023, 22029), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((27722, 27755), 'client.origin.get_language', 'origin.get_language', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (27741, 27755), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((27878, 27905), 'client.json_manager.get_json', 'json_manager.get_json', (['path'], {}), '(path)\n', (27899, 27905), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((28013, 28039), 'client.ini_manager.get_ini', 'ini_manager.get_ini', (['path1'], {}), '(path1)\n', (28032, 28039), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((33771, 33803), 'client.ini_manager.get_ini', 'ini_manager.get_ini', (['path_global'], {}), '(path_global)\n', (33790, 33803), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((33834, 33892), 'client.json_manager.get_ini_list', 'json_manager.get_ini_list', (['path_global', '"""CONSTANT2"""', '"""PRE"""'], {}), "(path_global, 'CONSTANT2', 'PRE')\n", (33859, 33892), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((33923, 33982), 'client.json_manager.get_ini_list', 'json_manager.get_ini_list', (['path_global', '"""CONSTANT1"""', '"""NAME"""'], {}), "(path_global, 'CONSTANT1', 'NAME')\n", (33948, 33982), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((34013, 34072), 'client.json_manager.get_ini_list', 'json_manager.get_ini_list', (['path_global', '"""CONSTANT3"""', '"""ICON"""'], {}), "(path_global, 'CONSTANT3', 'ICON')\n", (34038, 34072), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((34102, 34160), 'client.json_manager.get_ini_list', 
'json_manager.get_ini_list', (['path_global', '"""CONSTANT5"""', '"""PNG"""'], {}), "(path_global, 'CONSTANT5', 'PNG')\n", (34127, 34160), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((840, 905), 'client.permissions.get_user_permission', 'permissions.get_user_permission', (['path', 'target_keys', 'target_values'], {}), '(path, target_keys, target_values)\n', (871, 905), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((2238, 2260), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (2253, 2260), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((2607, 2673), 'client.permissions.get_user_permission', 'permissions.get_user_permission', (['path1', 'target_keys', 'target_values'], {}), '(path1, target_keys, target_values)\n', (2638, 2673), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((8136, 8158), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (8151, 8158), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((10595, 10617), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (10610, 10617), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((11094, 11121), 'client.json_manager.get_json', 'json_manager.get_json', (['path'], {}), '(path)\n', (11115, 11121), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((11152, 11180), 'client.json_manager.get_data', 'json_manager.get_data', (['path1'], {}), '(path1)\n', (11173, 11180), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((11211, 11239), 'client.json_manager.get_data', 'json_manager.get_data', (['path2'], {}), '(path2)\n', (11232, 11239), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((17852, 17874), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (17867, 17874), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((20782, 20804), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (20797, 20804), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((22725, 22753), 'client.json_manager.get_data', 'json_manager.get_data', (['path2'], {}), '(path2)\n', (22746, 22753), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, 
server_timer\n'), ((22811, 22839), 'client.json_manager.get_data', 'json_manager.get_data', (['path3'], {}), '(path3)\n', (22832, 22839), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((27619, 27641), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (27634, 27641), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((28675, 28694), 'client.origin.get_locale', 'origin.get_locale', ([], {}), '()\n', (28692, 28694), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((28730, 28758), 'client.json_manager.get_data', 'json_manager.get_data', (['path2'], {}), '(path2)\n', (28751, 28758), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((28816, 28844), 'client.json_manager.get_data', 'json_manager.get_data', (['path3'], {}), '(path3)\n', (28837, 28844), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((33550, 33572), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (33565, 33572), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((34681, 34703), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (34696, 34703), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((35510, 35532), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (35525, 35532), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((35948, 35970), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (35963, 35970), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((36388, 36410), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (36403, 36410), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((36812, 36834), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (36827, 36834), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((37246, 37268), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (37261, 37268), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((37676, 37698), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (37691, 37698), False, 'from client import exception, embed_creator, console_interface, 
discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((38108, 38130), 'client.exception.error', 'exception.error', (['error'], {}), '(error)\n', (38123, 38130), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((1154, 1182), 'client.json_manager.get_json', 'json_manager.get_json', (['path3'], {}), '(path3)\n', (1175, 1182), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((3074, 3101), 'client.json_manager.get_json', 'json_manager.get_json', (['path'], {}), '(path)\n', (3095, 3101), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((3250, 3278), 'client.json_manager.get_data', 'json_manager.get_data', (['path2'], {}), '(path2)\n', (3271, 3278), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((3311, 3329), 'client.origin.randomize', 'origin.randomize', ([], {}), '()\n', (3327, 3329), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((8987, 9053), 'client.permissions.get_user_permission', 'permissions.get_user_permission', (['path1', 'target_keys', 'target_values'], {}), '(path1, target_keys, target_values)\n', (9018, 9053), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((9598, 9645), 'client.json_manager.clear_and_update', 'json_manager.clear_and_update', (['path2', 'NEW_LIST1'], {}), '(path2, NEW_LIST1)\n', (9627, 9645), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((9672, 9719), 'client.json_manager.clear_and_update', 'json_manager.clear_and_update', (['path3', 'NEW_LIST2'], {}), '(path3, NEW_LIST2)\n', (9701, 9719), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((10424, 10471), 'client.json_manager.clear_and_update', 'json_manager.clear_and_update', (['path2', 'NEW_LIST1'], {}), '(path2, NEW_LIST1)\n', (10453, 10471), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((10494, 10541), 'client.json_manager.clear_and_update', 'json_manager.clear_and_update', (['path3', 'NEW_LIST2'], {}), '(path3, NEW_LIST2)\n', (10523, 10541), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((19212, 19247), 'client.external.hiscores.hiscores_xp.Hiscores', 'hiscores_xp.Hiscores', (['USERNAME', '"""N"""'], {}), "(USERNAME, 'N')\n", (19232, 19247), False, 'from client.external.hiscores import hiscores_xp\n'), ((22601, 22635), 'client.origin.find_and_replace', 'origin.find_and_replace', (['STRING[1]'], {}), '(STRING[1])\n', (22624, 22635), False, 'from client import exception, 
embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((1921, 1947), 'discord.Color.dark_green', 'discord.Color.dark_green', ([], {}), '()\n', (1945, 1947), False, 'import discord, locale\n'), ((12483, 12518), 'client.external.hiscores.hiscores_xp.Hiscores', 'hiscores_xp.Hiscores', (['USERNAME', '"""N"""'], {}), "(USERNAME, 'N')\n", (12503, 12518), False, 'from client.external.hiscores import hiscores_xp\n'), ((3726, 3744), 'client.origin.randomize', 'origin.randomize', ([], {}), '()\n', (3742, 3744), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((5009, 5037), 'client.json_manager.get_json', 'json_manager.get_json', (['path4'], {}), '(path4)\n', (5030, 5037), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((15627, 15662), 'client.external.hiscores.hiscores_xp.Hiscores', 'hiscores_xp.Hiscores', (['USERNAME', '"""N"""'], {}), "(USERNAME, 'N')\n", (15647, 15662), False, 'from client.external.hiscores import hiscores_xp\n'), ((30388, 30416), 'client.json_manager.get_json', 'json_manager.get_json', (['path4'], {}), '(path4)\n', (30409, 30416), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((6241, 6320), 'client.discord_manager.get_channel', 'discord_manager.get_channel', (['self.client', 'ctx.guild.id', "server_config['events']"], {}), "(self.client, ctx.guild.id, server_config['events'])\n", (6268, 6320), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((6998, 7037), 'client.json_manager.create', 'json_manager.create', (['path2', 'json_string'], {}), '(path2, json_string)\n', (7017, 7037), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((7182, 7260), 'client.discord_manager.get_channel', 'discord_manager.get_channel', (['self.client', 'ctx.guild.id', "server_config['chat0']"], {}), "(self.client, ctx.guild.id, server_config['chat0'])\n", (7209, 7260), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((7310, 7388), 'client.discord_manager.get_channel', 'discord_manager.get_channel', (['self.client', 'ctx.guild.id', "server_config['chat1']"], {}), "(self.client, ctx.guild.id, server_config['chat1'])\n", (7337, 7388), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((33000, 33026), 'discord.Color.dark_green', 'discord.Color.dark_green', ([], {}), '()\n', (33024, 33026), False, 'import discord, locale\n'), ((6399, 6425), 'discord.Color.dark_green', 'discord.Color.dark_green', ([], {}), '()\n', (6423, 6425), False, 'import discord, locale\n'), ((13211, 13250), 'client.json_manager.create', 'json_manager.create', (['path1', 'json_string'], {}), '(path1, json_string)\n', (13230, 13250), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, 
ini_manager, json_manager, origin, permissions, server_timer\n'), ((13613, 13666), 'client.ini_manager.get_data', 'ini_manager.get_data', (['"""SECTION1"""', '"""EVENT_ROLE"""', 'path4'], {}), "('SECTION1', 'EVENT_ROLE', path4)\n", (13633, 13666), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((19948, 20015), 'client.console_interface.console_message', 'console_interface.console_message', (['"""XP self update"""', 'client_message'], {}), "('XP self update', client_message)\n", (19981, 20015), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((13368, 13457), 'client.json_manager.update', 'json_manager.update', (['path2', '"""id"""', "event_data['id']", '"""participants"""', "event_data['sum']"], {}), "(path2, 'id', event_data['id'], 'participants',\n event_data['sum'])\n", (13387, 13457), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((13881, 13949), 'client.discord_manager.get_member', 'discord_manager.get_member', (['self.client', 'ctx.guild.id', 'ctx.author.id'], {}), '(self.client, ctx.guild.id, ctx.author.id)\n', (13907, 13949), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((16163, 16248), 'client.json_manager.update', 'json_manager.update', (['path1', '"""user_id"""', 'ctx.author.id', 'target_keys', 'target_values'], {}), "(path1, 'user_id', ctx.author.id, target_keys, target_values\n )\n", (16182, 16248), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((32064, 32110), 'locale.format_string', 'locale.format_string', (['"""%d"""', 'SUM'], {'grouping': '(True)'}), "('%d', SUM, grouping=True)\n", (32084, 32110), False, 'import discord, locale\n'), ((32578, 32624), 'locale.format_string', 'locale.format_string', (['"""%d"""', 'SUM'], {'grouping': '(True)'}), "('%d', SUM, grouping=True)\n", (32598, 32624), False, 'import discord, locale\n'), ((16955, 17044), 'client.json_manager.update', 'json_manager.update', (['path2', '"""id"""', "event_data['id']", '"""participants"""', "event_data['sum']"], {}), "(path2, 'id', event_data['id'], 'participants',\n event_data['sum'])\n", (16974, 17044), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer\n'), ((25416, 25434), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (25430, 25434), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((25490, 25523), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['FONT_PATH', '(16)'], {}), '(FONT_PATH, 16)\n', (25508, 25523), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((24914, 24937), 'PIL.Image.open', 'Image.open', (['PLACE_IMAGE'], {}), '(PLACE_IMAGE)\n', (24924, 24937), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((26941, 26976), 'client.file_manager.delete_file', 'file_manager.delete_file', (['TEMP_FILE'], {}), '(TEMP_FILE)\n', (26965, 26976), False, 'from client import exception, embed_creator, console_interface, discord_manager, file_manager, 
ini_manager, json_manager, origin, permissions, server_timer\n'), ((31832, 31886), 'locale.format_string', 'locale.format_string', (['"""%d"""', "data['sum']"], {'grouping': '(True)'}), "('%d', data['sum'], grouping=True)\n", (31852, 31886), False, 'import discord, locale\n'), ((25010, 25039), 'PIL.Image.open', 'Image.open', (['INFO_PANEL_OBJECT'], {}), '(INFO_PANEL_OBJECT)\n', (25020, 25039), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((26374, 26429), 'locale.format_string', 'locale.format_string', (['"""%d"""', "data3['sum']"], {'grouping': '(True)'}), "('%d', data3['sum'], grouping=True)\n", (26394, 26429), False, 'import discord, locale\n'), ((26806, 26824), 'discord.File', 'discord.File', (['rank'], {}), '(rank)\n', (26818, 26824), False, 'import discord, locale\n')]
|
# Copyright © 2019. <NAME>. All rights reserved.
import numpy as np
import pandas as pd
from collections import OrderedDict
import math
import warnings
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist
from scipy.stats import chi2
from scipy.ndimage.filters import gaussian_filter1d
from .utils import Epoch
from .utils import printProgressBar, get_spike_positions
def calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features, pc_feature_ind, params,
duration, channel_locations=None, cluster_ids=None, epochs=None, seed=None, verbose=True):
""" Calculate metrics for all units on one probe
Inputs:
------
spike_times : numpy.ndarray (num_spikes x 0)
Spike times in seconds (same timebase as epochs)
spike_clusters : numpy.ndarray (num_spikes x 0)
        Cluster IDs for each spike time
    amplitudes : numpy.ndarray (num_spikes x 0)
        Amplitude value for each spike time
pc_features : numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind : numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
epochs : list of Epoch objects
contains information on Epoch start and stop times
duration : length of recording (seconds)
channel_locations : numpy.ndarray (num_channels x 2)
Channel locations (if None, a linear geometry is assumed)
params : dict of parameters
'isi_threshold' : minimum time for isi violations
'min_isi'
'num_channels_to_compare'
'max_spikes_for_unit'
        'max_spikes_for_nn'
        'n_silhouette'
'n_neighbors'
'drift_metrics_interval_s'
'drift_metrics_min_spikes_per_interval'
Outputs:
--------
metrics : pandas.DataFrame
one column for each metric
one row per unit per epoch
"""
metrics = pd.DataFrame()
if epochs is None:
epochs = [Epoch('complete_session', 0, np.inf)]
total_units = np.max(spike_clusters) + 1
total_epochs = len(epochs)
for epoch in epochs:
in_epoch = np.logical_and(spike_times >= epoch.start_time, spike_times < epoch.end_time)
spikes_in_epoch = np.sum(in_epoch)
spikes_for_nn = min(spikes_in_epoch, params['max_spikes_for_nn'])
spikes_for_silhouette = min(spikes_in_epoch, params['n_silhouette'])
print("Calculating isi violations")
isi_viol = calculate_isi_violations(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
isi_threshold=params['isi_threshold'],
min_isi=params['min_isi'],
duration=duration,
verbose=verbose)
print("Calculating presence ratio")
presence_ratio = calculate_presence_ratio(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
duration=duration, verbose=verbose)
print("Calculating firing rate")
firing_rate = calculate_firing_rates(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units, duration=duration, verbose=verbose)
print("Calculating amplitude cutoff")
amplitude_cutoff = calculate_amplitude_cutoff(spike_clusters=spike_clusters[in_epoch],
amplitudes=amplitudes[in_epoch],
total_units=total_units,
verbose=verbose)
print("Calculating PC-based metrics")
isolation_distance, l_ratio, d_prime, nn_hit_rate, nn_miss_rate = \
calculate_pc_metrics(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
num_channels_to_compare=params['num_channels_to_compare'],
max_spikes_for_cluster=params['max_spikes_for_unit'],
spikes_for_nn=spikes_for_nn,
n_neighbors=params['n_neighbors'],
channel_locations=
channel_locations,
seed=seed,
verbose=verbose)
print("Calculating silhouette score")
silhouette_score = calculate_silhouette_score(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
spikes_for_silhouette=spikes_for_silhouette,
seed=seed, verbose=verbose)
print("Calculating drift metrics")
max_drift, cumulative_drift = calculate_drift_metrics(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
interval_length=params['drift_metrics_interval_s'],
min_spikes_per_interval=
params['drift_metrics_min_spikes_per_interval'],
channel_locations=
channel_locations,
verbose=verbose)
if cluster_ids is None:
cluster_ids_out = np.arange(total_units)
else:
cluster_ids_out = cluster_ids
epoch_name = [epoch.name] * len(cluster_ids_out)
metrics = pd.concat((metrics, pd.DataFrame(data=OrderedDict((('cluster_id', cluster_ids_out),
('firing_rate', firing_rate),
('presence_ratio', presence_ratio),
('isi_violation', isi_viol),
('amplitude_cutoff', amplitude_cutoff),
('isolation_distance', isolation_distance),
('l_ratio', l_ratio),
('d_prime', d_prime),
('nn_hit_rate', nn_hit_rate),
('nn_miss_rate', nn_miss_rate),
('silhouette_score', silhouette_score),
('max_drift', max_drift),
('cumulative_drift', cumulative_drift),
('epoch_name', epoch_name),
)))))
return metrics
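# Hedged usage sketch (illustrative, not part of the original module): how calculate_metrics
# might be called on phy/Kilosort-style outputs. All file names and parameter values below
# are assumptions chosen for the example only.
#
#   spike_times = np.load('spike_times.npy')              # seconds
#   spike_clusters = np.load('spike_clusters.npy')
#   amplitudes = np.load('amplitudes.npy')
#   pc_features = np.load('pc_features.npy')
#   pc_feature_ind = np.load('pc_feature_ind.npy')
#   channel_locations = np.load('channel_positions.npy')  # (num_channels, 2)
#   params = {'isi_threshold': 0.0015, 'min_isi': 0.0,
#             'num_channels_to_compare': 7, 'max_spikes_for_unit': 500,
#             'max_spikes_for_nn': 10000, 'n_silhouette': 10000,
#             'n_neighbors': 4, 'drift_metrics_interval_s': 51,
#             'drift_metrics_min_spikes_per_interval': 10}
#   metrics = calculate_metrics(spike_times, spike_clusters, amplitudes,
#                               pc_features, pc_feature_ind, params,
#                               duration=spike_times.max(),
#                               channel_locations=channel_locations)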
# ===============================================================
# HELPER FUNCTIONS TO LOOP THROUGH CLUSTERS:
# ===============================================================
def calculate_isi_violations(spike_times, spike_clusters, total_units, isi_threshold, min_isi, duration,
spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
viol_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
viol_rates[cluster_id], num_violations = isi_violations(spike_times[for_this_cluster],
duration=duration,
isi_threshold=isi_threshold,
min_isi=min_isi)
return viol_rates
def calculate_presence_ratio(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None,
verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
ratios = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
ratios[cluster_id] = presence_ratio(spike_times[for_this_cluster],
duration=duration)
return ratios
def calculate_num_spikes(spike_times, spike_clusters, total_units, spike_cluster_subset=None, verbose=True):
num_spikes = np.zeros((total_units,))
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
num_spikes[cluster_id] = len(spike_times[for_this_cluster])
return num_spikes
def calculate_firing_rates(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
firing_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
firing_rates[cluster_id] = firing_rate(spike_times[for_this_cluster],
duration=duration)
return firing_rates
def calculate_amplitude_cutoff(spike_clusters, amplitudes, total_units, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
amplitude_cutoffs = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
amplitude_cutoffs[cluster_id] = amplitude_cutoff(amplitudes[for_this_cluster])
return amplitude_cutoffs
def calculate_pc_metrics(spike_clusters, total_units, pc_features, pc_feature_ind,
num_channels_to_compare, max_spikes_for_cluster, spikes_for_nn,
n_neighbors, channel_locations, min_num_pcs=10, metric_names=None,
seed=None, spike_cluster_subset=None, verbose=True):
"""
Computes metrics from projection of waveforms to principal components
including: isolation distance, l ratio, d prime, nn hit rate, nn miss rate
Parameters
----------
spike_clusters: numpy.ndarray (num_spikes,)
Unit ID for each spike time
total_units: int
Total number of units
pc_features: numpy.ndarray (num_spikes, num_pcs, num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind: numpy.ndarray (num_units, num_channels)
Channel indices of PCs for each unit
num_channels_to_compare: int
Number of channels around the max channel over which to compute the
metrics (e.g. only units from these channels will be considered for the
nearest neighbor metrics)
max_spikes_for_cluster: int
Total number of spikes to use for computing the metrics
spikes_for_nn: int
Number of spikes in a unit to use for computing nearest neighbor metrics
(nn_hit_rate, nn_miss_rate)
n_neighbors: int
Number of nearest neighbor spikes to compare membership
channel_locations: array, (channels, 2)
(x,y) location of channels; used to identify neighboring channels
min_num_pcs: int, default=10
Minimum number of spikes a unit must have to compute these metrics
metric_names: list of str, default=None
List of metrics to compute
seed: int, default=None
Random seed for subsampling spikes from the unit
spike_cluster_subset: numpy.array (units,), default=None
If specified compute metrics for only these units
verbose: bool, default=True
Prints out progress bar if True
Returns (all 1d numpy.arrays)
-------
isolation_distances
l_ratios
d_primes
nn_hit_rates
nn_miss_rates
"""
if metric_names is None:
metric_names = ['isolation_distance', 'l_ratio', 'd_prime', 'nearest_neighbor']
if num_channels_to_compare > channel_locations.shape[0]:
num_channels_to_compare = channel_locations.shape[0]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
peak_channels = np.zeros((total_units,), dtype='uint16')
neighboring_channels = np.zeros((total_units, num_channels_to_compare))
isolation_distances = np.zeros((total_units,))
l_ratios = np.zeros((total_units,))
d_primes = np.zeros((total_units,))
nn_hit_rates = np.zeros((total_units,))
nn_miss_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(all_cluster_ids):
for_unit = np.squeeze(spike_clusters == cluster_id)
pc_max = np.argmax(np.mean(pc_features[for_unit, 0, :], 0))
peak_channels[idx] = pc_feature_ind[idx, pc_max]
# find neighboring channels
neighboring_channels[idx] = find_neighboring_channels(pc_feature_ind[idx, pc_max],
pc_feature_ind[idx, :],
num_channels_to_compare,
channel_locations)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(idx + 1, total_units)
peak_channel = peak_channels[idx]
# units_for_channel: index (not ID) of units defined at the target unit's peak channel
units_for_channel, channel_index = np.unravel_index(np.where(pc_feature_ind.flatten() == peak_channel)[0],
pc_feature_ind.shape)
# units_in_range: list of bool, True for units whose peak channels are in the neighborhood of target unit
units_in_range = [channel in neighboring_channels[idx] for channel in peak_channels[units_for_channel]]
channels_to_use = neighboring_channels[idx]
# only get index of units who are in the neighborhood of target unit
units_for_channel = units_for_channel[units_in_range]
spike_counts = np.zeros(units_for_channel.shape)
for idx2, cluster_id2 in enumerate(units_for_channel):
spike_counts[idx2] = np.sum(spike_clusters == all_cluster_ids[cluster_id2])
# index of target unit within the subset of units in its neighborhood (including itself)
this_unit_idx = np.where(units_for_channel == idx)[0]
if spike_counts[this_unit_idx] > max_spikes_for_cluster:
relative_counts = spike_counts / spike_counts[this_unit_idx] * max_spikes_for_cluster
else:
relative_counts = spike_counts
all_pcs = np.zeros((0, pc_features.shape[1], channels_to_use.size))
all_labels = np.zeros((0,))
for idx2, cluster_id2 in enumerate(units_for_channel):
try:
channel_mask = make_channel_mask(cluster_id2, pc_feature_ind, channels_to_use)
except IndexError:
# Occurs when pc_feature_ind does not contain all channels of interest
# In that case, we will exclude this unit for the calculation
                print('Unit outside the range set by channels_to_use, skipping...')
pass
else:
subsample = int(relative_counts[idx2])
index_mask = make_index_mask(spike_clusters, all_cluster_ids[cluster_id2], min_num=0, max_num=subsample,
seed=seed)
pcs = get_unit_pcs(pc_features, index_mask, channel_mask)
labels = np.ones((pcs.shape[0],)) * all_cluster_ids[cluster_id2]
all_pcs = np.concatenate((all_pcs, pcs), 0)
all_labels = np.concatenate((all_labels, labels), 0)
all_pcs = np.reshape(all_pcs, (all_pcs.shape[0], pc_features.shape[1] * channels_to_use.size))
if all_pcs.shape[0] > min_num_pcs:
if 'isolation_distance' in metric_names or 'l_ratio' in metric_names:
isolation_distances[idx], l_ratios[idx] = mahalanobis_metrics(all_pcs, all_labels,
cluster_id)
else:
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
if 'd_prime' in metric_names:
d_primes[idx] = lda_metrics(all_pcs, all_labels, cluster_id)
else:
d_primes[idx] = np.nan
if 'nearest_neighbor' in metric_names:
nn_hit_rates[idx], nn_miss_rates[idx] = nearest_neighbors_metrics(all_pcs, all_labels,
cluster_id,
spikes_for_nn,
n_neighbors)
else:
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
else:
            print(f'Unit {cluster_id} only has {all_pcs.shape[0]} spikes, '
                  'which is not enough to compute metrics; assigning nan...')
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
d_primes[idx] = np.nan
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
return isolation_distances, l_ratios, d_primes, nn_hit_rates, nn_miss_rates
def calculate_silhouette_score(spike_clusters,
total_units,
pc_features,
pc_feature_ind,
spikes_for_silhouette,
seed=None,
spike_cluster_subset=None,
verbose=True):
random_spike_inds = np.random.RandomState(seed=seed).permutation(spike_clusters.size)
random_spike_inds = random_spike_inds[:spikes_for_silhouette]
num_pc_features = pc_features.shape[1]
num_channels = np.max(pc_feature_ind) + 1
all_pcs = np.zeros((spikes_for_silhouette, num_channels * num_pc_features))
for idx, i in enumerate(random_spike_inds):
unit_id = spike_clusters[i]
channels = pc_feature_ind[unit_id, :]
for j in range(0, num_pc_features):
all_pcs[idx, channels + num_channels * j] = pc_features[i, j, :]
cluster_labels = spike_clusters[random_spike_inds]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
SS = np.empty((total_units, total_units))
SS[:] = np.nan
seen_unit_pairs = set()
for idx1, i in enumerate(cluster_ids):
if verbose:
printProgressBar(idx1 + 1, len(cluster_ids))
for idx2, j in enumerate(all_cluster_ids):
if (i, j) not in seen_unit_pairs and (j, i) not in seen_unit_pairs and i != j:
inds = np.in1d(cluster_labels, np.array([i, j]))
X = all_pcs[inds, :]
labels = cluster_labels[inds]
if len(labels) > 2:
SS[i, j] = silhouette_score(X, labels, random_state=seed)
seen_unit_pairs.add((i, j))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
a = np.nanmin(SS, 0)
b = np.nanmin(SS, 1)
return np.array([np.nanmin([a, b]) for a, b in zip(a, b)])
def calculate_drift_metrics(spike_times,
spike_clusters,
total_units,
pc_features,
pc_feature_ind,
interval_length,
min_spikes_per_interval,
vertical_channel_spacing=10,
channel_locations=None,
spike_cluster_subset=None,
verbose=True):
max_drift = np.zeros((total_units,))
cumulative_drift = np.zeros((total_units,))
positions = get_spike_positions(spike_clusters, pc_features, pc_feature_ind, channel_locations,
vertical_channel_spacing)
interval_starts = np.arange(np.min(spike_times), np.max(spike_times), interval_length)
interval_ends = interval_starts + interval_length
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, len(cluster_ids))
in_cluster = spike_clusters == cluster_id
times_for_cluster = spike_times[in_cluster]
positions_for_cluster = positions[in_cluster]
median_positions = []
for t1, t2 in zip(interval_starts, interval_ends):
in_range = (times_for_cluster > t1) * (times_for_cluster < t2)
if np.sum(in_range) >= min_spikes_per_interval:
median_positions.append(np.median(positions_for_cluster[in_range], 0))
else:
median_positions.append([np.nan, np.nan])
median_positions = np.array(median_positions)
        # Extract semi-matrix (upper triangle) of shifts in positions (used to extract max_drift and cum_drift)
position_diffs = np.zeros((len(median_positions), len(median_positions)))
for i, pos_i in enumerate(median_positions):
for j, pos_j in enumerate(median_positions):
if j > i:
if not np.isnan(pos_i[0]) and not np.isnan(pos_j[0]):
position_diffs[i, j] = np.linalg.norm(pos_i - pos_j)
else:
position_diffs[i, j] = 0
# Maximum drift among all periods
if np.any(position_diffs > 0):
max_drift[cluster_id] = np.around(np.max(position_diffs[position_diffs > 0]), 2)
# The +1 diagonal contains the step-by-step drifts between intervals.
# Summing them up we obtain cumulative drift
cumulative_drift[cluster_id] = np.around(np.sum(np.diag(position_diffs, 1)), 2)
else:
# not enough spikes
max_drift[cluster_id] = 0
cumulative_drift[cluster_id] = 0
return max_drift, cumulative_drift
# ==========================================================
# IMPLEMENTATION OF ACTUAL METRICS:
# ==========================================================
def isi_violations(spike_train, duration, isi_threshold, min_isi=0):
"""Calculate Inter-Spike Interval (ISI) violations for a spike train.
Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Originally written in Matlab by <NAME> (https://github.com/cortex-lab/sortingQuality)
Converted to Python by <NAME>
Inputs:
-------
spike_train : array of monotonically increasing spike times (in seconds) [t1, t2, t3, ...]
duration : length of recording (seconds)
isi_threshold : threshold for classifying adjacent spikes as an ISI violation
- this is the biophysical refractory period
min_isi : minimum possible inter-spike interval (default = 0)
- this is the artificial refractory period enforced by the data acquisition system
or post-processing algorithms
Outputs:
--------
fpRate : rate of contaminating spikes as a fraction of overall rate
- higher values indicate more contamination
num_violations : total number of violations detected
"""
isis_initial = np.diff(spike_train)
if min_isi > 0:
duplicate_spikes = np.where(isis_initial <= min_isi)[0]
spike_train = np.delete(spike_train, duplicate_spikes + 1)
isis = np.diff(spike_train)
num_spikes = len(spike_train)
num_violations = sum(isis < isi_threshold)
violation_time = 2 * num_spikes * (isi_threshold - min_isi)
total_rate = firing_rate(spike_train, duration)
violation_rate = num_violations / violation_time
fpRate = violation_rate / total_rate
return fpRate, num_violations
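# Hedged worked example (illustrative): for a 100 s recording with 2000 spikes,
# isi_threshold = 1.5 ms and min_isi = 0, violation_time = 2 * 2000 * 0.0015 = 6 s;
# observing 3 violations gives violation_rate = 3 / 6 = 0.5 Hz,
# total_rate = 2000 / 100 = 20 Hz, and fpRate = 0.5 / 20 = 0.025.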
def presence_ratio(spike_train, duration, num_bin_edges=101):
"""Calculate fraction of time the unit is present within an epoch.
Inputs:
-------
spike_train : array of spike times
duration : length of recording (seconds)
num_bin_edges : number of bin edges for histogram
- total bins = num_bin_edges - 1
Outputs:
--------
presence_ratio : fraction of time bins in which this unit is spiking
"""
h, b = np.histogram(spike_train, np.linspace(0, duration, num_bin_edges))
return np.sum(h > 0) / (num_bin_edges - 1)
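# Hedged worked example (illustrative): with the default num_bin_edges = 101 the epoch is
# split into 100 bins; a unit that fires in 92 of them has presence_ratio = 92 / 100 = 0.92.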
def firing_rate(spike_train, duration):
"""Calculate firing rate for a spike train.
    The rate is computed as the total number of spikes divided by the recording duration.
Inputs:
-------
spike_train : array of spike times (in seconds)
duration : length of recording (in seconds)
Outputs:
--------
fr : float
Firing rate in Hz
"""
fr = spike_train.size / duration
return fr
def amplitude_cutoff(amplitudes, num_histogram_bins=500, histogram_smoothing_value=3):
""" Calculate approximate fraction of spikes missing from a distribution of amplitudes
Assumes the amplitude histogram is symmetric (not valid in the presence of drift)
Inspired by metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Input:
------
amplitudes : numpy.ndarray
Array of amplitudes (don't need to be in physical units)
num_histogram_bins : int
Number of bins for calculating amplitude histogram
histogram_smoothing_value : float
Gaussian filter window for smoothing amplitude histogram
Output:
-------
fraction_missing : float
Fraction of missing spikes (ranges between 0 and 0.5)
If more than 50% of spikes are missing, an accurate estimate isn't possible
"""
h, b = np.histogram(amplitudes, num_histogram_bins, density=True)
pdf = gaussian_filter1d(h, histogram_smoothing_value)
support = b[:-1]
peak_index = np.argmax(pdf)
G = np.argmin(np.abs(pdf[peak_index:] - pdf[0])) + peak_index
bin_size = np.mean(np.diff(support))
fraction_missing = np.sum(pdf[G:]) * bin_size
fraction_missing = np.min([fraction_missing, 0.5])
return fraction_missing
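# Hedged usage sketch (illustrative): a symmetric amplitude distribution yields a
# fraction_missing near 0, while one truncated at a detection threshold pushes the
# estimate toward 0.5. The names below are assumptions for the example only.
#
#   amps = np.random.RandomState(0).normal(100.0, 10.0, 5000)
#   amplitude_cutoff(amps)               # ~0: nothing clipped
#   amplitude_cutoff(amps[amps > 95.0])  # > 0: lower tail removed, spikes "missing"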
def mahalanobis_metrics(all_pcs, all_labels, this_unit_id):
""" Calculates isolation distance and L-ratio (metrics computed from Mahalanobis distance)
Based on metrics described in Schmitzer-Torbert et al. (2005) Neurosci 131: 1-11
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
Outputs:
--------
isolation_distance : float
Isolation distance of this unit
l_ratio : float
L-ratio for this unit
"""
pcs_for_this_unit = all_pcs[all_labels == this_unit_id, :]
pcs_for_other_units = all_pcs[all_labels != this_unit_id, :]
mean_value = np.expand_dims(np.mean(pcs_for_this_unit, 0), 0)
try:
VI = np.linalg.inv(np.cov(pcs_for_this_unit.T))
except np.linalg.linalg.LinAlgError: # case of singular matrix
return np.nan, np.nan
mahalanobis_other = np.sort(cdist(mean_value,
pcs_for_other_units,
'mahalanobis', VI=VI)[0])
mahalanobis_self = np.sort(cdist(mean_value,
pcs_for_this_unit,
'mahalanobis', VI=VI)[0])
n = np.min([pcs_for_this_unit.shape[0], pcs_for_other_units.shape[0]]) # number of spikes
if n >= 2:
dof = pcs_for_this_unit.shape[1] # number of features
l_ratio = np.sum(1 - chi2.cdf(pow(mahalanobis_other, 2), dof)) / mahalanobis_self.shape[0]
isolation_distance = pow(mahalanobis_other[n - 1], 2)
# if math.isnan(l_ratio):
# print("NaN detected", mahalanobis_other, VI)
else:
l_ratio = np.nan
isolation_distance = np.nan
return isolation_distance, l_ratio
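# Hedged usage sketch (illustrative): two well-separated synthetic clusters in a
# 3-dimensional PC space. All names below are assumptions for the example only.
#
#   rng = np.random.RandomState(0)
#   pcs = np.vstack((rng.normal(0.0, 1.0, (500, 3)), rng.normal(4.0, 1.0, (500, 3))))
#   labels = np.repeat([0, 1], 500)
#   iso_dist, l_ratio = mahalanobis_metrics(pcs, labels, this_unit_id=0)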
def lda_metrics(all_pcs, all_labels, this_unit_id):
""" Calculates d-prime based on Linear Discriminant Analysis
Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
Outputs:
--------
    d_prime : float
        d-prime of this unit (discriminability between this unit and all other spikes)
"""
X = all_pcs
y = np.zeros((X.shape[0],), dtype='bool')
y[all_labels == this_unit_id] = True
lda = LDA(n_components=1)
X_flda = lda.fit_transform(X, y)
flda_this_cluster = X_flda[np.where(y)[0]]
flda_other_cluster = X_flda[np.where(np.invert(y))[0]]
d_prime = (np.mean(flda_this_cluster) - np.mean(flda_other_cluster)) / np.sqrt(
0.5 * (np.std(flda_this_cluster) ** 2 + np.std(flda_other_cluster) ** 2))
return d_prime
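# Hedged usage sketch (illustrative): well-separated clusters give a large d-prime,
# overlapping clusters a value near 0. Names below are assumptions for the example only.
#
#   rng = np.random.RandomState(0)
#   pcs = np.vstack((rng.normal(0.0, 1.0, (300, 3)), rng.normal(5.0, 1.0, (300, 3))))
#   labels = np.repeat([7, 9], 300)
#   lda_metrics(pcs, labels, this_unit_id=7)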
def nearest_neighbors_metrics(all_pcs, all_labels, this_unit_id, spikes_for_nn, n_neighbors):
""" Calculates unit contamination based on NearestNeighbors search in PCA space
Based on metrics described in Chung, Magland et al. (2017) Neuron 95: 1381-1394
A is a (hopefully) representative subset of cluster X
NN_hit(X) = 1/k \sum_i=1^k |{x in A such that ith closest neighbor is in X}| / |A|
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
spikes_for_nn : Int
number of spikes to use (calculation can be very slow when this number is >20000)
n_neighbors : Int
number of neighbors to use
Outputs:
--------
hit_rate : float
Fraction of neighbors for target cluster that are also in target cluster
miss_rate : float
Fraction of neighbors outside target cluster that are in target cluster
"""
total_spikes = all_pcs.shape[0]
ratio = spikes_for_nn / total_spikes
this_unit = all_labels == this_unit_id
X = np.concatenate((all_pcs[this_unit, :], all_pcs[np.invert(this_unit), :]), 0)
n = np.sum(this_unit)
if ratio < 1:
inds = np.arange(0, X.shape[0] - 1, 1 / ratio).astype('int')
X = X[inds, :]
n = int(n * ratio)
nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(X)
this_cluster_inds = np.arange(n)
this_cluster_nearest = indices[:n, 1:].flatten()
other_cluster_nearest = indices[n:, 1:].flatten()
hit_rate = np.mean(this_cluster_nearest < n)
miss_rate = np.mean(other_cluster_nearest < n)
return hit_rate, miss_rate
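# Hedged usage sketch (illustrative): nearest-neighbor hit/miss rates for two synthetic
# clusters. Names and parameter values below are assumptions for the example only.
#
#   rng = np.random.RandomState(0)
#   pcs = np.vstack((rng.normal(0.0, 1.0, (400, 3)), rng.normal(3.0, 1.0, (400, 3))))
#   labels = np.repeat([0, 1], 400)
#   hit_rate, miss_rate = nearest_neighbors_metrics(pcs, labels, this_unit_id=0,
#                                                   spikes_for_nn=200, n_neighbors=4)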
# ==========================================================
# HELPER FUNCTIONS:
# ==========================================================
def make_index_mask(spike_clusters, unit_id, min_num, max_num, seed=None):
""" Create a mask for the spike index dimensions of the pc_features array
Inputs:
-------
spike_clusters : numpy.ndarray (num_spikes x 0)
Contains cluster IDs for all spikes in pc_features array
unit_id : Int
ID for this unit
min_num : Int
Minimum number of spikes to return; if there are not enough spikes for this unit, return all False
max_num : Int
Maximum number of spikes to return; if too many spikes for this unit, return a random subsample
seed: int
Random seed for reproducibility
Output:
-------
index_mask : numpy.ndarray (boolean)
Mask of spike indices for pc_features array
"""
index_mask = spike_clusters == unit_id
inds = np.where(index_mask)[0]
if len(inds) < min_num:
index_mask = np.zeros((spike_clusters.size,), dtype='bool')
else:
index_mask = np.zeros((spike_clusters.size,), dtype='bool')
order = np.random.RandomState(seed=seed).permutation(inds.size)
index_mask[inds[order[:max_num]]] = True
return index_mask
def make_channel_mask(unit_id, pc_feature_ind, channels_to_use):
""" Create a mask for the channel dimension of the pc_features array
Inputs:
-------
unit_id : Int
ID for this unit
pc_feature_ind : np.ndarray
Channels used for PC calculation for each unit
channels_to_use : np.ndarray
Channels to use for calculating metrics
Output:
-------
channel_mask : numpy.ndarray
Channel indices to extract from pc_features array
"""
these_inds = pc_feature_ind[unit_id, :]
channel_mask = [np.argwhere(these_inds == i)[0][0] for i in channels_to_use]
return np.array(channel_mask)
def get_unit_pcs(these_pc_features, index_mask, channel_mask):
""" Use the index_mask and channel_mask to return PC features for one unit
Inputs:
-------
these_pc_features : numpy.ndarray (float)
Array of pre-computed PC features (num_spikes x num_PCs x num_channels)
index_mask : numpy.ndarray (boolean)
Mask for spike index dimension of pc_features array
channel_mask : numpy.ndarray (boolean)
Mask for channel index dimension of pc_features array
Output:
-------
unit_PCs : numpy.ndarray (float)
PCs for one unit (num_spikes x num_PCs x num_channels)
"""
unit_PCs = these_pc_features[index_mask, :, :]
unit_PCs = unit_PCs[:, :, channel_mask]
return unit_PCs
def find_neighboring_channels(peak_channel, channel_list, num_channels_to_compare, channel_locations):
"""
Finds k nearest channels to the peak channel of a unit
Parameters
----------
peak_channel: int
ID of channel with largest waveform amplitude
channel_list: numpy.ndarray
IDs of channels being considered
num_channels_to_compare: int
Number of nearest channels to return
channel_locations: numpy.ndarray, (n_channels, 2)
x,y coordinates of the channels in channel_list
Returns
-------
neighboring_channels: array_like
id of k channels that neighbor peak channel (including the peak channel itself)
"""
# get peak channel location
channel_idx = list(channel_list).index(peak_channel)
peak_channel_location = channel_locations[channel_idx]
# compute pairwise distance
distances = [np.linalg.norm(peak_channel_location - loc) for loc in channel_locations]
# get k closest channels (+1 because distance 0 is peak_channel)
neighboring_channels_inds = np.argsort(distances)[:num_channels_to_compare]
neighboring_channels = channel_list[neighboring_channels_inds]
return neighboring_channels
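# Hedged demo (illustrative, not part of the original module): exercise the simpler
# single-unit metrics on synthetic data. All names and parameter values below are
# assumptions chosen for the example only.
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _duration = 100.0                                     # seconds of simulated recording
    _spike_train = np.sort(_rng.uniform(0.0, _duration, 2000))
    _amps = _rng.normal(100.0, 10.0, _spike_train.size)  # arbitrary amplitude units
    _fp_rate, _n_viol = isi_violations(_spike_train, duration=_duration,
                                       isi_threshold=0.0015, min_isi=0.0)
    _pr = presence_ratio(_spike_train, duration=_duration)
    _fr = firing_rate(_spike_train, duration=_duration)
    _ac = amplitude_cutoff(_amps)
    print('isi violation rate: %.3f (%d violations), presence ratio: %.2f, '
          'firing rate: %.1f Hz, amplitude cutoff: %.3f'
          % (_fp_rate, _n_viol, _pr, _fr, _ac))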
|
[
"numpy.invert",
"numpy.argsort",
"numpy.array",
"numpy.linalg.norm",
"numpy.nanmin",
"numpy.cov",
"numpy.arange",
"numpy.random.RandomState",
"numpy.mean",
"numpy.histogram",
"numpy.reshape",
"numpy.where",
"numpy.delete",
"numpy.diff",
"numpy.max",
"numpy.linspace",
"numpy.empty",
"sklearn.neighbors.NearestNeighbors",
"numpy.min",
"pandas.DataFrame",
"warnings.simplefilter",
"numpy.concatenate",
"sklearn.metrics.silhouette_score",
"numpy.abs",
"collections.OrderedDict",
"numpy.ones",
"numpy.argmax",
"numpy.any",
"numpy.squeeze",
"numpy.isnan",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.std",
"numpy.median",
"numpy.unique",
"numpy.logical_and",
"scipy.spatial.distance.cdist",
"warnings.catch_warnings",
"numpy.diag",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis"
] |
[((1982, 1996), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1994, 1996), True, 'import pandas as pd\n'), ((8894, 8918), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (8902, 8918), True, 'import numpy as np\n'), ((9794, 9818), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (9802, 9818), True, 'import numpy as np\n'), ((10294, 10318), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (10302, 10318), True, 'import numpy as np\n'), ((11027, 11051), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (11035, 11051), True, 'import numpy as np\n'), ((11694, 11718), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (11702, 11718), True, 'import numpy as np\n'), ((14464, 14489), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (14473, 14489), True, 'import numpy as np\n'), ((14643, 14683), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {'dtype': '"""uint16"""'}), "((total_units,), dtype='uint16')\n", (14651, 14683), True, 'import numpy as np\n'), ((14711, 14759), 'numpy.zeros', 'np.zeros', (['(total_units, num_channels_to_compare)'], {}), '((total_units, num_channels_to_compare))\n', (14719, 14759), True, 'import numpy as np\n'), ((14786, 14810), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14794, 14810), True, 'import numpy as np\n'), ((14826, 14850), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14834, 14850), True, 'import numpy as np\n'), ((14866, 14890), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14874, 14890), True, 'import numpy as np\n'), ((14910, 14934), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14918, 14934), True, 'import numpy as np\n'), ((14955, 14979), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14963, 14979), True, 'import numpy as np\n'), ((20546, 20611), 'numpy.zeros', 'np.zeros', (['(spikes_for_silhouette, num_channels * num_pc_features)'], {}), '((spikes_for_silhouette, num_channels * num_pc_features))\n', (20554, 20611), True, 'import numpy as np\n'), ((20945, 20970), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (20954, 20970), True, 'import numpy as np\n'), ((21113, 21149), 'numpy.empty', 'np.empty', (['(total_units, total_units)'], {}), '((total_units, total_units))\n', (21121, 21149), True, 'import numpy as np\n'), ((22501, 22525), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (22509, 22525), True, 'import numpy as np\n'), ((22549, 22573), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (22557, 22573), True, 'import numpy as np\n'), ((26111, 26131), 'numpy.diff', 'np.diff', (['spike_train'], {}), '(spike_train)\n', (26118, 26131), True, 'import numpy as np\n'), ((26296, 26316), 'numpy.diff', 'np.diff', (['spike_train'], {}), '(spike_train)\n', (26303, 26316), True, 'import numpy as np\n'), ((28538, 28596), 'numpy.histogram', 'np.histogram', (['amplitudes', 'num_histogram_bins'], {'density': '(True)'}), '(amplitudes, num_histogram_bins, density=True)\n', (28550, 28596), True, 'import numpy as np\n'), ((28608, 28655), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['h', 'histogram_smoothing_value'], {}), '(h, histogram_smoothing_value)\n', (28625, 28655), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((28695, 28709), 
'numpy.argmax', 'np.argmax', (['pdf'], {}), '(pdf)\n', (28704, 28709), True, 'import numpy as np\n'), ((28892, 28923), 'numpy.min', 'np.min', (['[fraction_missing, 0.5]'], {}), '([fraction_missing, 0.5])\n', (28898, 28923), True, 'import numpy as np\n'), ((30377, 30443), 'numpy.min', 'np.min', (['[pcs_for_this_unit.shape[0], pcs_for_other_units.shape[0]]'], {}), '([pcs_for_this_unit.shape[0], pcs_for_other_units.shape[0]])\n', (30383, 30443), True, 'import numpy as np\n'), ((31589, 31626), 'numpy.zeros', 'np.zeros', (['(X.shape[0],)'], {'dtype': '"""bool"""'}), "((X.shape[0],), dtype='bool')\n", (31597, 31626), True, 'import numpy as np\n'), ((31679, 31698), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LDA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (31682, 31698), True, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n'), ((33381, 33398), 'numpy.sum', 'np.sum', (['this_unit'], {}), '(this_unit)\n', (33387, 33398), True, 'import numpy as np\n'), ((33690, 33702), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (33699, 33702), True, 'import numpy as np\n'), ((33827, 33860), 'numpy.mean', 'np.mean', (['(this_cluster_nearest < n)'], {}), '(this_cluster_nearest < n)\n', (33834, 33860), True, 'import numpy as np\n'), ((33877, 33911), 'numpy.mean', 'np.mean', (['(other_cluster_nearest < n)'], {}), '(other_cluster_nearest < n)\n', (33884, 33911), True, 'import numpy as np\n'), ((35893, 35915), 'numpy.array', 'np.array', (['channel_mask'], {}), '(channel_mask)\n', (35901, 35915), True, 'import numpy as np\n'), ((2096, 2118), 'numpy.max', 'np.max', (['spike_clusters'], {}), '(spike_clusters)\n', (2102, 2118), True, 'import numpy as np\n'), ((2199, 2276), 'numpy.logical_and', 'np.logical_and', (['(spike_times >= epoch.start_time)', '(spike_times < epoch.end_time)'], {}), '(spike_times >= epoch.start_time, spike_times < epoch.end_time)\n', (2213, 2276), True, 'import numpy as np\n'), ((2303, 2319), 'numpy.sum', 'np.sum', (['in_epoch'], {}), '(in_epoch)\n', (2309, 2319), True, 'import numpy as np\n'), ((8850, 8875), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (8859, 8875), True, 'import numpy as np\n'), ((9754, 9779), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (9763, 9779), True, 'import numpy as np\n'), ((10435, 10460), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (10444, 10460), True, 'import numpy as np\n'), ((10981, 11006), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (10990, 11006), True, 'import numpy as np\n'), ((11643, 11668), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (11652, 11668), True, 'import numpy as np\n'), ((15055, 15095), 'numpy.squeeze', 'np.squeeze', (['(spike_clusters == cluster_id)'], {}), '(spike_clusters == cluster_id)\n', (15065, 15095), True, 'import numpy as np\n'), ((16505, 16538), 'numpy.zeros', 'np.zeros', (['units_for_channel.shape'], {}), '(units_for_channel.shape)\n', (16513, 16538), True, 'import numpy as np\n'), ((17091, 17148), 'numpy.zeros', 'np.zeros', (['(0, pc_features.shape[1], channels_to_use.size)'], {}), '((0, pc_features.shape[1], channels_to_use.size))\n', (17099, 17148), True, 'import numpy as np\n'), ((17170, 17184), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (17178, 17184), True, 'import numpy as np\n'), ((18216, 18304), 'numpy.reshape', 'np.reshape', (['all_pcs', '(all_pcs.shape[0], pc_features.shape[1] * 
channels_to_use.size)'], {}), '(all_pcs, (all_pcs.shape[0], pc_features.shape[1] *\n channels_to_use.size))\n', (18226, 18304), True, 'import numpy as np\n'), ((20504, 20526), 'numpy.max', 'np.max', (['pc_feature_ind'], {}), '(pc_feature_ind)\n', (20510, 20526), True, 'import numpy as np\n'), ((21778, 21803), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (21801, 21803), False, 'import warnings\n'), ((21813, 21844), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (21834, 21844), False, 'import warnings\n'), ((21857, 21873), 'numpy.nanmin', 'np.nanmin', (['SS', '(0)'], {}), '(SS, 0)\n', (21866, 21873), True, 'import numpy as np\n'), ((21886, 21902), 'numpy.nanmin', 'np.nanmin', (['SS', '(1)'], {}), '(SS, 1)\n', (21895, 21902), True, 'import numpy as np\n'), ((22769, 22788), 'numpy.min', 'np.min', (['spike_times'], {}), '(spike_times)\n', (22775, 22788), True, 'import numpy as np\n'), ((22790, 22809), 'numpy.max', 'np.max', (['spike_times'], {}), '(spike_times)\n', (22796, 22809), True, 'import numpy as np\n'), ((22999, 23024), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (23008, 23024), True, 'import numpy as np\n'), ((23736, 23762), 'numpy.array', 'np.array', (['median_positions'], {}), '(median_positions)\n', (23744, 23762), True, 'import numpy as np\n'), ((24356, 24382), 'numpy.any', 'np.any', (['(position_diffs > 0)'], {}), '(position_diffs > 0)\n', (24362, 24382), True, 'import numpy as np\n'), ((26239, 26283), 'numpy.delete', 'np.delete', (['spike_train', '(duplicate_spikes + 1)'], {}), '(spike_train, duplicate_spikes + 1)\n', (26248, 26283), True, 'import numpy as np\n'), ((27127, 27166), 'numpy.linspace', 'np.linspace', (['(0)', 'duration', 'num_bin_edges'], {}), '(0, duration, num_bin_edges)\n', (27138, 27166), True, 'import numpy as np\n'), ((27180, 27193), 'numpy.sum', 'np.sum', (['(h > 0)'], {}), '(h > 0)\n', (27186, 27193), True, 'import numpy as np\n'), ((28800, 28816), 'numpy.diff', 'np.diff', (['support'], {}), '(support)\n', (28807, 28816), True, 'import numpy as np\n'), ((28841, 28856), 'numpy.sum', 'np.sum', (['pdf[G:]'], {}), '(pdf[G:])\n', (28847, 28856), True, 'import numpy as np\n'), ((29827, 29856), 'numpy.mean', 'np.mean', (['pcs_for_this_unit', '(0)'], {}), '(pcs_for_this_unit, 0)\n', (29834, 29856), True, 'import numpy as np\n'), ((34913, 34933), 'numpy.where', 'np.where', (['index_mask'], {}), '(index_mask)\n', (34921, 34933), True, 'import numpy as np\n'), ((34987, 35033), 'numpy.zeros', 'np.zeros', (['(spike_clusters.size,)'], {'dtype': '"""bool"""'}), "((spike_clusters.size,), dtype='bool')\n", (34995, 35033), True, 'import numpy as np\n'), ((35065, 35111), 'numpy.zeros', 'np.zeros', (['(spike_clusters.size,)'], {'dtype': '"""bool"""'}), "((spike_clusters.size,), dtype='bool')\n", (35073, 35111), True, 'import numpy as np\n'), ((37564, 37607), 'numpy.linalg.norm', 'np.linalg.norm', (['(peak_channel_location - loc)'], {}), '(peak_channel_location - loc)\n', (37578, 37607), True, 'import numpy as np\n'), ((37739, 37760), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (37749, 37760), True, 'import numpy as np\n'), ((6726, 6748), 'numpy.arange', 'np.arange', (['total_units'], {}), '(total_units)\n', (6735, 6748), True, 'import numpy as np\n'), ((15123, 15162), 'numpy.mean', 'np.mean', (['pc_features[for_unit, 0, :]', '(0)'], {}), '(pc_features[for_unit, 0, :], 0)\n', (15130, 15162), True, 'import numpy as np\n'), ((16636, 16690), 'numpy.sum', 
'np.sum', (['(spike_clusters == all_cluster_ids[cluster_id2])'], {}), '(spike_clusters == all_cluster_ids[cluster_id2])\n', (16642, 16690), True, 'import numpy as np\n'), ((16813, 16847), 'numpy.where', 'np.where', (['(units_for_channel == idx)'], {}), '(units_for_channel == idx)\n', (16821, 16847), True, 'import numpy as np\n'), ((20310, 20342), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (20331, 20342), True, 'import numpy as np\n'), ((21925, 21942), 'numpy.nanmin', 'np.nanmin', (['[a, b]'], {}), '([a, b])\n', (21934, 21942), True, 'import numpy as np\n'), ((26180, 26213), 'numpy.where', 'np.where', (['(isis_initial <= min_isi)'], {}), '(isis_initial <= min_isi)\n', (26188, 26213), True, 'import numpy as np\n'), ((28728, 28761), 'numpy.abs', 'np.abs', (['(pdf[peak_index:] - pdf[0])'], {}), '(pdf[peak_index:] - pdf[0])\n', (28734, 28761), True, 'import numpy as np\n'), ((29898, 29925), 'numpy.cov', 'np.cov', (['pcs_for_this_unit.T'], {}), '(pcs_for_this_unit.T)\n', (29904, 29925), True, 'import numpy as np\n'), ((30058, 30118), 'scipy.spatial.distance.cdist', 'cdist', (['mean_value', 'pcs_for_other_units', '"""mahalanobis"""'], {'VI': 'VI'}), "(mean_value, pcs_for_other_units, 'mahalanobis', VI=VI)\n", (30063, 30118), False, 'from scipy.spatial.distance import cdist\n'), ((30231, 30289), 'scipy.spatial.distance.cdist', 'cdist', (['mean_value', 'pcs_for_this_unit', '"""mahalanobis"""'], {'VI': 'VI'}), "(mean_value, pcs_for_this_unit, 'mahalanobis', VI=VI)\n", (30236, 30289), False, 'from scipy.spatial.distance import cdist\n'), ((31769, 31780), 'numpy.where', 'np.where', (['y'], {}), '(y)\n', (31777, 31780), True, 'import numpy as np\n'), ((31860, 31886), 'numpy.mean', 'np.mean', (['flda_this_cluster'], {}), '(flda_this_cluster)\n', (31867, 31886), True, 'import numpy as np\n'), ((31889, 31916), 'numpy.mean', 'np.mean', (['flda_other_cluster'], {}), '(flda_other_cluster)\n', (31896, 31916), True, 'import numpy as np\n'), ((33549, 33613), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neighbors', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=n_neighbors, algorithm='ball_tree')\n", (33565, 33613), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((18094, 18127), 'numpy.concatenate', 'np.concatenate', (['(all_pcs, pcs)', '(0)'], {}), '((all_pcs, pcs), 0)\n', (18108, 18127), True, 'import numpy as np\n'), ((18157, 18196), 'numpy.concatenate', 'np.concatenate', (['(all_labels, labels)', '(0)'], {}), '((all_labels, labels), 0)\n', (18171, 18196), True, 'import numpy as np\n'), ((23500, 23516), 'numpy.sum', 'np.sum', (['in_range'], {}), '(in_range)\n', (23506, 23516), True, 'import numpy as np\n'), ((24430, 24472), 'numpy.max', 'np.max', (['position_diffs[position_diffs > 0]'], {}), '(position_diffs[position_diffs > 0])\n', (24436, 24472), True, 'import numpy as np\n'), ((31826, 31838), 'numpy.invert', 'np.invert', (['y'], {}), '(y)\n', (31835, 31838), True, 'import numpy as np\n'), ((33433, 33472), 'numpy.arange', 'np.arange', (['(0)', '(X.shape[0] - 1)', '(1 / ratio)'], {}), '(0, X.shape[0] - 1, 1 / ratio)\n', (33442, 33472), True, 'import numpy as np\n'), ((35128, 35160), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (35149, 35160), True, 'import numpy as np\n'), ((35820, 35848), 'numpy.argwhere', 'np.argwhere', (['(these_inds == i)'], {}), '(these_inds == i)\n', (35831, 35848), True, 'import numpy as np\n'), ((18011, 18035), 'numpy.ones', 'np.ones', 
(['(pcs.shape[0],)'], {}), '((pcs.shape[0],))\n', (18018, 18035), True, 'import numpy as np\n'), ((21509, 21525), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (21517, 21525), True, 'import numpy as np\n'), ((21677, 21723), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['X', 'labels'], {'random_state': 'seed'}), '(X, labels, random_state=seed)\n', (21693, 21723), False, 'from sklearn.metrics import silhouette_score\n'), ((23585, 23630), 'numpy.median', 'np.median', (['positions_for_cluster[in_range]', '(0)'], {}), '(positions_for_cluster[in_range], 0)\n', (23594, 23630), True, 'import numpy as np\n'), ((24676, 24702), 'numpy.diag', 'np.diag', (['position_diffs', '(1)'], {}), '(position_diffs, 1)\n', (24683, 24702), True, 'import numpy as np\n'), ((33342, 33362), 'numpy.invert', 'np.invert', (['this_unit'], {}), '(this_unit)\n', (33351, 33362), True, 'import numpy as np\n'), ((6919, 7412), 'collections.OrderedDict', 'OrderedDict', (["(('cluster_id', cluster_ids_out), ('firing_rate', firing_rate), (\n 'presence_ratio', presence_ratio), ('isi_violation', isi_viol), (\n 'amplitude_cutoff', amplitude_cutoff), ('isolation_distance',\n isolation_distance), ('l_ratio', l_ratio), ('d_prime', d_prime), (\n 'nn_hit_rate', nn_hit_rate), ('nn_miss_rate', nn_miss_rate), (\n 'silhouette_score', silhouette_score), ('max_drift', max_drift), (\n 'cumulative_drift', cumulative_drift), ('epoch_name', epoch_name))"], {}), "((('cluster_id', cluster_ids_out), ('firing_rate', firing_rate),\n ('presence_ratio', presence_ratio), ('isi_violation', isi_viol), (\n 'amplitude_cutoff', amplitude_cutoff), ('isolation_distance',\n isolation_distance), ('l_ratio', l_ratio), ('d_prime', d_prime), (\n 'nn_hit_rate', nn_hit_rate), ('nn_miss_rate', nn_miss_rate), (\n 'silhouette_score', silhouette_score), ('max_drift', max_drift), (\n 'cumulative_drift', cumulative_drift), ('epoch_name', epoch_name)))\n", (6930, 7412), False, 'from collections import OrderedDict\n'), ((24197, 24226), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos_i - pos_j)'], {}), '(pos_i - pos_j)\n', (24211, 24226), True, 'import numpy as np\n'), ((31944, 31969), 'numpy.std', 'np.std', (['flda_this_cluster'], {}), '(flda_this_cluster)\n', (31950, 31969), True, 'import numpy as np\n'), ((31977, 32003), 'numpy.std', 'np.std', (['flda_other_cluster'], {}), '(flda_other_cluster)\n', (31983, 32003), True, 'import numpy as np\n'), ((24103, 24121), 'numpy.isnan', 'np.isnan', (['pos_i[0]'], {}), '(pos_i[0])\n', (24111, 24121), True, 'import numpy as np\n'), ((24130, 24148), 'numpy.isnan', 'np.isnan', (['pos_j[0]'], {}), '(pos_j[0])\n', (24138, 24148), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import copy
from .pdp_calc_utils import _sample_data, _find_onehot_actual, _find_closest
from sklearn.cluster import MiniBatchKMeans, KMeans
def _pdp_plot_title(n_grids, feature_name, ax, multi_flag, which_class, plot_params):
"""
Draw pdp plot title
:param n_grids: number of grids
:param feature_name: name of the feature
:param ax: axes to plot on
    :param multi_flag: whether it is a subplot of a multi-class plot
:param which_class: which class to plot
:param plot_params: values of plot parameters
"""
font_family = 'Arial'
title = 'PDP for %s' % feature_name
subtitle = "Number of unique grid points: %d" % n_grids
title_fontsize = 15
subtitle_fontsize = 12
if plot_params is not None:
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'title' in plot_params.keys():
title = plot_params['title']
if 'title_fontsize' in plot_params.keys():
title_fontsize = plot_params['title_fontsize']
if 'subtitle_fontsize' in plot_params.keys():
subtitle_fontsize = plot_params['subtitle_fontsize']
ax.set_facecolor('white')
if multi_flag:
ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
ax.text(0, 0.45, "For Class %d" % which_class, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family)
ax.text(0, 0.25, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
else:
ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
ax.text(0, 0.4, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
ax.axis('off')
def _axes_modify(font_family, ax, top=False, right=False, legend=False):
# modify the axes
for tick in ax.get_xticklabels():
tick.set_fontname(font_family)
for tick in ax.get_yticklabels():
tick.set_fontname(font_family)
ax.set_facecolor('white')
ax.tick_params(axis='both', which='major', labelsize=10, labelcolor='#424242', colors='#9E9E9E')
for d in ['top', 'bottom', 'right', 'left']:
ax.spines[d].set_visible(False)
if not legend:
if top:
ax.get_xaxis().tick_top()
elif right:
ax.get_yaxis().tick_right()
else:
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.grid(True, 'major', 'x', ls='--', lw=.5, c='k', alpha=.3)
ax.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
else:
ax.set_xticks([])
ax.set_yticks([])
def _pdp_plot(pdp_isolate_out, feature_name, center, plot_org_pts, plot_lines, frac_to_plot,
cluster, n_cluster_centers, cluster_method, x_quantile, ax, plot_params):
"""
    Plot partial dependence plot
:param pdp_isolate_out: instance of pdp_isolate_obj
a calculated pdp_isolate_obj instance
:param feature_name: string
        name of the feature, not necessarily the same as the column name
:param center: boolean, default=True
whether to center the plot
:param plot_org_pts: boolean, default=False
whether to plot out the original points
:param plot_lines: boolean, default=False
whether to plot out the individual lines
:param frac_to_plot: float or integer, default=1
        how many points or lines to plot, can be an integer or a float
:param cluster: boolean, default=False
whether to cluster the individual lines and only plot out the cluster centers
:param n_cluster_centers: integer, default=None
number of cluster centers
:param cluster_method: string, default=None
        cluster method to use; default is KMeans, but MiniBatchKMeans is used if 'approx' is passed
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param ax: axes to plot on
:param plot_params: dict, default=None
values of plot parameters
"""
font_family = 'Arial'
xticks_rotation = 0
if plot_params is not None:
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'xticks_rotation' in plot_params.keys():
xticks_rotation = plot_params['xticks_rotation']
# modify axes
_axes_modify(font_family, ax)
ax.set_xlabel(feature_name, fontsize=10)
feature_type = pdp_isolate_out.feature_type
feature_grids = pdp_isolate_out.feature_grids
display_columns = pdp_isolate_out.display_columns
actual_columns = pdp_isolate_out.actual_columns
if feature_type == 'binary' or feature_type == 'onehot' or x_quantile:
x = range(len(feature_grids))
ax.set_xticks(x)
ax.set_xticklabels(display_columns, rotation=xticks_rotation)
else:
# for numeric feature
x = feature_grids
ice_lines = copy.deepcopy(pdp_isolate_out.ice_lines)
pdp_y = copy.deepcopy(pdp_isolate_out.pdp)
# whether to fill between std upper and lower
# whether to highlight pdp line
std_fill = True
pdp_hl = False
# whether to center the plot
if center:
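        # shift every curve so it starts at 0 at the first grid point; plotted
        # values then show the change relative to the leftmost feature grid value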
pdp_y -= pdp_y[0]
for col in feature_grids[1:]:
ice_lines[col] -= ice_lines[feature_grids[0]]
ice_lines['actual_preds'] -= ice_lines[feature_grids[0]]
ice_lines[feature_grids[0]] = 0
if cluster or plot_lines:
std_fill = False
pdp_hl = True
if cluster:
_ice_cluster_plot(x=x, ice_lines=ice_lines, feature_grids=feature_grids, n_cluster_centers=n_cluster_centers,
cluster_method=cluster_method, ax=ax, plot_params=plot_params)
else:
ice_plot_data = _sample_data(ice_lines=ice_lines, frac_to_plot=frac_to_plot)
_ice_line_plot(x=x, ice_plot_data=ice_plot_data, feature_grids=feature_grids, ax=ax, plot_params=plot_params)
if plot_org_pts:
ice_lines_temp = ice_lines.copy()
if feature_type == 'onehot':
ice_lines_temp['x'] = ice_lines_temp[actual_columns].apply(lambda x: _find_onehot_actual(x), axis=1)
ice_lines_temp = ice_lines_temp[~ice_lines_temp['x'].isnull()].reset_index(drop=True)
elif feature_type == 'numeric':
feature_grids = pdp_isolate_out.feature_grids
ice_lines_temp = ice_lines_temp[(ice_lines_temp[actual_columns[0]] >= feature_grids[0])
& (ice_lines_temp[actual_columns[0]] <= feature_grids[-1])]
if x_quantile:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]].apply(lambda x: _find_closest(x, feature_grids))
else:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]
else:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]
ice_plot_data_pts = _sample_data(ice_lines=ice_lines_temp, frac_to_plot=frac_to_plot)
_ice_plot_pts(ice_plot_data_pts=ice_plot_data_pts, ax=ax, plot_params=plot_params)
std = ice_lines[feature_grids].std().values
_pdp_std_plot(x=x, y=pdp_y, std=std, std_fill=std_fill, pdp_hl=pdp_hl, ax=ax, plot_params=plot_params)
def _pdp_std_plot(x, y, std, std_fill, pdp_hl, ax, plot_params):
"""
PDP basic plot
:param x: x axis values
:param y: pdp values
:param std: std values
:param std_fill: whether to fill between std upper and lower
:param pdp_hl: whether to highlight pdp line
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
upper = y + std
lower = y - std
pdp_color = '#1A4E5D'
pdp_hl_color = '#FEDC00'
pdp_linewidth = 2
zero_color = '#E75438'
zero_linewidth = 1.5
fill_color = '#66C2D7'
fill_alpha = 0.2
markersize = 5
if plot_params is not None:
if 'pdp_color' in plot_params.keys():
pdp_color = plot_params['pdp_color']
if 'pdp_hl_color' in plot_params.keys():
pdp_hl_color = plot_params['pdp_hl_color']
if 'pdp_linewidth' in plot_params.keys():
pdp_linewidth = plot_params['pdp_linewidth']
if 'zero_color' in plot_params.keys():
zero_color = plot_params['zero_color']
if 'zero_linewidth' in plot_params.keys():
zero_linewidth = plot_params['zero_linewidth']
if 'fill_color' in plot_params.keys():
fill_color = plot_params['fill_color']
if 'fill_alpha' in plot_params.keys():
fill_alpha = plot_params['fill_alpha']
if 'markersize' in plot_params.keys():
markersize = plot_params['markersize']
if pdp_hl:
ax.plot(x, y, color=pdp_hl_color, linewidth=pdp_linewidth * 3, alpha=0.8)
ax.plot(x, y, color=pdp_color, linewidth=pdp_linewidth, marker='o', markersize=markersize)
ax.plot(x, [0] * y, linestyle='--', linewidth=zero_linewidth, color=zero_color)
if std_fill:
ax.fill_between(x, upper, lower, alpha=fill_alpha, color=fill_color)
ax.set_ylim(np.min([np.min(lower) * 2, 0]), np.max([np.max(upper) * 2, 0]))
def _ice_plot_pts(ice_plot_data_pts, ax, plot_params):
"""
Plot the real data points
:param ice_plot_data_pts: data points to plot
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
point_size = 50
point_pos_color = '#5BB573'
point_neg_color = '#E75438'
if plot_params is not None:
if 'point_size' in plot_params.keys():
point_size = plot_params['point_size']
if 'point_pos_color' in plot_params.keys():
point_pos_color = plot_params['point_pos_color']
if 'point_neg_color' in plot_params.keys():
point_neg_color = plot_params['point_neg_color']
ice_plot_data_pts['color'] = ice_plot_data_pts['actual_preds'].apply(lambda x: point_pos_color if x >= 0 else point_neg_color)
ax.scatter(ice_plot_data_pts['x'], ice_plot_data_pts['actual_preds'], s=point_size, marker="+", linewidth=1,
color=ice_plot_data_pts['color'])
def _ice_line_plot(x, ice_plot_data, feature_grids, ax, plot_params):
"""
Plot the ice lines
:param x: x axis values
:param ice_plot_data: ice lines to plot
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
linewidth = np.max([1.0 / np.log10(ice_plot_data.shape[0]), 0.3])
linealpha = np.max([1.0 / np.log10(ice_plot_data.shape[0]), 0.3])
line_cmap = 'Blues'
if plot_params is not None:
if 'line_cmap' in plot_params.keys():
line_cmap = plot_params['line_cmap']
colors = plt.get_cmap(line_cmap)(np.linspace(0, 1, 20))[5:15]
for i in range(len(ice_plot_data)):
y = list(ice_plot_data[feature_grids].iloc[i].values)
ax.plot(x, y, linewidth=linewidth, c=colors[i % 10], alpha=linealpha)
def _ice_cluster_plot(x, ice_lines, feature_grids, n_cluster_centers, cluster_method, ax, plot_params):
"""
Cluster the ice lines and plot out the cluster centers
:param x: x axis values
    :param ice_lines: ice lines
    :param feature_grids: feature grid values (the ice line columns to cluster on)
:param n_cluster_centers: number of cluster centers
:param cluster_method: cluster method
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
if cluster_method == 'approx':
kmeans = MiniBatchKMeans(n_clusters=n_cluster_centers, random_state=0, verbose=0)
else:
kmeans = KMeans(n_clusters=n_cluster_centers, random_state=0, n_jobs=1)
kmeans.fit(ice_lines[feature_grids])
cluster_plot_data = pd.DataFrame(kmeans.cluster_centers_, columns=feature_grids)
cluster_cmap = 'Blues'
if plot_params is not None:
if 'cluster_cmap' in plot_params.keys():
cluster_cmap = plot_params['cluster_cmap']
colors = plt.get_cmap(cluster_cmap)(np.linspace(0, 1, 20))[5:15]
for i in range(len(cluster_plot_data)):
y = list(cluster_plot_data[feature_grids].iloc[i].values)
ax.plot(x, y, linewidth=1, c=colors[i % 10])
def _pdp_interact_plot_title(pdp_interact_out, feature_names, ax,
multi_flag, which_class, only_inter, plot_params):
"""
Draw pdp interaction plot title
:param pdp_interact_out: instance of pdp_interact_obj
    :param feature_names: list of feature names
    :param ax: axes to plot on
    :param multi_flag: whether it is a subplot of a multi-class plot
:param which_class: which class to plot
:param only_inter: whether only draw interaction plot
:param plot_params: values of plot parameters
"""
font_family = 'Arial'
title = 'Interaction PDP between %s and %s' % (feature_names[0], feature_names[1])
title_fontsize = 14
subtitle_fontsize = 12
if type(pdp_interact_out) == dict:
subtitle1 = 'Number of unique grid points of %s: %d' % (
feature_names[0], len(pdp_interact_out['class_0'].feature_grids[0]))
subtitle2 = 'Number of unique grid points of %s: %d' % (
feature_names[1], len(pdp_interact_out['class_0'].feature_grids[1]))
else:
subtitle1 = 'Number of unique grid points of %s: %d' % (
feature_names[0], len(pdp_interact_out.feature_grids[0]))
subtitle2 = 'Number of unique grid points of %s: %d' % (
feature_names[1], len(pdp_interact_out.feature_grids[1]))
if plot_params is not None:
if 'pdp_inter' in plot_params.keys():
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'title' in plot_params.keys():
title = plot_params['title']
if 'title_fontsize' in plot_params.keys():
title_fontsize = plot_params['title_fontsize']
if 'subtitle_fontsize' in plot_params.keys():
subtitle_fontsize = plot_params['subtitle_fontsize']
ax.set_facecolor('white')
if only_inter:
ax.text(0, 0.8, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
if multi_flag:
ax.text(0, 0.62, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
fontname=font_family)
ax.text(0, 0.45, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.3, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.55, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.4, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.6, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
if multi_flag:
ax.text(0, 0.53, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
fontname=font_family)
ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.axis('off')
def _pdp_interact_plot(pdp_interact_out, feature_names, center, plot_org_pts, plot_lines, frac_to_plot, cluster,
n_cluster_centers, cluster_method, x_quantile, figsize, plot_params, multi_flag, which_class):
"""
Plot interaction plot
:param pdp_interact_out: instance of pdp_interact_obj
a calculated pdp_interact_obj instance
:param feature_names: list of feature names
:param center: boolean, default=True
whether to center the plot
:param plot_org_pts: boolean, default=False
whether to plot out the original points
:param plot_lines: boolean, default=False
whether to plot out the individual lines
:param frac_to_plot: float or integer, default=1
        how many points or lines to plot, can be an integer or a float
:param cluster: boolean, default=False
whether to cluster the individual lines and only plot out the cluster centers
:param n_cluster_centers: integer, default=None
number of cluster centers
:param cluster_method: string, default=None
        cluster method to use; default is KMeans, but MiniBatchKMeans is used if 'approx' is passed
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param figsize: figure size
:param plot_params: dict, default=None
values of plot parameters
:param multi_flag: boolean, default=False
whether it is a subplot of a multi-class plot
:param which_class: integer, default=None
must not be None under multi-class mode
"""
if figsize is None:
fig = plt.figure(figsize=(15, 15))
else:
fig = plt.figure(figsize=figsize)
pdp_plot_params = None
if plot_params is not None:
if 'pdp' in plot_params.keys():
pdp_plot_params = plot_params['pdp']
gs = GridSpec(2, 2)
ax0 = plt.subplot(gs[0, 0])
_pdp_interact_plot_title(pdp_interact_out=pdp_interact_out, feature_names=feature_names, ax=ax0,
multi_flag=multi_flag, which_class=which_class, only_inter=False, plot_params=plot_params)
ax1 = plt.subplot(gs[0, 1])
_pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out1, feature_name=feature_names[0], center=center,
plot_org_pts=plot_org_pts, plot_lines=plot_lines, frac_to_plot=frac_to_plot, cluster=cluster,
n_cluster_centers=n_cluster_centers, cluster_method=cluster_method, x_quantile=x_quantile,
ax=ax1, plot_params=pdp_plot_params)
ax2 = plt.subplot(gs[1, 0])
_pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out2, feature_name=feature_names[1], center=center,
plot_org_pts=plot_org_pts, plot_lines=plot_lines, frac_to_plot=frac_to_plot, cluster=cluster,
n_cluster_centers=n_cluster_centers, cluster_method=cluster_method, x_quantile=x_quantile, ax=ax2,
plot_params=pdp_plot_params)
ax3 = plt.subplot(gs[1, 1])
_pdp_contour_plot(pdp_interact_out=pdp_interact_out, feature_names=feature_names, x_quantile=x_quantile,
ax=ax3, fig=fig, plot_params=plot_params)
class ColorBarLocator(object):
def __init__(self, pax, pad=60, width=20):
self.pax = pax
self.pad = pad
self.width = width
def __call__(self, ax, renderer):
x, y, w, h = self.pax.get_position().bounds
fig = self.pax.get_figure()
inv_trans = fig.transFigure.inverted()
pad, _ = inv_trans.transform([self.pad, 0])
width, _ = inv_trans.transform([self.width, 0])
return [x, y - pad, w, width]
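# An instance of ColorBarLocator is passed as axes_locator to fig.add_axes in
# _pdp_contour_plot below, so the horizontal colorbar axes tracks the contour
# axes' position, sitting `pad` pixels below it with a height of `width` pixels.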
def _pdp_contour_plot(pdp_interact_out, feature_names, x_quantile, ax, fig, plot_params):
"""
Plot PDP contour
:param pdp_interact_out: instance of pdp_interact_obj
a calculated pdp_interact_obj instance
:param feature_names: list of feature names
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param ax: axes to plot on
:param fig: plt figure
:param plot_params: dict, default=None
values of plot parameters
"""
font_family = 'Arial'
contour_color = 'white'
contour_cmap = 'viridis'
xticks_rotation = 0
if plot_params is not None:
if 'pdp_inter' in plot_params.keys():
if 'contour_color' in plot_params['pdp_inter'].keys():
contour_color = plot_params['pdp_inter']['contour_color']
if 'contour_cmap' in plot_params['pdp_inter'].keys():
contour_cmap = plot_params['pdp_inter']['contour_cmap']
if 'font_family' in plot_params['pdp_inter'].keys():
font_family = plot_params['pdp_inter']['font_family']
if 'xticks_rotation' in plot_params.keys():
xticks_rotation = plot_params['xticks_rotation']
_axes_modify(font_family, ax)
feature_types = pdp_interact_out.feature_types
pdp = copy.deepcopy(pdp_interact_out.pdp)
new_feature_names = []
for i, feature_type in enumerate(feature_types):
if feature_type == 'onehot':
new_col = 'onehot_%d' % (i)
pdp[new_col] = pdp.apply(lambda x: list(x[pdp_interact_out.features[i]]).index(1), axis=1)
new_feature_names.append(new_col)
else:
new_feature_names.append(pdp_interact_out.features[i])
if (feature_types[0] == 'numeric') and x_quantile:
pdp[new_feature_names[0]] = pdp[new_feature_names[0]].apply(
lambda x: list(pdp_interact_out.feature_grids[0]).index(x))
if (feature_types[1] == 'numeric') and x_quantile:
pdp[new_feature_names[1]] = pdp[new_feature_names[1]].apply(
lambda x: list(pdp_interact_out.feature_grids[1]).index(x))
X, Y = np.meshgrid(pdp[new_feature_names[0]].unique(), pdp[new_feature_names[1]].unique())
Z = []
for i in range(X.shape[0]):
zs = []
for j in range(X.shape[1]):
x = X[i, j]
y = Y[i, j]
z = pdp[(pdp[new_feature_names[0]] == x) & (pdp[new_feature_names[1]] == y)]['preds'].values[0]
zs.append(z)
Z.append(zs)
Z = np.array(Z)
if feature_types[0] == 'onehot':
ax.set_xticks(range(X.shape[1]))
ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
elif feature_types[0] == 'binary':
ax.set_xticks([0, 1])
ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
else:
if x_quantile:
ax.set_xticks(range(len(pdp_interact_out.feature_grids[0])))
ax.set_xticklabels(pdp_interact_out.feature_grids[0], rotation=xticks_rotation)
if feature_types[1] == 'onehot':
ax.set_yticks(range(Y.shape[0]))
ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
elif feature_types[1] == 'binary':
ax.set_yticks([0, 1])
ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
else:
if x_quantile:
ax.set_yticks(range(len(pdp_interact_out.feature_grids[1])))
ax.set_yticklabels(pdp_interact_out.feature_grids[1])
level = np.min([X.shape[0], X.shape[1]])
c1 = ax.contourf(X, Y, Z, N=level, origin='lower', cmap=contour_cmap)
c2 = ax.contour(c1, levels=c1.levels, colors=contour_color, origin='lower')
    ax.clabel(c2, fontsize=9, inline=1)
ax.set_xlabel(feature_names[0], fontsize=10)
ax.set_ylabel(feature_names[1], fontsize=10)
ax.get_yaxis().tick_right()
if fig is not None:
cax = fig.add_axes([0, 0, 0, 0], axes_locator=ColorBarLocator(ax))
fig.colorbar(c1, cax=cax, orientation='horizontal')
|
[
"sklearn.cluster.KMeans",
"numpy.log10",
"sklearn.cluster.MiniBatchKMeans",
"numpy.min",
"numpy.max",
"numpy.array",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure",
"numpy.linspace",
"copy.deepcopy",
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.get_cmap"
] |
[((5131, 5171), 'copy.deepcopy', 'copy.deepcopy', (['pdp_isolate_out.ice_lines'], {}), '(pdp_isolate_out.ice_lines)\n', (5144, 5171), False, 'import copy\n'), ((5184, 5218), 'copy.deepcopy', 'copy.deepcopy', (['pdp_isolate_out.pdp'], {}), '(pdp_isolate_out.pdp)\n', (5197, 5218), False, 'import copy\n'), ((11826, 11886), 'pandas.DataFrame', 'pd.DataFrame', (['kmeans.cluster_centers_'], {'columns': 'feature_grids'}), '(kmeans.cluster_centers_, columns=feature_grids)\n', (11838, 11886), True, 'import pandas as pd\n'), ((17829, 17843), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (17837, 17843), False, 'from matplotlib.gridspec import GridSpec\n'), ((17854, 17875), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (17865, 17875), True, 'import matplotlib.pyplot as plt\n'), ((18109, 18130), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 1]'], {}), '(gs[0, 1])\n', (18120, 18130), True, 'import matplotlib.pyplot as plt\n'), ((18517, 18538), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0]'], {}), '(gs[1, 0])\n', (18528, 18538), True, 'import matplotlib.pyplot as plt\n'), ((18925, 18946), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 1]'], {}), '(gs[1, 1])\n', (18936, 18946), True, 'import matplotlib.pyplot as plt\n'), ((20938, 20973), 'copy.deepcopy', 'copy.deepcopy', (['pdp_interact_out.pdp'], {}), '(pdp_interact_out.pdp)\n', (20951, 20973), False, 'import copy\n'), ((22157, 22168), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (22165, 22168), True, 'import numpy as np\n'), ((23212, 23244), 'numpy.min', 'np.min', (['[X.shape[0], X.shape[1]]'], {}), '([X.shape[0], X.shape[1]])\n', (23218, 23244), True, 'import numpy as np\n'), ((11597, 11669), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'n_cluster_centers', 'random_state': '(0)', 'verbose': '(0)'}), '(n_clusters=n_cluster_centers, random_state=0, verbose=0)\n', (11612, 11669), False, 'from sklearn.cluster import MiniBatchKMeans, KMeans\n'), ((11697, 11759), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_cluster_centers', 'random_state': '(0)', 'n_jobs': '(1)'}), '(n_clusters=n_cluster_centers, random_state=0, n_jobs=1)\n', (11703, 11759), False, 'from sklearn.cluster import MiniBatchKMeans, KMeans\n'), ((17589, 17617), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (17599, 17617), True, 'import matplotlib.pyplot as plt\n'), ((17642, 17669), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (17652, 17669), True, 'import matplotlib.pyplot as plt\n'), ((10889, 10912), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['line_cmap'], {}), '(line_cmap)\n', (10901, 10912), True, 'import matplotlib.pyplot as plt\n'), ((10913, 10934), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (10924, 10934), True, 'import numpy as np\n'), ((12066, 12092), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cluster_cmap'], {}), '(cluster_cmap)\n', (12078, 12092), True, 'import matplotlib.pyplot as plt\n'), ((12093, 12114), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (12104, 12114), True, 'import numpy as np\n'), ((10613, 10645), 'numpy.log10', 'np.log10', (['ice_plot_data.shape[0]'], {}), '(ice_plot_data.shape[0])\n', (10621, 10645), True, 'import numpy as np\n'), ((10683, 10715), 'numpy.log10', 'np.log10', (['ice_plot_data.shape[0]'], {}), '(ice_plot_data.shape[0])\n', (10691, 10715), 
True, 'import numpy as np\n'), ((9290, 9303), 'numpy.min', 'np.min', (['lower'], {}), '(lower)\n', (9296, 9303), True, 'import numpy as np\n'), ((9322, 9335), 'numpy.max', 'np.max', (['upper'], {}), '(upper)\n', (9328, 9335), True, 'import numpy as np\n')]
|
import os
import pytest
from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer
def get_test_directory():
return os.path.dirname(os.path.abspath(__file__)) + "/../test_documents/"
@pytest.mark.skip
def test_html_rollup():
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news.kdxa'), 'rb').read())
# before rollup
assert document.select('//a')[0].content == 'HSBC'
assert document.select('//a')[1].content == 'Hang Seng Index'
assert len(document.select('//*[contentRegex(".*Hang Seng Index.*")]')[0].get_content_parts()) == 1
# Collapse out all the <a> tags
step = RollupTransformer(collapse_type_res=["a"])
step.process(document)
# after rollup
assert len(document.select('//a')) == 0
# see where the href rolled up
assert document.select('//*[contentRegex(".*Hang Seng Index.*")]')[0].get_all_content() == 'The London-headquartered bank is a heavyweight component of the Hang Seng Index . HSBC shares in Hong Kong closed 2.78% lower.'
assert len(document.select('//*[contentRegex(".*Hang Seng Index.*")]')[0].get_content_parts()) == 3
def test_tag_key_value():
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store')
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 45
assert context.get_store('test_store').rows[14][0] == 'LOC'
assert context.get_store('test_store').rows[14][1] == 'Europe'
def test_tag_key_value_include_exclude():
# Testing include parameter
include_tags = ['DATE', 'LOC']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', include=include_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 11
# Testing exclude parameter
exclude_tags = ['DATE', 'LOC']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', exclude=exclude_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 34
# Testing both include and exclude parameters
include_tags = ['LOC']
exclude_tags = ['DATE']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', include=include_tags, exclude=exclude_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 5
# Testing both include - this should be the same as before as 'exclude' shouldn't have really done anything
include_tags = ['LOC']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', include=include_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 5
@pytest.mark.skip
def test_rollup_of_pdf():
# first test - collapsing words and lines up to their common parent
test_doc = Document.from_kdxa(get_test_directory() + '20200709.kdxa')
# how many pre-rollup lines?
assert len(test_doc.select('//line')) == 3824
# how many pre-rollup words?
assert len(test_doc.select('//word')) == 52903
# how many pre-rollup content-areas?
assert len(test_doc.select('//content-area')) == 817
# what is the pre-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
rollup_pipeline = Pipeline(test_doc)
rollup_pipeline.add_step(RollupTransformer(collapse_type_res=["word", "line"], separator_character=' '))
rollup_pipeline.run()
collapsed_doc = rollup_pipeline.context.output_document
# how many post-rollup lines?
assert len(test_doc.select('//line')) == 0
# how many post-rollup words?
assert len(test_doc.select('//word')) == 0
# how many post-rollup content-areas?
assert len(test_doc.select('//content-area')) == 817
# what is the post-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
assert len(collapsed_doc.select("//content-area")[12].get_all_content()) == 235
# second test - just collapse the line up to its parent (content-area) - roll up the line's children
test_doc = Document.from_kdxa(get_test_directory() + '20200709.kdxa')
rollup_pipeline = Pipeline(test_doc)
rollup_pipeline.add_step(
RollupTransformer(collapse_type_res=["line"], separator_character=' ', get_all_content=True))
rollup_pipeline.run()
collapsed_doc = rollup_pipeline.context.output_document
# how many post-rollup lines?
assert len(test_doc.select('//line')) == 0
# how many post-rollup words?
assert len(test_doc.select('//word')) == 0
# how many post-rollup content-areas?
assert len(test_doc.select('//content-area')) == 817
# what is the post-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
# verify that we can collapse line nodes AND include their children
assert len(collapsed_doc.select("//content-area")[12].get_all_content()) == 235
# third test - select specific nodes in which we'll do the roll ups
test_doc = Document.from_kdxa(get_test_directory() + '20200709.kdxa')
node_selector = "//content-area[contentRegex('.*LOAN AGREEMENT.*', true)]"
# verify we have 3 nodes match this selector
node_matches = test_doc.select(node_selector)
assert len(node_matches) == 3
# before we rollup, let's make sure the matching nodes conform to known expectations
assert len(node_matches[0].select('//word')) == 2
assert len(node_matches[0].select('//line')) == 1
assert len(node_matches[0].select('//content-area')) == 1
assert len(node_matches[0].get_all_content()) == 14
assert len(node_matches[1].select('//word')) == 2
assert len(node_matches[1].select('//line')) == 1
assert len(node_matches[1].select('//content-area')) == 1
assert len(node_matches[1].get_all_content()) == 14
assert len(node_matches[2].select('//word')) == 71
assert len(node_matches[2].select('//line')) == 6
assert len(node_matches[2].select('//content-area')) == 1
assert len(node_matches[2].get_all_content()) == 500
rollup_pipeline = Pipeline(test_doc)
rollup_pipeline.add_step(RollupTransformer(selector="//content-area[contentRegex('.*LOAN AGREEMENT.*', true)]",
collapse_type_res=["line"], separator_character=' ',
get_all_content=True))
rollup_pipeline.run()
collapsed_doc = rollup_pipeline.context.output_document
# check those matching nodes - we shouldn't have any words or lines, but
# all other node_types should exist and the content should stay the same.
assert len(node_matches[0].select('//word')) == 0
assert len(node_matches[0].select('//line')) == 0
assert len(node_matches[0].select('//content-area')) == 1
assert len(node_matches[0].get_all_content()) == 14
assert len(node_matches[1].select('//word')) == 0
assert len(node_matches[1].select('//line')) == 0
assert len(node_matches[1].select('//content-area')) == 1
assert len(node_matches[1].get_all_content()) == 14
assert len(node_matches[2].select('//word')) == 0
assert len(node_matches[2].select('//line')) == 0
assert len(node_matches[2].select('//content-area')) == 1
assert len(node_matches[2].get_all_content()) == 500
# how many post-rollup lines? (still have some lines, but fewer than we started with)
assert len(test_doc.select('//line')) == 3816
# how many post-rollup words? (still have some words, but fewer than we started with)
assert len(test_doc.select('//word')) == 52828
# how many post-rollup content-areas? (same number of content-areas)
assert len(test_doc.select('//content-area')) == 817
# what is the post-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
# verify that we can collapse line nodes AND include their children
assert len(collapsed_doc.select("//content-area")[12].get_all_content()) == 235
|
[
"kodexa.RollupTransformer",
"os.path.abspath",
"kodexa.PipelineContext",
"kodexa.TagsToKeyValuePairExtractor",
"kodexa.Pipeline"
] |
[((678, 720), 'kodexa.RollupTransformer', 'RollupTransformer', ([], {'collapse_type_res': "['a']"}), "(collapse_type_res=['a'])\n", (695, 720), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((1327, 1379), 'kodexa.TagsToKeyValuePairExtractor', 'TagsToKeyValuePairExtractor', ([], {'store_name': '"""test_store"""'}), "(store_name='test_store')\n", (1354, 1379), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((1394, 1411), 'kodexa.PipelineContext', 'PipelineContext', ([], {}), '()\n', (1409, 1411), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((1871, 1945), 'kodexa.TagsToKeyValuePairExtractor', 'TagsToKeyValuePairExtractor', ([], {'store_name': '"""test_store"""', 'include': 'include_tags'}), "(store_name='test_store', include=include_tags)\n", (1898, 1945), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((1960, 1977), 'kodexa.PipelineContext', 'PipelineContext', ([], {}), '()\n', (1975, 1977), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((2262, 2336), 'kodexa.TagsToKeyValuePairExtractor', 'TagsToKeyValuePairExtractor', ([], {'store_name': '"""test_store"""', 'exclude': 'exclude_tags'}), "(store_name='test_store', exclude=exclude_tags)\n", (2289, 2336), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((2351, 2368), 'kodexa.PipelineContext', 'PipelineContext', ([], {}), '()\n', (2366, 2368), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((2691, 2791), 'kodexa.TagsToKeyValuePairExtractor', 'TagsToKeyValuePairExtractor', ([], {'store_name': '"""test_store"""', 'include': 'include_tags', 'exclude': 'exclude_tags'}), "(store_name='test_store', include=include_tags,\n exclude=exclude_tags)\n", (2718, 2791), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((2802, 2819), 'kodexa.PipelineContext', 'PipelineContext', ([], {}), '()\n', (2817, 2819), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((3175, 3249), 'kodexa.TagsToKeyValuePairExtractor', 'TagsToKeyValuePairExtractor', ([], {'store_name': '"""test_store"""', 'include': 'include_tags'}), "(store_name='test_store', include=include_tags)\n", (3202, 3249), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((3264, 3281), 'kodexa.PipelineContext', 'PipelineContext', ([], {}), '()\n', (3279, 3281), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((3991, 4009), 'kodexa.Pipeline', 'Pipeline', (['test_doc'], {}), '(test_doc)\n', (3999, 4009), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((4893, 4911), 'kodexa.Pipeline', 'Pipeline', (['test_doc'], {}), '(test_doc)\n', (4901, 4911), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((6843, 6861), 'kodexa.Pipeline', 'Pipeline', (['test_doc'], {}), '(test_doc)\n', (6851, 6861), False, 'from kodexa import Document, Pipeline, 
PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((4039, 4117), 'kodexa.RollupTransformer', 'RollupTransformer', ([], {'collapse_type_res': "['word', 'line']", 'separator_character': '""" """'}), "(collapse_type_res=['word', 'line'], separator_character=' ')\n", (4056, 4117), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((4950, 5046), 'kodexa.RollupTransformer', 'RollupTransformer', ([], {'collapse_type_res': "['line']", 'separator_character': '""" """', 'get_all_content': '(True)'}), "(collapse_type_res=['line'], separator_character=' ',\n get_all_content=True)\n", (4967, 5046), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((6891, 7061), 'kodexa.RollupTransformer', 'RollupTransformer', ([], {'selector': '"""//content-area[contentRegex(\'.*LOAN AGREEMENT.*\', true)]"""', 'collapse_type_res': "['line']", 'separator_character': '""" """', 'get_all_content': '(True)'}), '(selector=\n "//content-area[contentRegex(\'.*LOAN AGREEMENT.*\', true)]",\n collapse_type_res=[\'line\'], separator_character=\' \', get_all_content=True)\n', (6908, 7061), False, 'from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer\n'), ((184, 209), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (199, 209), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Injector.
Given a binary file, a binary table generated with the Finder, and a
replacement text file, the Injector can inject text into the binary,
swapping the in-game text.
The Injector automatically fits the text to the size of the text box,
truncating it when it is too long and placing the line breaks correctly.
@author <NAME>
"""
from __future__ import print_function
import os
import sys
import binascii
import pickle
class Injector:
def __init__(self, sfc, tbl, substituto):
self.sfc = sfc
self.tbl = tbl
self.substituto = substituto
self.bytefile = None
self.dictionary = None
self.inv_dictionary = None
self.offset = 0
"""
    Reads the file and returns its bytes as an array of bytes
"""
def fileToByteArray(self):
with open(self.sfc, 'rb') as f:
hexdata = binascii.hexlify(f.read())
self.bytefile = map(''.join, zip(hexdata[::2], hexdata[1::2]))
"""
    Reads the binary conversion table
"""
def readBinaryTbl(self):
with open(self.tbl, 'rb') as btblobj:
self.dictionary = pickle.Unpickler(btblobj).load()
self.offset = self.dictionary["offset"]
del self.dictionary["offset"]
self.inv_dictionary = {v: k for k, v in self.dictionary.items()}
def inject(self):
_txt = []
char_count = 0
with open(self.substituto, "r") as _txtfile:
_txt = _txtfile.read().replace('\n', '')
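            # Walk the fixed-size text box cell by cell (8 rows of up to 17
            # characters each), overwriting bytes from the stored table offset
            # with the encoded characters of the replacement text.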
for numero_linha in xrange(1, 9):
for numero_coluna in xrange(1, 18):
try:
self.bytefile[self.offset] = self.inv_dictionary[_txt[char_count]]
                        if numero_coluna == 18:  # never true: xrange(1, 18) stops at 17
self.bytefile[self.offset] = self.inv_dictionary[_txt[char_count]+"\n"]
except IndexError:
pass
char_count = char_count + 1
self.offset = self.offset + 1
# with open(self.sfc.replace(".sfc", ".modified.sfc"), "wb") as sfc_file:
sfc_file = open(self.sfc.replace(".sfc", ".modified.sfc"), "wb")
for byte in self.bytefile:
sfc_file.write(
binascii.unhexlify(byte)
)
"""
    Entry point of the class
"""
def run(self):
self.fileToByteArray()
self.readBinaryTbl()
self.inject()
if __name__ == '__main__':
if len(sys.argv) != 4:
print("Use: python extractor.py [sfc] [tbl] [substituto]")
sys.exit(1)
sfc = sys.argv[1]
tbl = sys.argv[2]
substituto = sys.argv[3]
if os.path.exists(sfc) and os.path.isfile(tbl):
inj = Injector(sfc, tbl, substituto)
inj.run()
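    # Example invocation (hypothetical script and file names):
    #   python injector.py game.sfc game.tbl replacement.txt
    # The patched ROM is written next to the original as game.modified.sfc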
|
[
"os.path.exists",
"sys.exit",
"os.path.isfile",
"pickle.Unpickler",
"binascii.unhexlify"
] |
[((2675, 2686), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2683, 2686), False, 'import sys\n'), ((2775, 2794), 'os.path.exists', 'os.path.exists', (['sfc'], {}), '(sfc)\n', (2789, 2794), False, 'import os\n'), ((2799, 2818), 'os.path.isfile', 'os.path.isfile', (['tbl'], {}), '(tbl)\n', (2813, 2818), False, 'import os\n'), ((2344, 2368), 'binascii.unhexlify', 'binascii.unhexlify', (['byte'], {}), '(byte)\n', (2362, 2368), False, 'import binascii\n'), ((1220, 1245), 'pickle.Unpickler', 'pickle.Unpickler', (['btblobj'], {}), '(btblobj)\n', (1236, 1245), False, 'import pickle\n')]
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Cudnn(Package):
"""NVIDIA cuDNN is a GPU-accelerated library of primitives for deep
neural networks"""
homepage = "https://developer.nvidia.com/cudnn"
# Latest versions available at:
# https://developer.nvidia.com/rdp/cudnn-download
# Archived versions available at:
# https://developer.nvidia.com/rdp/cudnn-archive
# Note that download links don't work from command line,
# need to use modified URLs like in url_for_version.
maintainers = ['adamjstewart']
# cuDNN 8.0.2
version('8.0.2.39-11.0-linux-x64',
sha256='672f46288b8edd98f8d156a4f1ff518201ca6de0cff67915ceaa37f6d6d86345')
version('8.0.2.39-11.0-linux-ppc64le',
sha256='b7c1ce5b1191eb007ba3455ea5f497fdce293a646545d8a6ed93e9bb06d7f057')
version('8.0.2.39-10.2-linux-x64',
sha256='c9cbe5c211360f3cfbc0fb104f0e9096b37e53f89392525679f049276b2f701f')
version('8.0.2.39-10.2-linux-ppc64le',
sha256='c32325ff84a8123491f2e58b3694885a9a672005bc21764b38874688c0e43262')
version('8.0.2.39-10.1-linux-x64',
sha256='82148a68bd6bdaab93af5e05bb1842b8ccb3ab7de7bed41f609a7616c102213d')
version('8.0.2.39-10.1-linux-ppc64le',
sha256='8196ec4f031356317baeccefbc4f61c8fccb2cf0bdef0a6431438918ddf68fb9')
# cuDNN 8.0
version('8.0.0.180-11.0-linux-x64',
sha256='9e75ea70280a77de815e0bdc85d08b67e081bc99a708b574092142344d2ba07e')
version('8.0.0.180-11.0-linux-ppc64le',
sha256='1229e94731bbca63ee7f5a239f4e1838a51a301d896f3097fbf7377d74704060')
version('8.0.0.180-10.2-linux-x64',
sha256='0c87c12358ee2b99d57c2a8c7560e3bb93e54bb929f5f8bec4964a72a2bb261d')
version('8.0.0.180-10.2-linux-ppc64le',
sha256='59e4ad6db15fcc374976e8052fe39e3f30f34079710fb3c7751a64c853d9243f')
# cuDNN 7.6.5
version('7.6.5.32-10.2-linux-x64',
sha256='600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20',
preferred=True)
version('7.6.5.32-10.2-linux-ppc64le',
sha256='7dc08b6ab9331bfd12207d4802c61db1ad7cace7395b67a6e7b16efa0335668b')
version('7.6.5.32-10.1-linux-x64',
sha256='7eaec8039a2c30ab0bc758d303588767693def6bf49b22485a2c00bf2e136cb3')
version('7.6.5.32-10.1-osx-x64',
sha256='8ecce28a5ed388a2b9b2d239e08d7c550f53b79288e6d9e5eb4c152bfc711aff')
version('7.6.5.32-10.1-linux-ppc64le',
sha256='97b2faf73eedfc128f2f5762784d21467a95b2d5ba719825419c058f427cbf56')
version('7.6.5.32-10.0-linux-x64',
sha256='28355e395f0b2b93ac2c83b61360b35ba6cd0377e44e78be197b6b61b4b492ba')
version('7.6.5.32-10.0-osx-x64',
sha256='6fa0b819374da49102e285ecf7fcb8879df4d0b3cc430cc8b781cdeb41009b47')
version('7.6.5.32-10.0-linux-ppc64le',
sha256='b1717f4570083bbfc6b8b59f280bae4e4197cc1cb50e9d873c05adf670084c5b')
version('7.6.5.32-9.2-linux-x64',
sha256='a2a2c7a8ba7b16d323b651766ee37dcfdbc2b50d920f73f8fde85005424960e4')
version('7.6.5.32-9.2-linux-ppc64le',
sha256='a11f44f9a827b7e69f527a9d260f1637694ff7c1674a3e46bd9ec054a08f9a76')
version('7.6.5.32-9.0-linux-x64',
sha256='bd0a4c0090d5b02feec3f195738968690cc2470b9bc6026e6fe8ff245cd261c8')
# cuDNN 7.6.4
version('7.6.4.38-10.1-linux-x64',
sha256='32091d115c0373027418620a09ebec3658a6bc467d011de7cdd0eb07d644b099')
version('7.6.4.38-10.1-osx-x64',
sha256='bfced062c3689ced2c1fb49c7d5052e6bc3da6974c1eb707e4dcf8cd209d4236')
version('7.6.4.38-10.1-linux-ppc64le',
sha256='f3615fea50986a4dfd05d7a0cf83396dfdceefa9c209e8bf9691e20a48e420ce')
version('7.6.4.38-10.0-linux-x64',
sha256='417bb5daf51377037eb2f5c87649000ca1b9cec0acb16cfe07cb1d3e9a961dbf')
version('7.6.4.38-10.0-osx-x64',
sha256='af01ab841caec25087776a6b8fc7782883da12e590e24825ad1031f9ae0ed4b1')
version('7.6.4.38-10.0-linux-ppc64le',
sha256='c1725ad6bd7d7741e080a1e6da4b62eac027a94ac55c606cce261e3f829400bb')
version('7.6.4.38-9.2-linux-x64',
sha256='c79156531e641289b6a6952888b9637059ef30defd43c3cf82acf38d67f60a27')
version('7.6.4.38-9.2-linux-ppc64le',
sha256='98d8aae2dcd851558397a9a30b73242f257e1556be17c83650e63a0685969884')
version('7.6.4.38-9.0-linux-x64',
sha256='8db78c3623c192d4f03f3087b41c32cb0baac95e13408b5d9dabe626cb4aab5d')
# cuDNN 7.6.3
version('7.6.3.30-10.1-linux-x64',
sha256='352557346d8111e2f954c494be1a90207103d316b8777c33e62b3a7f7b708961')
version('7.6.3.30-10.1-linux-ppc64le',
sha256='f274735a8fc31923d3623b1c3d2b1d0d35bb176687077c6a4d4353c6b900d8ee')
# cuDNN 7.5.1
version('7.5.1.10-10.1-linux-x64',
sha256='2c833f43c9147d9a25a20947a4c5a5f5c33b2443240fd767f63b330c482e68e0')
version('7.5.1.10-10.1-linux-ppc64le',
sha256='a9e23bc83c970daec20874ccd1d8d80b648adf15440ecd0164818b330b1e2663')
version('7.5.1.10-10.0-linux-x64',
sha256='c0a4ec438920aa581dd567117b9c316745b4a451ac739b1e04939a3d8b229985')
version('7.5.1.10-10.0-linux-ppc64le',
sha256='d9205718da5fbab85433476f9ff61fcf4b889d216d6eea26753bbc24d115dd70')
# cuDNN 7.5.0
version('7.5.0.56-10.1-linux-x64',
sha256='c31697d6b71afe62838ad2e57da3c3c9419c4e9f5635d14b683ebe63f904fbc8')
version('7.5.0.56-10.1-linux-ppc64le',
sha256='15415eb714ab86ab6c7531f2cac6474b5dafd989479b062776c670b190e43638')
version('7.5.0.56-10.0-linux-x64',
sha256='701097882cb745d4683bb7ff6c33b8a35c7c81be31bac78f05bad130e7e0b781')
version('7.5.0.56-10.0-linux-ppc64le',
sha256='f0c1cbd9de553c8e2a3893915bd5fff57b30e368ef4c964d783b6a877869e93a')
# cuDNN 7.3.0
version('7.3.0.29-9.0-linux-x64',
sha256='403f9043ff2c7b2c5967454872275d07bca11fd41dfc7b21995eadcad6dbe49b')
# cuDNN 7.2.1
version('7.2.1.38-9.0-linux-x64',
sha256='cf007437b9ac6250ec63b89c25f248d2597fdd01369c80146567f78e75ce4e37')
# cuDNN 7.1.3
version('7.1.3-9.1-linux-x64',
sha256='dd616d3794167ceb923d706bf73e8d6acdda770751492b921ee6827cdf190228')
version('7.1.3-9.1-linux-ppc64le',
sha256='e3b4837f711b98a52faacc872a68b332c833917ef3cf87c0108f1d01af9b2931')
# cuDNN 6.0
version('6.0-8.0-linux-x64',
sha256='9b09110af48c9a4d7b6344eb4b3e344daa84987ed6177d5c44319732f3bb7f9c')
# cuDNN 5.1
version('5.1-8.0-linux-x64',
sha256='c10719b36f2dd6e9ddc63e3189affaa1a94d7d027e63b71c3f64d449ab0645ce')
# CUDA 10.2
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.2-linux-x64')
# CUDA 10.1
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.1-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.1-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.1-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.1-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.1-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.1-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.6.3.30-10.1-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.3.30-10.1-linux-ppc64le')
depends_on('[email protected]:10.1.999', when='@7.5.0.56-10.1-linux-x64')
depends_on('[email protected]:10.1.999', when='@7.5.0.56-10.1-linux-ppc64le')
# CUDA 10.0
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.0-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.0-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.0-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.0-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.5.1.10-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.5.1.10-10.0-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.5.0.56-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.5.0.56-10.0-linux-ppc64le')
# CUDA 9.2
depends_on('[email protected]:9.2.999', when='@7.6.5.32-9.2-linux-x64')
depends_on('[email protected]:9.2.999', when='@7.6.5.32-9.2-linux-ppc64le')
depends_on('[email protected]:9.2.999', when='@7.6.4.38-9.2-linux-x64')
depends_on('[email protected]:9.2.999', when='@7.6.4.38-9.2-linux-ppc64le')
# CUDA 9.1
depends_on('[email protected]:9.1.999', when='@7.1.3-9.1-linux-x64')
depends_on('[email protected]:9.1.999', when='@7.1.3-9.1-linux-ppc64le')
# CUDA 9.0
depends_on('[email protected]:9.0.999', when='@7.6.5.32-9.0-linux-x64')
depends_on('[email protected]:9.0.999', when='@7.6.4.38-9.0-linux-x64')
depends_on('[email protected]:9.0.999', when='@7.3.0.29-9.0-linux-x64')
depends_on('[email protected]:9.0.999', when='@7.2.1.38-9.0-linux-x64')
# CUDA 8.0
depends_on('[email protected]:8.0.999', when='@6.0-8.0-linux-x64')
depends_on('[email protected]:8.0.999', when='@5.1-8.0-linux-x64')
def url_for_version(self, version):
url = 'https://developer.download.nvidia.com/compute/redist/cudnn/v{0}/cudnn-{1}-v{2}.tgz'
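        # Illustrative mapping (added comment, assuming Spack's Version slicing):
        # '7.6.5.32-10.2-linux-x64' gives directory '7.6.5', ver '7.6.5.32',
        # cuda '10.2-linux-x64', i.e. .../v7.6.5/cudnn-10.2-linux-x64-v7.6.5.32.tgz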
if version >= Version('7.2'):
directory = version[:3]
ver = version[:4]
cuda = version[4:]
elif version >= Version('7.1'):
directory = version[:3]
ver = version[:2]
cuda = version[3:]
elif version >= Version('7.0'):
directory = version[:3]
ver = version[0]
cuda = version[3:]
else:
directory = version[:2]
ver = version[:2]
cuda = version[2:]
return url.format(directory, cuda, ver)
def setup_run_environment(self, env):
if 'target=ppc64le: platform=linux' in self.spec:
env.set('cuDNN_ROOT', os.path.join(
self.prefix, 'targets', 'ppc64le-linux'))
def install(self, spec, prefix):
install_tree('.', prefix)
if 'target=ppc64le: platform=linux' in spec:
symlink(os.path.join(prefix, 'targets', 'ppc64le-linux', 'lib'),
prefix.lib)
symlink(
os.path.join(prefix, 'targets', 'ppc64le-linux', 'include'),
prefix.include)
|
[
"os.path.join"
] |
[((10119, 10172), 'os.path.join', 'os.path.join', (['self.prefix', '"""targets"""', '"""ppc64le-linux"""'], {}), "(self.prefix, 'targets', 'ppc64le-linux')\n", (10131, 10172), False, 'import os\n'), ((10337, 10392), 'os.path.join', 'os.path.join', (['prefix', '"""targets"""', '"""ppc64le-linux"""', '"""lib"""'], {}), "(prefix, 'targets', 'ppc64le-linux', 'lib')\n", (10349, 10392), False, 'import os\n'), ((10463, 10522), 'os.path.join', 'os.path.join', (['prefix', '"""targets"""', '"""ppc64le-linux"""', '"""include"""'], {}), "(prefix, 'targets', 'ppc64le-linux', 'include')\n", (10475, 10522), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""Graphical monitor of Celery events using curses."""
from __future__ import absolute_import, print_function, unicode_literals
import curses
import sys
import threading
from datetime import datetime
from itertools import count
from textwrap import wrap
from time import time
from math import ceil
from celery import VERSION_BANNER
from celery import states
from celery.app import app_or_default
from celery.five import items, values
from celery.utils.text import abbr, abbrtask
__all__ = ['CursesMonitor', 'evtop']
BORDER_SPACING = 4
LEFT_BORDER_OFFSET = 3
UUID_WIDTH = 36
STATE_WIDTH = 8
TIMESTAMP_WIDTH = 8
MIN_WORKER_WIDTH = 15
MIN_TASK_WIDTH = 16
# this module is considered experimental
# we don't care about coverage.
STATUS_SCREEN = """\
events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}
"""
class CursesMonitor(object): # pragma: no cover
"""A curses based Celery task monitor."""
keymap = {}
win = None
screen_delay = 10
selected_task = None
selected_position = 0
selected_str = 'Selected: '
foreground = curses.COLOR_BLACK
background = curses.COLOR_WHITE
online_str = 'Workers online: '
help_title = 'Keys: '
help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit')
greet = 'celery events {0}'.format(VERSION_BANNER)
info_str = 'Info: '
def __init__(self, state, app, keymap=None):
self.app = app
self.keymap = keymap or self.keymap
self.state = state
default_keymap = {
'J': self.move_selection_down,
'K': self.move_selection_up,
'C': self.revoke_selection,
'T': self.selection_traceback,
'R': self.selection_result,
'I': self.selection_info,
'L': self.selection_rate_limit,
}
self.keymap = dict(default_keymap, **self.keymap)
self.lock = threading.RLock()
def format_row(self, uuid, task, worker, timestamp, state):
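        # Added descriptive comment: a row is rendered as
        # "<uuid> <worker> <task> <timestamp> <state>", with the uuid, worker and
        # task columns shrunk as needed to fit the current display width.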
mx = self.display_width
# include spacing
detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH
uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH
if uuid_space < UUID_WIDTH:
uuid_width = uuid_space
else:
uuid_width = UUID_WIDTH
detail_width = detail_width - uuid_width - 1
task_width = int(ceil(detail_width / 2.0))
worker_width = detail_width - task_width - 1
uuid = abbr(uuid, uuid_width).ljust(uuid_width)
worker = abbr(worker, worker_width).ljust(worker_width)
task = abbrtask(task, task_width).ljust(task_width)
state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
timestamp = timestamp.ljust(TIMESTAMP_WIDTH)
row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task,
timestamp, state)
if self.screen_width is None:
self.screen_width = len(row[:mx])
return row[:mx]
@property
def screen_width(self):
_, mx = self.win.getmaxyx()
return mx
@property
def screen_height(self):
my, _ = self.win.getmaxyx()
return my
@property
def display_width(self):
_, mx = self.win.getmaxyx()
return mx - BORDER_SPACING
@property
def display_height(self):
my, _ = self.win.getmaxyx()
return my - 10
@property
def limit(self):
return self.display_height
def find_position(self):
if not self.tasks:
return 0
for i, e in enumerate(self.tasks):
if self.selected_task == e[0]:
return i
return 0
def move_selection_up(self):
self.move_selection(-1)
def move_selection_down(self):
self.move_selection(1)
def move_selection(self, direction=1):
if not self.tasks:
return
pos = self.find_position()
try:
self.selected_task = self.tasks[pos + direction][0]
except IndexError:
self.selected_task = self.tasks[0][0]
keyalias = {curses.KEY_DOWN: 'J',
curses.KEY_UP: 'K',
curses.KEY_ENTER: 'I'}
def handle_keypress(self):
try:
key = self.win.getkey().upper()
except Exception: # pylint: disable=broad-except
return
key = self.keyalias.get(key) or key
handler = self.keymap.get(key)
if handler is not None:
handler()
def alert(self, callback, title=None):
self.win.erase()
my, mx = self.win.getmaxyx()
y = blank_line = count(2)
if title:
self.win.addstr(next(y), 3, title,
curses.A_BOLD | curses.A_UNDERLINE)
next(blank_line)
callback(my, mx, next(y))
self.win.addstr(my - 1, 0, 'Press any key to continue...',
curses.A_BOLD)
self.win.refresh()
while 1:
try:
return self.win.getkey().upper()
except Exception: # pylint: disable=broad-except
pass
def selection_rate_limit(self):
if not self.selected_task:
return curses.beep()
task = self.state.tasks[self.selected_task]
if not task.name:
return curses.beep()
my, mx = self.win.getmaxyx()
r = 'New rate limit: '
self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r)))
rlimit = self.readline(my - 2, 3 + len(r))
if rlimit:
reply = self.app.control.rate_limit(task.name,
rlimit.strip(), reply=True)
self.alert_remote_control_reply(reply)
def alert_remote_control_reply(self, reply):
def callback(my, mx, xs):
y = count(xs)
if not reply:
self.win.addstr(
next(y), 3, 'No replies received in 1s deadline.',
curses.A_BOLD + curses.color_pair(2),
)
return
for subreply in reply:
curline = next(y)
host, response = next(items(subreply))
host = '{0}: '.format(host)
self.win.addstr(curline, 3, host, curses.A_BOLD)
attr = curses.A_NORMAL
text = ''
if 'error' in response:
text = response['error']
attr |= curses.color_pair(2)
elif 'ok' in response:
text = response['ok']
attr |= curses.color_pair(3)
self.win.addstr(curline, 3 + len(host), text, attr)
return self.alert(callback, 'Remote Control Command Replies')
def readline(self, x, y):
buffer = str()
curses.echo()
try:
i = 0
while 1:
ch = self.win.getch(x, y + i)
if ch != -1:
if ch in (10, curses.KEY_ENTER): # enter
break
if ch in (27,):
buffer = str()
break
buffer += chr(ch)
i += 1
finally:
curses.noecho()
return buffer
def revoke_selection(self):
if not self.selected_task:
return curses.beep()
reply = self.app.control.revoke(self.selected_task, reply=True)
self.alert_remote_control_reply(reply)
def selection_info(self):
if not self.selected_task:
return
def alert_callback(mx, my, xs):
my, mx = self.win.getmaxyx()
y = count(xs)
task = self.state.tasks[self.selected_task]
info = task.info(extra=['state'])
infoitems = [
('args', info.pop('args', None)),
('kwargs', info.pop('kwargs', None))
] + list(info.items())
for key, value in infoitems:
if key is None:
continue
value = str(value)
curline = next(y)
keys = key + ': '
self.win.addstr(curline, 3, keys, curses.A_BOLD)
wrapped = wrap(value, mx - 2)
if len(wrapped) == 1:
self.win.addstr(
curline, len(keys) + 3,
abbr(wrapped[0],
self.screen_width - (len(keys) + 3)))
else:
for subline in wrapped:
nexty = next(y)
if nexty >= my - 1:
subline = ' ' * 4 + '[...]'
elif nexty >= my:
break
self.win.addstr(
nexty, 3,
abbr(' ' * 4 + subline, self.screen_width - 4),
curses.A_NORMAL,
)
return self.alert(
alert_callback, 'Task details for {0.selected_task}'.format(self),
)
def selection_traceback(self):
if not self.selected_task:
return curses.beep()
task = self.state.tasks[self.selected_task]
if task.state not in states.EXCEPTION_STATES:
return curses.beep()
def alert_callback(my, mx, xs):
y = count(xs)
for line in task.traceback.split('\n'):
self.win.addstr(next(y), 3, line)
return self.alert(
alert_callback,
'Task Exception Traceback for {0.selected_task}'.format(self),
)
def selection_result(self):
if not self.selected_task:
return
def alert_callback(my, mx, xs):
y = count(xs)
task = self.state.tasks[self.selected_task]
result = (getattr(task, 'result', None) or
getattr(task, 'exception', None))
for line in wrap(result or '', mx - 2):
self.win.addstr(next(y), 3, line)
return self.alert(
alert_callback,
'Task Result for {0.selected_task}'.format(self),
)
def display_task_row(self, lineno, task):
state_color = self.state_colors.get(task.state)
attr = curses.A_NORMAL
if task.uuid == self.selected_task:
attr = curses.A_STANDOUT
timestamp = datetime.utcfromtimestamp(
task.timestamp or time(),
)
timef = timestamp.strftime('%H:%M:%S')
hostname = task.worker.hostname if task.worker else '*NONE*'
line = self.format_row(task.uuid, task.name,
hostname,
timef, task.state)
self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr)
if state_color:
self.win.addstr(lineno,
len(line) - STATE_WIDTH + BORDER_SPACING - 1,
task.state, state_color | attr)
def draw(self):
with self.lock:
win = self.win
self.handle_keypress()
x = LEFT_BORDER_OFFSET
y = blank_line = count(2)
my, mx = win.getmaxyx()
win.erase()
win.bkgd(' ', curses.color_pair(1))
win.border()
win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
next(blank_line)
win.addstr(next(y), x, self.format_row('UUID', 'TASK',
'WORKER', 'TIME', 'STATE'),
curses.A_BOLD | curses.A_UNDERLINE)
tasks = self.tasks
if tasks:
for row, (uuid, task) in enumerate(tasks):
if row > self.display_height:
break
if task.uuid:
lineno = next(y)
self.display_task_row(lineno, task)
# -- Footer
next(blank_line)
win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4)
# Selected Task Info
if self.selected_task:
win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
info = 'Missing extended info'
detail = ''
try:
selection = self.state.tasks[self.selected_task]
except KeyError:
pass
else:
info = selection.info()
if 'runtime' in info:
info['runtime'] = '{0:.2f}'.format(info['runtime'])
if 'result' in info:
info['result'] = abbr(info['result'], 16)
info = ' '.join(
'{0}={1}'.format(key, value)
for key, value in items(info)
)
detail = '... -> key i'
infowin = abbr(info,
self.screen_width - len(self.selected_str) - 2,
detail)
win.addstr(my - 5, x + len(self.selected_str), infowin)
# Make ellipsis bold
if detail in infowin:
detailpos = len(infowin) - len(detail)
win.addstr(my - 5, x + len(self.selected_str) + detailpos,
detail, curses.A_BOLD)
else:
win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL)
# Workers
if self.workers:
win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
win.addstr(my - 4, x + len(self.online_str),
', '.join(sorted(self.workers)), curses.A_NORMAL)
else:
win.addstr(my - 4, x, 'No workers discovered.')
# Info
win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
win.addstr(
my - 3, x + len(self.info_str),
STATUS_SCREEN.format(
s=self.state,
w_alive=len([w for w in values(self.state.workers)
if w.alive]),
w_all=len(self.state.workers),
),
curses.A_DIM,
)
# Help
self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD)
self.safe_add_str(my - 2, x + len(self.help_title), self.help,
curses.A_DIM)
win.refresh()
def safe_add_str(self, y, x, string, *args, **kwargs):
if x + len(string) > self.screen_width:
string = string[:self.screen_width - x]
self.win.addstr(y, x, string, *args, **kwargs)
def init_screen(self):
with self.lock:
self.win = curses.initscr()
self.win.nodelay(True)
self.win.keypad(True)
curses.start_color()
curses.init_pair(1, self.foreground, self.background)
# exception states
curses.init_pair(2, curses.COLOR_RED, self.background)
# successful state
curses.init_pair(3, curses.COLOR_GREEN, self.background)
# revoked state
curses.init_pair(4, curses.COLOR_MAGENTA, self.background)
# greeting
curses.init_pair(5, curses.COLOR_BLUE, self.background)
# started state
curses.init_pair(6, curses.COLOR_YELLOW, self.foreground)
self.state_colors = {states.SUCCESS: curses.color_pair(3),
states.REVOKED: curses.color_pair(4),
states.STARTED: curses.color_pair(6)}
for state in states.EXCEPTION_STATES:
self.state_colors[state] = curses.color_pair(2)
curses.cbreak()
def resetscreen(self):
with self.lock:
curses.nocbreak()
self.win.keypad(False)
curses.echo()
curses.endwin()
def nap(self):
curses.napms(self.screen_delay)
@property
def tasks(self):
return list(self.state.tasks_by_time(limit=self.limit))
@property
def workers(self):
return [hostname for hostname, w in items(self.state.workers)
if w.alive]
class DisplayThread(threading.Thread): # pragma: no cover
def __init__(self, display):
self.display = display
self.shutdown = False
threading.Thread.__init__(self)
def run(self):
while not self.shutdown:
self.display.draw()
self.display.nap()
def capture_events(app, state, display): # pragma: no cover
def on_connection_error(exc, interval):
print('Connection Error: {0!r}. Retry in {1}s.'.format(
exc, interval), file=sys.stderr)
while 1:
print('-> evtop: starting capture...', file=sys.stderr)
with app.connection_for_read() as conn:
try:
conn.ensure_connection(on_connection_error,
app.conf.broker_connection_max_retries)
recv = app.events.Receiver(conn, handlers={'*': state.event})
display.resetscreen()
display.init_screen()
recv.capture()
except conn.connection_errors + conn.channel_errors as exc:
print('Connection lost: {0!r}'.format(exc), file=sys.stderr)
def evtop(app=None): # pragma: no cover
"""Start curses monitor."""
app = app_or_default(app)
state = app.events.State()
display = CursesMonitor(state, app)
display.init_screen()
refresher = DisplayThread(display)
refresher.start()
try:
capture_events(app, state, display)
except Exception:
refresher.shutdown = True
refresher.join()
display.resetscreen()
raise
except (KeyboardInterrupt, SystemExit):
refresher.shutdown = True
refresher.join()
display.resetscreen()
if __name__ == '__main__': # pragma: no cover
evtop()
|
[
"curses.start_color",
"curses.endwin",
"curses.napms",
"textwrap.wrap",
"curses.nocbreak",
"curses.cbreak",
"threading.Thread.__init__",
"celery.five.values",
"curses.init_pair",
"threading.RLock",
"celery.five.items",
"curses.beep",
"curses.color_pair",
"celery.utils.text.abbrtask",
"curses.initscr",
"celery.utils.text.abbr",
"curses.echo",
"time.time",
"celery.app.app_or_default",
"math.ceil",
"curses.noecho",
"itertools.count"
] |
[((17802, 17821), 'celery.app.app_or_default', 'app_or_default', (['app'], {}), '(app)\n', (17816, 17821), False, 'from celery.app import app_or_default\n'), ((1920, 1937), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1935, 1937), False, 'import threading\n'), ((4667, 4675), 'itertools.count', 'count', (['(2)'], {}), '(2)\n', (4672, 4675), False, 'from itertools import count\n'), ((6953, 6966), 'curses.echo', 'curses.echo', ([], {}), '()\n', (6964, 6966), False, 'import curses\n'), ((16303, 16334), 'curses.napms', 'curses.napms', (['self.screen_delay'], {}), '(self.screen_delay)\n', (16315, 16334), False, 'import curses\n'), ((16735, 16766), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (16760, 16766), False, 'import threading\n'), ((2408, 2432), 'math.ceil', 'ceil', (['(detail_width / 2.0)'], {}), '(detail_width / 2.0)\n', (2412, 2432), False, 'from math import ceil\n'), ((5258, 5271), 'curses.beep', 'curses.beep', ([], {}), '()\n', (5269, 5271), False, 'import curses\n'), ((5369, 5382), 'curses.beep', 'curses.beep', ([], {}), '()\n', (5380, 5382), False, 'import curses\n'), ((5949, 5958), 'itertools.count', 'count', (['xs'], {}), '(xs)\n', (5954, 5958), False, 'from itertools import count\n'), ((7395, 7410), 'curses.noecho', 'curses.noecho', ([], {}), '()\n', (7408, 7410), False, 'import curses\n'), ((7520, 7533), 'curses.beep', 'curses.beep', ([], {}), '()\n', (7531, 7533), False, 'import curses\n'), ((7836, 7845), 'itertools.count', 'count', (['xs'], {}), '(xs)\n', (7841, 7845), False, 'from itertools import count\n'), ((9374, 9387), 'curses.beep', 'curses.beep', ([], {}), '()\n', (9385, 9387), False, 'import curses\n'), ((9513, 9526), 'curses.beep', 'curses.beep', ([], {}), '()\n', (9524, 9526), False, 'import curses\n'), ((9584, 9593), 'itertools.count', 'count', (['xs'], {}), '(xs)\n', (9589, 9593), False, 'from itertools import count\n'), ((9981, 9990), 'itertools.count', 'count', (['xs'], {}), '(xs)\n', (9986, 9990), False, 'from itertools import count\n'), ((10182, 10208), 'textwrap.wrap', 'wrap', (["(result or '')", '(mx - 2)'], {}), "(result or '', mx - 2)\n", (10186, 10208), False, 'from textwrap import wrap\n'), ((11388, 11396), 'itertools.count', 'count', (['(2)'], {}), '(2)\n', (11393, 11396), False, 'from itertools import count\n'), ((15076, 15092), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (15090, 15092), False, 'import curses\n'), ((15174, 15194), 'curses.start_color', 'curses.start_color', ([], {}), '()\n', (15192, 15194), False, 'import curses\n'), ((15207, 15260), 'curses.init_pair', 'curses.init_pair', (['(1)', 'self.foreground', 'self.background'], {}), '(1, self.foreground, self.background)\n', (15223, 15260), False, 'import curses\n'), ((15304, 15358), 'curses.init_pair', 'curses.init_pair', (['(2)', 'curses.COLOR_RED', 'self.background'], {}), '(2, curses.COLOR_RED, self.background)\n', (15320, 15358), False, 'import curses\n'), ((15402, 15458), 'curses.init_pair', 'curses.init_pair', (['(3)', 'curses.COLOR_GREEN', 'self.background'], {}), '(3, curses.COLOR_GREEN, self.background)\n', (15418, 15458), False, 'import curses\n'), ((15499, 15557), 'curses.init_pair', 'curses.init_pair', (['(4)', 'curses.COLOR_MAGENTA', 'self.background'], {}), '(4, curses.COLOR_MAGENTA, self.background)\n', (15515, 15557), False, 'import curses\n'), ((15593, 15648), 'curses.init_pair', 'curses.init_pair', (['(5)', 'curses.COLOR_BLUE', 'self.background'], {}), '(5, curses.COLOR_BLUE, self.background)\n', (15609, 15648), 
False, 'import curses\n'), ((15689, 15746), 'curses.init_pair', 'curses.init_pair', (['(6)', 'curses.COLOR_YELLOW', 'self.foreground'], {}), '(6, curses.COLOR_YELLOW, self.foreground)\n', (15705, 15746), False, 'import curses\n'), ((16088, 16103), 'curses.cbreak', 'curses.cbreak', ([], {}), '()\n', (16101, 16103), False, 'import curses\n'), ((16168, 16185), 'curses.nocbreak', 'curses.nocbreak', ([], {}), '()\n', (16183, 16185), False, 'import curses\n'), ((16233, 16246), 'curses.echo', 'curses.echo', ([], {}), '()\n', (16244, 16246), False, 'import curses\n'), ((16259, 16274), 'curses.endwin', 'curses.endwin', ([], {}), '()\n', (16272, 16274), False, 'import curses\n'), ((2503, 2525), 'celery.utils.text.abbr', 'abbr', (['uuid', 'uuid_width'], {}), '(uuid, uuid_width)\n', (2507, 2525), False, 'from celery.utils.text import abbr, abbrtask\n'), ((2561, 2587), 'celery.utils.text.abbr', 'abbr', (['worker', 'worker_width'], {}), '(worker, worker_width)\n', (2565, 2587), False, 'from celery.utils.text import abbr, abbrtask\n'), ((2623, 2649), 'celery.utils.text.abbrtask', 'abbrtask', (['task', 'task_width'], {}), '(task, task_width)\n', (2631, 2649), False, 'from celery.utils.text import abbr, abbrtask\n'), ((2684, 2708), 'celery.utils.text.abbr', 'abbr', (['state', 'STATE_WIDTH'], {}), '(state, STATE_WIDTH)\n', (2688, 2708), False, 'from celery.utils.text import abbr, abbrtask\n'), ((8408, 8427), 'textwrap.wrap', 'wrap', (['value', '(mx - 2)'], {}), '(value, mx - 2)\n', (8412, 8427), False, 'from textwrap import wrap\n'), ((10680, 10686), 'time.time', 'time', ([], {}), '()\n', (10684, 10686), False, 'from time import time\n'), ((11483, 11503), 'curses.color_pair', 'curses.color_pair', (['(1)'], {}), '(1)\n', (11500, 11503), False, 'import curses\n'), ((15797, 15817), 'curses.color_pair', 'curses.color_pair', (['(3)'], {}), '(3)\n', (15814, 15817), False, 'import curses\n'), ((15868, 15888), 'curses.color_pair', 'curses.color_pair', (['(4)'], {}), '(4)\n', (15885, 15888), False, 'import curses\n'), ((15939, 15959), 'curses.color_pair', 'curses.color_pair', (['(6)'], {}), '(6)\n', (15956, 15959), False, 'import curses\n'), ((16054, 16074), 'curses.color_pair', 'curses.color_pair', (['(2)'], {}), '(2)\n', (16071, 16074), False, 'import curses\n'), ((16517, 16542), 'celery.five.items', 'items', (['self.state.workers'], {}), '(self.state.workers)\n', (16522, 16542), False, 'from celery.five import items, values\n'), ((6297, 6312), 'celery.five.items', 'items', (['subreply'], {}), '(subreply)\n', (6302, 6312), False, 'from celery.five import items, values\n'), ((6601, 6621), 'curses.color_pair', 'curses.color_pair', (['(2)'], {}), '(2)\n', (6618, 6621), False, 'import curses\n'), ((11586, 11606), 'curses.color_pair', 'curses.color_pair', (['(5)'], {}), '(5)\n', (11603, 11606), False, 'import curses\n'), ((6125, 6145), 'curses.color_pair', 'curses.color_pair', (['(2)'], {}), '(2)\n', (6142, 6145), False, 'import curses\n'), ((6731, 6751), 'curses.color_pair', 'curses.color_pair', (['(3)'], {}), '(3)\n', (6748, 6751), False, 'import curses\n'), ((12924, 12948), 'celery.utils.text.abbr', 'abbr', (["info['result']", '(16)'], {}), "(info['result'], 16)\n", (12928, 12948), False, 'from celery.utils.text import abbr, abbrtask\n'), ((9048, 9094), 'celery.utils.text.abbr', 'abbr', (["(' ' * 4 + subline)", '(self.screen_width - 4)'], {}), "(' ' * 4 + subline, self.screen_width - 4)\n", (9052, 9094), False, 'from celery.utils.text import abbr, abbrtask\n'), ((13081, 13092), 'celery.five.items', 'items', 
(['info'], {}), '(info)\n', (13086, 13092), False, 'from celery.five import items, values\n'), ((14360, 14386), 'celery.five.values', 'values', (['self.state.workers'], {}), '(self.state.workers)\n', (14366, 14386), False, 'from celery.five import items, values\n')]
|
"""A feature extractor for patients' utilization."""
from __future__ import absolute_import
import logging
import pandas as pd
from sutter.lib import postgres
from sutter.lib.feature_extractor import FeatureExtractor
log = logging.getLogger('feature_extraction')
class UtilizationExtractor(FeatureExtractor):
"""
Generates features related to the number of previous ER visits.
Features:
`pre_[n]_month_[adm_type]` - Number of [adm_type] (emergency, inpatient, outpatient) visits
during the [n] (3, 6, 12) month before admission
`er_visits_lace` - LACE score associated with number of ER visits:
                       the number of emergency visits
                       during the 6 months before admission, capped at 4.

"""
def extract(self):
query = """
SELECT
*
FROM {}.bayes_vw_feature_utilization
""".format(self._schema)
engine = postgres.get_connection()
res = pd.read_sql(query, engine)
log.info('The pre-pivot table has %d rows.' % len(res))
pivoted = pd.pivot_table(data=res, index='hsp_acct_study_id', columns='pre_adm_type',
aggfunc=sum, dropna=True, fill_value=0,
values=['pre_3_month', 'pre_6_month', 'pre_12_month'])
df_columns = [top + "_" + bottom.lower() for top, bottom in pivoted.columns.values]
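        # e.g. a ('pre_6_month', 'EMERGENCY') pivot column becomes 'pre_6_month_emergency'
        # (illustrative added comment; the actual pre_adm_type values come from the source table).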
df = pd.DataFrame(index=res.hsp_acct_study_id.unique())
df[df_columns] = pivoted
df.fillna(0, inplace=True)
df['er_visits_lace'] = df['pre_6_month_emergency'].apply(lambda cnt: min(cnt, 4))
return self.emit_df(df)
|
[
"logging.getLogger",
"sutter.lib.postgres.get_connection",
"pandas.pivot_table",
"pandas.read_sql"
] |
[((228, 267), 'logging.getLogger', 'logging.getLogger', (['"""feature_extraction"""'], {}), "('feature_extraction')\n", (245, 267), False, 'import logging\n'), ((969, 994), 'sutter.lib.postgres.get_connection', 'postgres.get_connection', ([], {}), '()\n', (992, 994), False, 'from sutter.lib import postgres\n'), ((1010, 1036), 'pandas.read_sql', 'pd.read_sql', (['query', 'engine'], {}), '(query, engine)\n', (1021, 1036), True, 'import pandas as pd\n'), ((1120, 1298), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'res', 'index': '"""hsp_acct_study_id"""', 'columns': '"""pre_adm_type"""', 'aggfunc': 'sum', 'dropna': '(True)', 'fill_value': '(0)', 'values': "['pre_3_month', 'pre_6_month', 'pre_12_month']"}), "(data=res, index='hsp_acct_study_id', columns='pre_adm_type',\n aggfunc=sum, dropna=True, fill_value=0, values=['pre_3_month',\n 'pre_6_month', 'pre_12_month'])\n", (1134, 1298), True, 'import pandas as pd\n')]
|
"""Perform normalization on inputs or rewards.
"""
import numpy as np
import torch
from gym.spaces import Box
def normalize_angle(x):
"""Wraps input angle to [-pi, pi].
"""
return ((x + np.pi) % (2 * np.pi)) - np.pi
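# Illustrative usage (added comment, not part of the original module):
#   normalize_angle(3 * np.pi / 2)  -> -np.pi / 2
#   normalize_angle(-3 * np.pi / 2) ->  np.pi / 2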
class RunningMeanStd():
"""Calulates the running mean and std of a data stream.
Attributes:
mean (np.array): mean of data stream.
var (np.array): variance of data stream.
count (float): total count of data steam.
"""
def __init__(self, epsilon=1e-4, shape=()):
"""Initializes containers for data mean and variance.
Args:
epsilon (float): helps with arithmetic issues.
shape (tuple): the shape of the data stream's output.
"""
self.mean = np.zeros(shape, np.float64)
self.var = np.ones(shape, np.float64)
self.count = epsilon
def update(self, arr):
"""Update current stats with a new stream of data.
Args:
arr (np.array): 1D array of data, (batch_size, *shape).
"""
batch_mean = np.mean(arr, axis=0)
batch_var = np.var(arr, axis=0)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
"""Util function for `update` method.
"""
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
class BaseNormalizer(object):
"""Template/default normalizer.
Attributes:
read_only (bool): if to freeze the current stats being tracked.
"""
def __init__(self, read_only=False):
self.read_only = read_only
def set_read_only(self):
self.read_only = True
def unset_read_only(self):
self.read_only = False
def __call__(self, x, *args, **kwargs):
"""Invokes normalization on the given input.
"""
return x
def state_dict(self):
"""Returns snapshot of current stats.
"""
return {}
def load_state_dict(self, _):
"""Restores the stats from a snapshot.
"""
pass
class MeanStdNormalizer(BaseNormalizer):
"""Normalize by the running average.
"""
def __init__(self, shape=(), read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
shape (tuple): shape of data being tracked.
read_only (bool): if to freeze the tracker.
clip (float): bounds on the data.
            epsilon (float): offset to prevent divide-by-zero.
"""
super().__init__(read_only)
self.read_only = read_only
self.rms = RunningMeanStd(shape=shape)
self.clip = clip
self.epsilon = epsilon
def __call__(self, x):
"""Update tracker given data, optionally normalize the data.
"""
x = np.asarray(x)
if not self.read_only:
self.rms.update(x)
return np.clip(
(x - self.rms.mean) / np.sqrt(self.rms.var + self.epsilon),
-self.clip, self.clip)
def state_dict(self):
return {'mean': self.rms.mean, 'var': self.rms.var}
def load_state_dict(self, saved):
self.rms.mean = saved['mean']
self.rms.var = saved['var']
class RewardStdNormalizer(MeanStdNormalizer):
"""Reward normalization by running average of returns.
Papers:
* arxiv.org/pdf/1808.04355.pdf
* arxiv.org/pdf/1810.12894.pdf
Also see:
* github.com/openai/baselines/issues/538
"""
def __init__(self, gamma=0.99, read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
gamma (float): discount factor for rewards.
read_only (bool): if to freeze the tracker.
clip (float): bounds on the data.
            epsilon (float): offset to prevent divide-by-zero.
"""
# Reward has default shape (1,) or just ().
super().__init__((), read_only, clip, epsilon)
self.gamma = gamma
self.ret = None
def __call__(self, x, dones):
"""Update tracker given reward, optionally normalize the reward (only scaling).
"""
x = np.asarray(x)
if not self.read_only:
# Track running average of forward discounted returns.
if self.ret is None:
self.ret = np.zeros(x.shape[0])
self.ret = self.ret * self.gamma + x
self.rms.update(self.ret)
# Prevent information leak from previous episodes.
            self.ret[dones.astype(bool)] = 0
return np.clip(x / np.sqrt(self.rms.var + self.epsilon), -self.clip, self.clip)
class RescaleNormalizer(BaseNormalizer):
"""Apply constant scaling.
"""
def __init__(self, coef=1.0):
"""Initializes with fixed scaling constant.
Args:
coef (float): scaling coefficient.
"""
        super().__init__()
self.coef = coef
def __call__(self, x):
"""Scale the input.
"""
if not isinstance(x, torch.Tensor):
x = np.asarray(x)
return self.coef * x
class ImageNormalizer(RescaleNormalizer):
"""Scale image pixles from [0,255] to [0,1].
"""
def __init__(self):
        super().__init__(1.0 / 255)
class ActionUnnormalizer(BaseNormalizer):
"""Assumes policy output action is in [-1,1], unnormalize it for gym env.
"""
def __init__(self, action_space):
"""Defines the mean and std for the bounded action space.
"""
super().__init__()
assert isinstance(action_space, Box), "action space must be gym.spaces.Box"
low, high = action_space.low, action_space.high
self.mean = (low + high) / 2.0
self.std = (high - low) / 2.0
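        # Added descriptive comment: with x in [-1, 1], mean + x * std maps
        # -1 -> action_space.low and 1 -> action_space.high.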
def __call__(self, action):
"""Unnormalizes given input action.
"""
x = np.asarray(action)
return self.mean + x * self.std
|
[
"numpy.mean",
"numpy.sqrt",
"numpy.ones",
"numpy.asarray",
"numpy.square",
"numpy.zeros",
"numpy.var"
] |
[((787, 814), 'numpy.zeros', 'np.zeros', (['shape', 'np.float64'], {}), '(shape, np.float64)\n', (795, 814), True, 'import numpy as np\n'), ((834, 860), 'numpy.ones', 'np.ones', (['shape', 'np.float64'], {}), '(shape, np.float64)\n', (841, 860), True, 'import numpy as np\n'), ((1094, 1114), 'numpy.mean', 'np.mean', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1101, 1114), True, 'import numpy as np\n'), ((1135, 1154), 'numpy.var', 'np.var', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1141, 1154), True, 'import numpy as np\n'), ((3364, 3377), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3374, 3377), True, 'import numpy as np\n'), ((4742, 4755), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4752, 4755), True, 'import numpy as np\n'), ((6458, 6476), 'numpy.asarray', 'np.asarray', (['action'], {}), '(action)\n', (6468, 6476), True, 'import numpy as np\n'), ((5651, 5664), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (5661, 5664), True, 'import numpy as np\n'), ((3498, 3534), 'numpy.sqrt', 'np.sqrt', (['(self.rms.var + self.epsilon)'], {}), '(self.rms.var + self.epsilon)\n', (3505, 3534), True, 'import numpy as np\n'), ((4914, 4934), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (4922, 4934), True, 'import numpy as np\n'), ((5160, 5196), 'numpy.sqrt', 'np.sqrt', (['(self.rms.var + self.epsilon)'], {}), '(self.rms.var + self.epsilon)\n', (5167, 5196), True, 'import numpy as np\n'), ((1637, 1653), 'numpy.square', 'np.square', (['delta'], {}), '(delta)\n', (1646, 1653), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 21 20:09:08 2021
######################
##### read h5 ########
######################
# 1.read h5-file
h5_file = h5py.File(files[1],'r')
# 2.show all keys in h5-file
h5_file.keys()
# 3.loop over all keys in the h5-file
for key in h5_file.keys():
    onekey = key
    onekey_name = h5_file[key].name
# 4.access a group whose key "NN" is already known
h5_file["NN"]
h5_file["NN"].keys()
f_dict = dict(h5_file["NN"])
f_dict.keys() # all keywords
# 5.read a group's datasets
data = f_dict["data"][()] # recommended
data = f_dict["data"].value # data is a numpy ndarray (multidimensional array)
trace = data[0] # a single trace of data
# 6.read a group's Int / Float values
baz = f_dict["baz"].value
baz = h5_file["NN"]["baz"].value
# 7.read a group's strings
# encode converts a unicode string to another encoding, e.g. str2.encode('utf8')
# turns the unicode string str2 into a utf8-encoded string; decode does the reverse
comp = h5_file["NN"]["comp"].value[0].decode('utf-8')
# 8. close the file
h5_file.close()
######################
##### write h5 ########
######################
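# A minimal write example (illustrative sketch, not from the original script;
# the output file name is hypothetical):
# fo = h5py.File("out.h5", "w")
# g = fo.create_group("NN")
# g.create_dataset("data", data=data)
# g["baz"] = baz
# fo.close()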
@author: yf
"""
#%%
import numpy as np
import h5py
import os
import glob
#%% 1. set parameter
file = "../../data/BJ.081_BJ.084__2020_04_11_00_00_00T2021_04_13_00_00_00__all.jld2"
chan = "NN"
dt = 0.005
#%% 2. read h5
# open file
f = h5py.File(file,'r')
# read data
data = f[chan]["data"][0]
# read parameters
azi = f[chan]["azi"][()]
baz = f[chan]["baz"][()]
maxlag = f[chan]["maxlag"][()]
cc_len = f[chan]["cc_len"][()]
cc_step = f[chan]["cc_step"][()]
corr_type = f[chan]["corr_type"][()]
comp = f[chan]["comp"][()]
dist = f[chan]["dist"][()] # dist = f[chan]["dist"].value
lat = f[chan]["lat"][()]
lon = f[chan]["lon"][()]
N_glob = f[chan]["N_glob"][()]
N_read = f[chan]["N_read"][()]
N_good = f[chan]["N_good"][()]
name = f[chan]["name"][()][0].decode('utf-8')
# close h5-file
f.close()
|
[
"h5py.File"
] |
[((1202, 1222), 'h5py.File', 'h5py.File', (['file', '"""r"""'], {}), "(file, 'r')\n", (1211, 1222), False, 'import h5py\n')]
|
import data_algebra
import data_algebra.test_util
from data_algebra.data_ops import * # https://github.com/WinVector/data_algebra
import data_algebra.util
import data_algebra.SQLite
def test_compount_where_and():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" and b == "c" and x > 0 and y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
def test_compount_where_amp():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" & b == "c" & x > 0 & y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
|
[
"data_algebra.default_data_model.pd.DataFrame",
"data_algebra.SQLite.SQLiteModel",
"data_algebra.test_util.check_transform"
] |
[((225, 387), 'data_algebra.default_data_model.pd.DataFrame', 'data_algebra.default_data_model.pd.DataFrame', (["{'a': ['a', 'b', None, None], 'b': ['c', None, 'd', None], 'x': [1, 2, None,\n None], 'y': [3, None, 4, None]}"], {}), "({'a': ['a', 'b', None, None],\n 'b': ['c', None, 'd', None], 'x': [1, 2, None, None], 'y': [3, None, 4,\n None]})\n", (269, 387), False, 'import data_algebra\n'), ((719, 817), 'data_algebra.default_data_model.pd.DataFrame', 'data_algebra.default_data_model.pd.DataFrame', (["{'a': ['a'], 'b': ['c'], 'x': [1.0], 'y': [3.0]}"], {}), "({'a': ['a'], 'b': ['c'], 'x':\n [1.0], 'y': [3.0]})\n", (763, 817), False, 'import data_algebra\n'), ((834, 904), 'data_algebra.test_util.check_transform', 'data_algebra.test_util.check_transform', ([], {'ops': 'ops', 'data': 'd', 'expect': 'expect'}), '(ops=ops, data=d, expect=expect)\n', (872, 904), False, 'import data_algebra\n'), ((946, 1108), 'data_algebra.default_data_model.pd.DataFrame', 'data_algebra.default_data_model.pd.DataFrame', (["{'a': ['a', 'b', None, None], 'b': ['c', None, 'd', None], 'x': [1, 2, None,\n None], 'y': [3, None, 4, None]}"], {}), "({'a': ['a', 'b', None, None],\n 'b': ['c', None, 'd', None], 'x': [1, 2, None, None], 'y': [3, None, 4,\n None]})\n", (990, 1108), False, 'import data_algebra\n'), ((1434, 1532), 'data_algebra.default_data_model.pd.DataFrame', 'data_algebra.default_data_model.pd.DataFrame', (["{'a': ['a'], 'b': ['c'], 'x': [1.0], 'y': [3.0]}"], {}), "({'a': ['a'], 'b': ['c'], 'x':\n [1.0], 'y': [3.0]})\n", (1478, 1532), False, 'import data_algebra\n'), ((1549, 1619), 'data_algebra.test_util.check_transform', 'data_algebra.test_util.check_transform', ([], {'ops': 'ops', 'data': 'd', 'expect': 'expect'}), '(ops=ops, data=d, expect=expect)\n', (1587, 1619), False, 'import data_algebra\n'), ((586, 619), 'data_algebra.SQLite.SQLiteModel', 'data_algebra.SQLite.SQLiteModel', ([], {}), '()\n', (617, 619), False, 'import data_algebra\n'), ((1301, 1334), 'data_algebra.SQLite.SQLiteModel', 'data_algebra.SQLite.SQLiteModel', ([], {}), '()\n', (1332, 1334), False, 'import data_algebra\n')]
|
from setuptools import setup, find_packages
setup(
name='passgen-py',
packages=find_packages(),
version='1.1',
description='Generate Passwords Deterministically based on a Master Password.',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
],
python_requires='>=3.6, <4',
entry_points={
'console_scripts': [
'passgen=src:cli',
],
},
install_requires=['click', 'pyperclip'],
)
|
[
"setuptools.find_packages"
] |
[((88, 103), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (101, 103), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/env python
import sys
import colorama
from pick_db_file import pick_db_file
import db_connection
import card_repository
from review_cards import review_cards
from new_card import new_card
from new_cards import new_cards
import review
from user_colors import print_info, print_instruction, print_error
from usage_info import print_usage_info
def main():
# Initialise colorama
colorama.init()
valid_args = ['-n', '-n2', '-s', '-s2', '-e', '-e2', '-b', '-bf', '-bb', '-bs', '-bl', '-br']
if len(sys.argv) > 1 and sys.argv[1] not in valid_args:
print_usage_info(sys.argv)
if sys.argv[1] not in ['-h', '--help']:
sys.exit(1)
sys.exit()
db_file = pick_db_file()
conn, cursor = db_connection.connect(db_file)
card_repository.create_table_if_not_exists(conn, cursor)
if len(sys.argv) == 1:
table_is_empty = card_repository.check_if_empty(cursor)
if table_is_empty:
print_error("You don't have any cards yet.")
print_instruction(
'Create some cards by launching garrick with one of the following options first:'
)
print_instruction('\t-n\tCreate cards starting in one-way mode.')
print_instruction('\t-n2\tCreate cards starting in two-way mode.')
print_instruction('\t-s\tCreate cards starting in single-line and one-way mode.')
print_instruction('\t-s2\tCreate cards starting in single-line and two-way mode.')
print_instruction('\t-e\tCreate cards starting in editor mode and in one-way mode.')
print_instruction('\t-s2\tCreate cards starting in editor mode and in two-way mode.')
else:
review.review(conn, cursor)
elif sys.argv[1] == '-n':
new_cards(conn, cursor, two_way_card=False, single_line_mode=False, editor_mode=False)
elif sys.argv[1] == '-n2':
new_cards(conn, cursor, two_way_card=True, single_line_mode=False, editor_mode=False)
elif sys.argv[1] == '-s':
new_cards(conn, cursor, two_way_card=False, single_line_mode=True, editor_mode=False)
elif sys.argv[1] == '-s2':
new_cards(conn, cursor, two_way_card=True, single_line_mode=True, editor_mode=False)
elif sys.argv[1] == '-e':
new_cards(conn, cursor, two_way_card=False, single_line_mode=False, editor_mode=True)
elif sys.argv[1] == '-e2':
new_cards(conn, cursor, two_way_card=True, single_line_mode=False, editor_mode=True)
elif sys.argv[1] == '-b':
review.browse_by_regex(conn, cursor)
elif sys.argv[1] == '-bf':
review.browse_by_regex_front(conn, cursor)
elif sys.argv[1] == '-bb':
review.browse_by_regex_back(conn, cursor)
elif sys.argv[1] == '-bs':
review.browse_by_score(conn, cursor)
elif sys.argv[1] == '-bl':
review.browse_by_last_viewed(conn, cursor)
elif sys.argv[1] == '-br':
review.browse_by_last_viewed_reverse(conn, cursor)
print_info('Kbai')
db_connection.disconnect(conn, cursor)
if __name__ == '__main__':
main()
|
[
"user_colors.print_info",
"pick_db_file.pick_db_file",
"sys.exit",
"usage_info.print_usage_info",
"card_repository.check_if_empty",
"colorama.init",
"user_colors.print_error",
"review.browse_by_regex_front",
"review.browse_by_score",
"review.browse_by_last_viewed",
"review.browse_by_regex",
"new_cards.new_cards",
"card_repository.create_table_if_not_exists",
"review.browse_by_regex_back",
"review.review",
"db_connection.disconnect",
"db_connection.connect",
"review.browse_by_last_viewed_reverse",
"user_colors.print_instruction"
] |
[((396, 411), 'colorama.init', 'colorama.init', ([], {}), '()\n', (409, 411), False, 'import colorama\n'), ((721, 735), 'pick_db_file.pick_db_file', 'pick_db_file', ([], {}), '()\n', (733, 735), False, 'from pick_db_file import pick_db_file\n'), ((755, 785), 'db_connection.connect', 'db_connection.connect', (['db_file'], {}), '(db_file)\n', (776, 785), False, 'import db_connection\n'), ((790, 846), 'card_repository.create_table_if_not_exists', 'card_repository.create_table_if_not_exists', (['conn', 'cursor'], {}), '(conn, cursor)\n', (832, 846), False, 'import card_repository\n'), ((3009, 3027), 'user_colors.print_info', 'print_info', (['"""Kbai"""'], {}), "('Kbai')\n", (3019, 3027), False, 'from user_colors import print_info, print_instruction, print_error\n'), ((3037, 3075), 'db_connection.disconnect', 'db_connection.disconnect', (['conn', 'cursor'], {}), '(conn, cursor)\n', (3061, 3075), False, 'import db_connection\n'), ((580, 606), 'usage_info.print_usage_info', 'print_usage_info', (['sys.argv'], {}), '(sys.argv)\n', (596, 606), False, 'from usage_info import print_usage_info\n'), ((695, 705), 'sys.exit', 'sys.exit', ([], {}), '()\n', (703, 705), False, 'import sys\n'), ((904, 942), 'card_repository.check_if_empty', 'card_repository.check_if_empty', (['cursor'], {}), '(cursor)\n', (934, 942), False, 'import card_repository\n'), ((675, 686), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (683, 686), False, 'import sys\n'), ((982, 1026), 'user_colors.print_error', 'print_error', (['"""You don\'t have any cards yet."""'], {}), '("You don\'t have any cards yet.")\n', (993, 1026), False, 'from user_colors import print_info, print_instruction, print_error\n'), ((1039, 1149), 'user_colors.print_instruction', 'print_instruction', (['"""Create some cards by launching garrick with one of the following options first:"""'], {}), "(\n 'Create some cards by launching garrick with one of the following options first:'\n )\n", (1056, 1149), False, 'from user_colors import print_info, print_instruction, print_error\n'), ((1182, 1247), 'user_colors.print_instruction', 'print_instruction', (['"""\t-n\tCreate cards starting in one-way mode."""'], {}), "('\\t-n\\tCreate cards starting in one-way mode.')\n", (1199, 1247), False, 'from user_colors import print_info, print_instruction, print_error\n'), ((1260, 1326), 'user_colors.print_instruction', 'print_instruction', (['"""\t-n2\tCreate cards starting in two-way mode."""'], {}), "('\\t-n2\\tCreate cards starting in two-way mode.')\n", (1277, 1326), False, 'from user_colors import print_info, print_instruction, print_error\n'), ((1339, 1425), 'user_colors.print_instruction', 'print_instruction', (['"""\t-s\tCreate cards starting in single-line and one-way mode."""'], {}), "(\n '\\t-s\\tCreate cards starting in single-line and one-way mode.')\n", (1356, 1425), False, 'from user_colors import print_info, print_instruction, print_error\n'), ((1433, 1520), 'user_colors.print_instruction', 'print_instruction', (['"""\t-s2\tCreate cards starting in single-line and two-way mode."""'], {}), "(\n '\\t-s2\\tCreate cards starting in single-line and two-way mode.')\n", (1450, 1520), False, 'from user_colors import print_info, print_instruction, print_error\n'), ((1528, 1617), 'user_colors.print_instruction', 'print_instruction', (['"""\t-e\tCreate cards starting in editor mode and in one-way mode."""'], {}), "(\n '\\t-e\\tCreate cards starting in editor mode and in one-way mode.')\n", (1545, 1617), False, 'from user_colors import print_info, print_instruction, 
print_error\n'), ((1625, 1715), 'user_colors.print_instruction', 'print_instruction', (['"""\t-s2\tCreate cards starting in editor mode and in two-way mode."""'], {}), "(\n '\\t-s2\\tCreate cards starting in editor mode and in two-way mode.')\n", (1642, 1715), False, 'from user_colors import print_info, print_instruction, print_error\n'), ((1737, 1764), 'review.review', 'review.review', (['conn', 'cursor'], {}), '(conn, cursor)\n', (1750, 1764), False, 'import review\n'), ((1803, 1893), 'new_cards.new_cards', 'new_cards', (['conn', 'cursor'], {'two_way_card': '(False)', 'single_line_mode': '(False)', 'editor_mode': '(False)'}), '(conn, cursor, two_way_card=False, single_line_mode=False,\n editor_mode=False)\n', (1812, 1893), False, 'from new_cards import new_cards\n'), ((1929, 2018), 'new_cards.new_cards', 'new_cards', (['conn', 'cursor'], {'two_way_card': '(True)', 'single_line_mode': '(False)', 'editor_mode': '(False)'}), '(conn, cursor, two_way_card=True, single_line_mode=False,\n editor_mode=False)\n', (1938, 2018), False, 'from new_cards import new_cards\n'), ((2053, 2142), 'new_cards.new_cards', 'new_cards', (['conn', 'cursor'], {'two_way_card': '(False)', 'single_line_mode': '(True)', 'editor_mode': '(False)'}), '(conn, cursor, two_way_card=False, single_line_mode=True,\n editor_mode=False)\n', (2062, 2142), False, 'from new_cards import new_cards\n'), ((2178, 2266), 'new_cards.new_cards', 'new_cards', (['conn', 'cursor'], {'two_way_card': '(True)', 'single_line_mode': '(True)', 'editor_mode': '(False)'}), '(conn, cursor, two_way_card=True, single_line_mode=True,\n editor_mode=False)\n', (2187, 2266), False, 'from new_cards import new_cards\n'), ((2301, 2390), 'new_cards.new_cards', 'new_cards', (['conn', 'cursor'], {'two_way_card': '(False)', 'single_line_mode': '(False)', 'editor_mode': '(True)'}), '(conn, cursor, two_way_card=False, single_line_mode=False,\n editor_mode=True)\n', (2310, 2390), False, 'from new_cards import new_cards\n'), ((2426, 2514), 'new_cards.new_cards', 'new_cards', (['conn', 'cursor'], {'two_way_card': '(True)', 'single_line_mode': '(False)', 'editor_mode': '(True)'}), '(conn, cursor, two_way_card=True, single_line_mode=False,\n editor_mode=True)\n', (2435, 2514), False, 'from new_cards import new_cards\n'), ((2549, 2585), 'review.browse_by_regex', 'review.browse_by_regex', (['conn', 'cursor'], {}), '(conn, cursor)\n', (2571, 2585), False, 'import review\n'), ((2626, 2668), 'review.browse_by_regex_front', 'review.browse_by_regex_front', (['conn', 'cursor'], {}), '(conn, cursor)\n', (2654, 2668), False, 'import review\n'), ((2709, 2750), 'review.browse_by_regex_back', 'review.browse_by_regex_back', (['conn', 'cursor'], {}), '(conn, cursor)\n', (2736, 2750), False, 'import review\n'), ((2791, 2827), 'review.browse_by_score', 'review.browse_by_score', (['conn', 'cursor'], {}), '(conn, cursor)\n', (2813, 2827), False, 'import review\n'), ((2867, 2909), 'review.browse_by_last_viewed', 'review.browse_by_last_viewed', (['conn', 'cursor'], {}), '(conn, cursor)\n', (2895, 2909), False, 'import review\n'), ((2949, 2999), 'review.browse_by_last_viewed_reverse', 'review.browse_by_last_viewed_reverse', (['conn', 'cursor'], {}), '(conn, cursor)\n', (2985, 2999), False, 'import review\n')]
|
import numpy as np
from scipy.special import factorial
from pyapprox.indexing import hash_array
from pyapprox.indexing import compute_hyperbolic_level_indices
def multiply_multivariate_polynomials(indices1,coeffs1,indices2,coeffs2):
"""
TODO: instead of using dictionary to colect terms consider using
unique_indices,repeated_idx=np.unique(
indices[active_idx,:],axis=1,return_inverse=True)
as is done in multivariate_polynomials.conditional_moments_of_polynomial_chaos_expansion. Choose which one is faster
Parameters
----------
    indices1 : np.ndarray (num_vars, num_indices1)
        The multivariate indices of the first polynomial
    coeffs1 : np.ndarray (num_indices1)
        The coefficients of the first polynomial
    indices2 : np.ndarray (num_vars, num_indices2)
        The multivariate indices of the second polynomial
    coeffs2 : np.ndarray (num_indices2)
        The coefficients of the second polynomial
    Returns
    -------
    indices : np.ndarray (num_vars, num_terms)
        The multivariate indices of the product polynomial
    coeffs : np.ndarray (num_terms)
        The coefficients of the product polynomial
"""
num_vars = indices1.shape[0]
num_indices1 = indices1.shape[1]
num_indices2 = indices2.shape[1]
assert num_indices1==coeffs1.shape[0]
assert num_indices2==coeffs2.shape[0]
assert num_vars==indices2.shape[0]
indices_dict = dict()
max_num_indices = num_indices1*num_indices2
indices = np.empty((num_vars,max_num_indices),int)
coeffs = np.empty((max_num_indices),float)
kk = 0
for ii in range(num_indices1):
index1 = indices1[:,ii]
coeff1 = coeffs1[ii]
for jj in range(num_indices2):
index= index1+indices2[:,jj]
key = hash_array(index)
coeff = coeff1*coeffs2[jj]
if key in indices_dict:
coeffs[indices_dict[key]]+=coeff
else:
indices_dict[key]=kk
indices[:,kk]=index
coeffs[kk]=coeff
kk+=1
indices = indices[:,:kk]
coeffs = coeffs[:kk]
return indices, coeffs
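# Illustrative example (added comment): with two variables,
#   p1 = 1 + x1 -> indices1 = [[0, 1], [0, 0]], coeffs1 = [1, 1]
#   p2 = 1 + x2 -> indices2 = [[0, 0], [0, 1]], coeffs2 = [1, 1]
# and multiply_multivariate_polynomials returns the terms of
# (1 + x1)*(1 + x2) = 1 + x1 + x2 + x1*x2, each with coefficient 1.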
def coeffs_of_power_of_nd_linear_polynomial(num_vars, degree, linear_coeffs):
"""
Compute the polynomial (coefficients and indices) obtained by raising
a linear multivariate polynomial (no constant term) to some power.
Parameters
----------
num_vars : integer
The number of variables
degree : integer
The power of the linear polynomial
linear_coeffs: np.ndarray (num_vars)
The coefficients of the linear polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
The coefficients of the new polynomial
indices : np.ndarray (num_vars, num_terms)
The set of multivariate indices that define the new polynomial
"""
assert len(linear_coeffs)==num_vars
coeffs, indices=multinomial_coeffs_of_power_of_nd_linear_polynomial(
num_vars, degree)
for ii in range(indices.shape[1]):
index = indices[:,ii]
for dd in range(num_vars):
degree = index[dd]
coeffs[ii] *= linear_coeffs[dd]**degree
return coeffs, indices
def substitute_polynomial_for_variables_in_polynomial(
indices_in,coeffs_in,indices,coeffs,var_idx):
num_vars, num_terms = indices.shape
new_indices = []
new_coeffs = []
for ii in range(num_terms):
index = indices[:,ii]
pows = index[var_idx]
ind,cf = substitute_polynomial_for_variables_in_single_basis_term(
indices_in,coeffs_in,index,coeffs[ii],var_idx,pows)
new_indices.append(ind)
new_coeffs.append(cf)
new_indices = np.hstack(new_indices)
new_coeffs = np.vstack(new_coeffs)
return new_indices, new_coeffs
def substitute_polynomial_for_variables_in_single_basis_term(
indices_in,coeffs_in,basis_index,basis_coeff,var_idx,global_var_idx,
num_global_vars):
"""
var_idx : np.ndarray (nsub_vars)
The dimensions in basis_index which will be substituted
global_var_idx : [ np.ndarray(nvars[ii]) for ii in num_inputs]
The index of the active variables for each input
"""
num_inputs = var_idx.shape[0]
assert num_inputs==len(indices_in)
assert num_inputs==len(coeffs_in)
assert basis_coeff.shape[0]==1
assert var_idx.max()<basis_index.shape[0]
assert basis_index.shape[1]==1
assert len(global_var_idx)==num_inputs
# store input indices in global_var_idx
temp = []
for ii in range(num_inputs):
ind = np.zeros((num_global_vars,indices_in[ii].shape[1]))
ind[global_var_idx,:] = indices_in[ii]
temp.append(ind)
indices_in = temp
jj=0
degree = basis_index[var_idx[jj]]
c1,ind1 = coeffs_of_power_of_polynomial(
indices_in,coeffs_in[:,jj:jj+1],degree)
for jj in range(1,var_idx.shape[0]):
degree = basis_index[var_idx[jj]]
c2,ind2 = coeffs_of_power_of_polynomial(
indices_in,coeffs_in[:,jj:jj+1],degree)
ind1,c1 = multiply_multivariate_polynomials(ind1,c1,ind2,c2)
# this mask may be wrong. I might be confusing global and var idx
mask = np.ones(basis_index.shape[0],dtype=bool); mask[var_idx]=False
print(ind1.shape,mask.shape)
ind1[mask,:] += basis_index[mask]
c1*=basis_coeff
return ind1, c1
def composition_of_polynomials(indices_list,coeffs_list):
npolys = len(indices_list)
assert npolys==len(coeffs_list)
for ii in range(1,npolys):
new_poly = 2
return new_poly
def coeffs_of_power_of_polynomial(indices, coeffs, degree):
"""
Compute the polynomial (coefficients and indices) obtained by raising
a multivariate polynomial to some power.
TODO: Deprecate coeffs_of_power_of_nd_linear_polynomial as that function
can be obtained as a special case of this function
Parameters
----------
indices : np.ndarray (num_vars,num_terms)
The indices of the multivariate polynomial
coeffs: np.ndarray (num_vars)
The coefficients of the polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
The coefficients of the new polynomial
indices : np.ndarray (num_vars, num_terms)
The set of multivariate indices that define the new polynomial
"""
num_vars, num_terms = indices.shape
assert indices.shape[1]==coeffs.shape[0]
multinomial_coeffs, multinomial_indices = \
multinomial_coeffs_of_power_of_nd_linear_polynomial(num_terms, degree)
new_indices = np.zeros((num_vars,multinomial_indices.shape[1]))
new_coeffs = np.tile(multinomial_coeffs[:,np.newaxis],coeffs.shape[1])
for ii in range(multinomial_indices.shape[1]):
multinomial_index = multinomial_indices[:,ii]
for dd in range(num_terms):
deg = multinomial_index[dd]
new_coeffs[ii] *= coeffs[dd]**deg
new_indices[:,ii] += indices[:,dd]*deg
return new_coeffs, new_indices
def group_like_terms(coeffs, indices):
if coeffs.ndim==1:
coeffs = coeffs[:,np.newaxis]
num_vars,num_indices = indices.shape
indices_dict = {}
for ii in range(num_indices):
key = hash_array(indices[:,ii])
if not key in indices_dict:
indices_dict[key] = [coeffs[ii],ii]
else:
indices_dict[key] = [indices_dict[key][0]+coeffs[ii],ii]
new_coeffs = np.empty((len(indices_dict),coeffs.shape[1]))
new_indices = np.empty((num_vars,len(indices_dict)),dtype=int)
ii=0
for key, item in indices_dict.items():
new_indices[:,ii] = indices[:,item[1]]
new_coeffs[ii] = item[0]
ii+=1
return new_coeffs, new_indices
def multinomial_coefficient(index):
"""Compute the multinomial coefficient of an index [i1,i2,...,id].
Parameters
----------
index : multidimensional index
multidimensional index specifying the polynomial degree in each
dimension
Returns
-------
coeff : double
the multinomial coefficient
"""
level = index.sum()
denom = np.prod(factorial(index))
coeff = factorial(level)/denom
return coeff
def multinomial_coefficients(indices):
coeffs = np.empty((indices.shape[1]),float)
for i in range(indices.shape[1]):
coeffs[i] = multinomial_coefficient(indices[:,i])
return coeffs
def multinomial_coeffs_of_power_of_nd_linear_polynomial(num_vars,degree):
""" Compute the multinomial coefficients of the individual terms
obtained when taking the power of a linear polynomial
(without constant term).
    Given a linear multivariate polynomial, e.g.
    (x1+x2+x3)**2 = x1**2+2*x1*x2+2*x1*x3+x2**2+2*x2*x3+x3**2
return the coefficients of each quadratic term, i.e.
[1,2,2,1,2,1]
Parameters
----------
num_vars : integer
the dimension of the multivariate polynomial
degree : integer
the power of the linear polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
the multinomial coefficients of the polynomial obtained when
raising the linear multivariate polynomial to the power=degree
    indices: np.ndarray (num_vars, num_terms)
the indices of the polynomial obtained when
raising the linear multivariate polynomial to the power=degree
"""
indices = compute_hyperbolic_level_indices(num_vars,degree,1.0)
coeffs = multinomial_coefficients(indices)
return coeffs, indices
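# A minimal usage sketch (illustrative only; the helper below is not part of
# the original API). For (x1+x2+x3)**2 the quadratic terms carry the
# multinomial coefficients [1, 2, 2, 1, 2, 1], which sum to 3**2 = 9.
def _example_multinomial_coeffs_of_square():
    coeffs, indices = multinomial_coeffs_of_power_of_nd_linear_polynomial(3, 2)
    assert coeffs.shape[0] == indices.shape[1]
    assert coeffs.sum() == 9
    return coeffs, indices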
def add_polynomials(indices_list, coeffs_list):
"""
Add many polynomials together.
Example:
p1 = x1**2+x2+x3, p2 = x2**2+2*x3
p3 = p1+p2
    return the degrees of each term in the polynomial
p3 = x1**2+x2+3*x3+x2**2
[2, 1, 1, 2]
and the coefficients of each of these terms
[1., 1., 3., 1.]
Parameters
----------
indices_list : list [np.ndarray (num_vars,num_indices_i)]
List of polynomial indices. indices_i may be different for each
polynomial
coeffs_list : list [np.ndarray (num_indices_i,num_qoi)]
List of polynomial coefficients. indices_i may be different for each
polynomial. num_qoi must be the same for each list element.
Returns
-------
indices: np.ndarray (num_vars,num_terms)
the polynomial indices of the polynomial obtained from
summing the polynomials. This will be the union of the indices
of the input polynomials
coeffs: np.ndarray (num_terms,num_qoi)
the polynomial coefficients of the polynomial obtained from
summing the polynomials
"""
num_polynomials = len(indices_list)
assert num_polynomials==len(coeffs_list)
indices_dict = dict()
indices = []
coeff = []
ii=0; kk=0
for jj in range(indices_list[ii].shape[1]):
assert coeffs_list[ii].ndim==2
assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
index=indices_list[ii][:,jj]
indices_dict[hash_array(index)]=kk
indices.append(index)
coeff.append(coeffs_list[ii][jj,:].copy())
kk+=1
for ii in range(1,num_polynomials):
#print indices_list[ii].T,num_polynomials
assert coeffs_list[ii].ndim==2
assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
for jj in range(indices_list[ii].shape[1]):
index=indices_list[ii][:,jj]
key = hash_array(index)
if key in indices_dict:
nn = indices_dict[key]
coeff[nn]+=coeffs_list[ii][jj,:]
else:
indices_dict[key]=kk
indices.append(index)
coeff.append(coeffs_list[ii][jj,:].copy())
kk+=1
indices = np.asarray(indices).T
coeff = np.asarray(coeff)
return indices, coeff
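# A hedged usage sketch mirroring the docstring example above (the helper is
# illustrative, not part of the original API): p1 = x1**2+x2+x3 and
# p2 = x2**2+2*x3 share the x3 term, so the sum has four terms and the x3
# coefficient becomes 1 + 2 = 3.
def _example_add_polynomials():
    indices1 = np.array([[2, 0, 0], [0, 1, 0], [0, 0, 1]]).T  # columns: x1**2, x2, x3
    coeffs1 = np.ones((3, 1))
    indices2 = np.array([[0, 2, 0], [0, 0, 1]]).T  # columns: x2**2, x3
    coeffs2 = np.array([[1.0], [2.0]])
    indices, coeffs = add_polynomials([indices1, indices2], [coeffs1, coeffs2])
    assert indices.shape[1] == 4
    assert coeffs[2, 0] == 3.0
    return indices, coeffs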
def get_indices_double_set(indices):
"""
    Given multivariate indices
[i1,i2,...,]
Compute its double set by
[i1*i1,i1*i2,...,i2*i2,i2*i3...]
The double set will only contain unique indices
Parameters
----------
indices : np.ndarray (num_vars,num_indices)
The initial indices
Returns
-------
double_set_indices : np.ndarray (num_vars,num_indices)
The double set of indices
"""
dummy_coeffs = np.zeros(indices.shape[1])
double_set_indices = multiply_multivariate_polynomials(
indices,dummy_coeffs,indices,dummy_coeffs)[0]
return double_set_indices
def shift_momomial_expansion(coef,shift,scale):
assert coef.ndim==1
shifted_coef = np.zeros_like(coef)
shifted_coef[0]=coef[0]
nterms = coef.shape[0]
for ii in range(1,nterms):
temp = np.polynomial.polynomial.polypow([1,-shift],ii)
shifted_coef[:ii+1] += coef[ii]*temp[::-1]/scale**ii
return shifted_coef
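# A small illustrative check (the helper is not part of the original API):
# shifting the monomial expansion of p(y) = y by shift=1, scale=2 yields the
# coefficients of p((x-1)/2) = -0.5 + 0.5*x in increasing-degree order.
def _example_shift_momomial_expansion():
    shifted = shift_momomial_expansion(np.array([0.0, 1.0]), 1.0, 2.0)
    assert np.allclose(shifted, [-0.5, 0.5])
    return shifted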
#Some of these functions can be replaced by numpy functions described at
#https://docs.scipy.org/doc/numpy/reference/routines.polynomials.polynomial.html
|
[
"numpy.tile",
"numpy.ones",
"numpy.hstack",
"scipy.special.factorial",
"pyapprox.indexing.compute_hyperbolic_level_indices",
"numpy.asarray",
"pyapprox.indexing.hash_array",
"numpy.zeros",
"numpy.empty",
"numpy.vstack",
"numpy.polynomial.polynomial.polypow",
"numpy.zeros_like"
] |
[((1047, 1089), 'numpy.empty', 'np.empty', (['(num_vars, max_num_indices)', 'int'], {}), '((num_vars, max_num_indices), int)\n', (1055, 1089), True, 'import numpy as np\n'), ((1101, 1133), 'numpy.empty', 'np.empty', (['max_num_indices', 'float'], {}), '(max_num_indices, float)\n', (1109, 1133), True, 'import numpy as np\n'), ((3273, 3295), 'numpy.hstack', 'np.hstack', (['new_indices'], {}), '(new_indices)\n', (3282, 3295), True, 'import numpy as np\n'), ((3313, 3334), 'numpy.vstack', 'np.vstack', (['new_coeffs'], {}), '(new_coeffs)\n', (3322, 3334), True, 'import numpy as np\n'), ((4780, 4821), 'numpy.ones', 'np.ones', (['basis_index.shape[0]'], {'dtype': 'bool'}), '(basis_index.shape[0], dtype=bool)\n', (4787, 4821), True, 'import numpy as np\n'), ((6143, 6193), 'numpy.zeros', 'np.zeros', (['(num_vars, multinomial_indices.shape[1])'], {}), '((num_vars, multinomial_indices.shape[1]))\n', (6151, 6193), True, 'import numpy as np\n'), ((6210, 6269), 'numpy.tile', 'np.tile', (['multinomial_coeffs[:, np.newaxis]', 'coeffs.shape[1]'], {}), '(multinomial_coeffs[:, np.newaxis], coeffs.shape[1])\n', (6217, 6269), True, 'import numpy as np\n'), ((7823, 7856), 'numpy.empty', 'np.empty', (['indices.shape[1]', 'float'], {}), '(indices.shape[1], float)\n', (7831, 7856), True, 'import numpy as np\n'), ((8948, 9003), 'pyapprox.indexing.compute_hyperbolic_level_indices', 'compute_hyperbolic_level_indices', (['num_vars', 'degree', '(1.0)'], {}), '(num_vars, degree, 1.0)\n', (8980, 9003), False, 'from pyapprox.indexing import compute_hyperbolic_level_indices\n'), ((11416, 11433), 'numpy.asarray', 'np.asarray', (['coeff'], {}), '(coeff)\n', (11426, 11433), True, 'import numpy as np\n'), ((11944, 11970), 'numpy.zeros', 'np.zeros', (['indices.shape[1]'], {}), '(indices.shape[1])\n', (11952, 11970), True, 'import numpy as np\n'), ((12207, 12226), 'numpy.zeros_like', 'np.zeros_like', (['coef'], {}), '(coef)\n', (12220, 12226), True, 'import numpy as np\n'), ((4154, 4206), 'numpy.zeros', 'np.zeros', (['(num_global_vars, indices_in[ii].shape[1])'], {}), '((num_global_vars, indices_in[ii].shape[1]))\n', (4162, 4206), True, 'import numpy as np\n'), ((6799, 6825), 'pyapprox.indexing.hash_array', 'hash_array', (['indices[:, ii]'], {}), '(indices[:, ii])\n', (6809, 6825), False, 'from pyapprox.indexing import hash_array\n'), ((7700, 7716), 'scipy.special.factorial', 'factorial', (['index'], {}), '(index)\n', (7709, 7716), False, 'from scipy.special import factorial\n'), ((7730, 7746), 'scipy.special.factorial', 'factorial', (['level'], {}), '(level)\n', (7739, 7746), False, 'from scipy.special import factorial\n'), ((11382, 11401), 'numpy.asarray', 'np.asarray', (['indices'], {}), '(indices)\n', (11392, 11401), True, 'import numpy as np\n'), ((12328, 12377), 'numpy.polynomial.polynomial.polypow', 'np.polynomial.polynomial.polypow', (['[1, -shift]', 'ii'], {}), '([1, -shift], ii)\n', (12360, 12377), True, 'import numpy as np\n'), ((1340, 1357), 'pyapprox.indexing.hash_array', 'hash_array', (['index'], {}), '(index)\n', (1350, 1357), False, 'from pyapprox.indexing import hash_array\n'), ((10618, 10635), 'pyapprox.indexing.hash_array', 'hash_array', (['index'], {}), '(index)\n', (10628, 10635), False, 'from pyapprox.indexing import hash_array\n'), ((11051, 11068), 'pyapprox.indexing.hash_array', 'hash_array', (['index'], {}), '(index)\n', (11061, 11068), False, 'from pyapprox.indexing import hash_array\n')]
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from mock import Mock, patch
from vault.tests.fakes import fake_request
from vault.views import SetProjectView
from django.utils.translation import ugettext as _
class SetProjectTest(TestCase):
def setUp(self):
self.view = SetProjectView.as_view()
self.request = fake_request(method='GET')
self.request.user.is_authenticated = lambda: False
def tearDown(self):
patch.stopall()
def test_set_project_needs_authentication(self):
response = self.view(self.request)
self.assertEqual(response.status_code, 302)
@patch('vault.views.switch')
def test_set_new_project_id_to_session(self, mock_switch):
self.request.user.is_authenticated = lambda: True
self.assertEqual(self.request.session.get('project_id'), '1')
response = self.view(self.request, project_id=2)
self.assertEqual(self.request.session.get('project_id'), 2)
@patch('vault.views.switch')
def test_set_new_project_id_to_session_exception(self, mock_switch):
self.request.user.is_authenticated = lambda: True
mock_switch.side_effect = ValueError()
self.assertEqual(self.request.session.get('project_id'), '1')
response = self.view(self.request, project_id=2)
self.assertEqual(self.request.session.get('project_id'), 2)
msgs = [msg for msg in self.request._messages]
self.assertEqual(msgs[0].message, _('Unable to change your project.'))
|
[
"mock.patch.stopall",
"mock.patch",
"vault.views.SetProjectView.as_view",
"django.utils.translation.ugettext",
"vault.tests.fakes.fake_request"
] |
[((632, 659), 'mock.patch', 'patch', (['"""vault.views.switch"""'], {}), "('vault.views.switch')\n", (637, 659), False, 'from mock import Mock, patch\n'), ((983, 1010), 'mock.patch', 'patch', (['"""vault.views.switch"""'], {}), "('vault.views.switch')\n", (988, 1010), False, 'from mock import Mock, patch\n'), ((294, 318), 'vault.views.SetProjectView.as_view', 'SetProjectView.as_view', ([], {}), '()\n', (316, 318), False, 'from vault.views import SetProjectView\n'), ((342, 368), 'vault.tests.fakes.fake_request', 'fake_request', ([], {'method': '"""GET"""'}), "(method='GET')\n", (354, 368), False, 'from vault.tests.fakes import fake_request\n'), ((461, 476), 'mock.patch.stopall', 'patch.stopall', ([], {}), '()\n', (474, 476), False, 'from mock import Mock, patch\n'), ((1485, 1520), 'django.utils.translation.ugettext', '_', (['"""Unable to change your project."""'], {}), "('Unable to change your project.')\n", (1486, 1520), True, 'from django.utils.translation import ugettext as _\n')]
|
import numpy as np
import cv2
import os
import math
os.system("fswebcam -r 507x456 --no-banner image11.jpg")
def showImage(capImg):
cv2.imshow('img', capImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
img = cv2.imread('image11.jpg',-1)
height, width, channel = img.shape
topy= height
topx = width
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_color = np.array([0,255,255])
upper_color = np.array([0,255,255])
mask = cv2.inRange(hsv, lower_color, upper_color)
res = cv2.bitwise_and(img,img, mask=mask)
'''def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img, (x,y), 100, (255,255,255), -1)'''
'''cap = cv2.VideoCapture(-1)
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('hjhj', gray)
    if cv2.waitKey(0) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()'''
propx = (topx/512)
propy = (topy/512)
'''lineX1 = int(0*propx)
lineY2 = int(0*propy)
lineX2 = int(511*propx)
lineY1 = int(511*propy)
img = cv2.line(img, (lineX1,lineY1), (lineX2, lineY2), (255,255,255), 5)'''
w = 100*(propx+propy)/2
x1 = int(topx/2 - w/2)
x2 = int(topx/2 + w/2)
y1 = int(topy/2 + w/2)
y2 = int(topy/2 - w/2)
img = cv2.rectangle(res, (x1,y1), (x2,y2), (0,255,0),3)
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
showImage(img)
ret, thresh = cv2.threshold(img, 15, 250, 0)
showImage(thresh)
image, contours, heirarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#showImage(image)
cv2.drawContours(img, contours, 0, (0,255,0), 3)
showImage(img)
print('Num of Contours ', len(contours))
cnt = contours[0]
M = cv2.moments(cnt)
print (M)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
area = cv2.contourArea(cnt)
print (cx)
print (cy)
print (area)
'''xCircle = 40*propx
xCircle = int(xCircle)
yCircle = xCircle
radCircle = xCircle
img = cv2.circle(img, (xCircle, yCircle), radCircle, (0,0,255),-1)
x3 = int(topx - 60*propx)
y3 = int(topy - 110*propy)
minAx = int(50*propx)
majAx = int(100*propy)
img = cv2.ellipse(img, (x3, y3), (minAx,majAx), 0, 0, 360, (0,150,255), -1)'''
'''pt1X = int(70*propx)
pt1Y = int(60*propy)
pt2X = int(154*propx)
pt2Y = int(23*propy)
pt3X = int(500*propx)
pt3Y = int(3*propy)'''
#pts = np.array([[pt1X, pt1Y], [pt2X, pt2Y], [pt3X, pt3Y]], np.int32)
#pts = pts.reshape((-1,1,2))
#img = cv2.polylines(img, [pts], True, (100,100,234))
#font = cv2.FONT_HERSHEY_SIMPLEX
#startPtX = int(240*propx)
#startPtY = int(240*propy)
#scale = 2*(propx + propy)/2
#cv2.putText(img, 'Apurva', (startPtX, startPtY), font, scale, (210, 80, 150), 4, cv2.LINE_AA)
#cv2.imshow("kl", img)
'''cv2.setMouseCallback('kl', draw_circle)'''
''''''
#cv2.imshow('frame', img)
#cv2.imshow('mask',mask)
cv2.imshow('res',res)
'''sd = img[130:200, 175:245]
img[20:90, 140:210]=sd
cv2.imshow("kl", img)'''
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.rectangle",
"cv2.drawContours",
"cv2.threshold",
"cv2.inRange",
"cv2.bitwise_and",
"cv2.contourArea",
"cv2.imshow",
"numpy.array",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.moments",
"cv2.findContours",
"os.system",
"cv2.imread"
] |
[((52, 108), 'os.system', 'os.system', (['"""fswebcam -r 507x456 --no-banner image11.jpg"""'], {}), "('fswebcam -r 507x456 --no-banner image11.jpg')\n", (61, 108), False, 'import os\n'), ((217, 246), 'cv2.imread', 'cv2.imread', (['"""image11.jpg"""', '(-1)'], {}), "('image11.jpg', -1)\n", (227, 246), False, 'import cv2\n'), ((314, 350), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (326, 350), False, 'import cv2\n'), ((366, 389), 'numpy.array', 'np.array', (['[0, 255, 255]'], {}), '([0, 255, 255])\n', (374, 389), True, 'import numpy as np\n'), ((402, 425), 'numpy.array', 'np.array', (['[0, 255, 255]'], {}), '([0, 255, 255])\n', (410, 425), True, 'import numpy as np\n'), ((432, 474), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_color', 'upper_color'], {}), '(hsv, lower_color, upper_color)\n', (443, 474), False, 'import cv2\n'), ((482, 518), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (497, 518), False, 'import cv2\n'), ((1256, 1310), 'cv2.rectangle', 'cv2.rectangle', (['res', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(3)'], {}), '(res, (x1, y1), (x2, y2), (0, 255, 0), 3)\n', (1269, 1310), False, 'import cv2\n'), ((1313, 1350), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1325, 1350), False, 'import cv2\n'), ((1381, 1411), 'cv2.threshold', 'cv2.threshold', (['img', '(15)', '(250)', '(0)'], {}), '(img, 15, 250, 0)\n', (1394, 1411), False, 'import cv2\n'), ((1460, 1524), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1476, 1524), False, 'import cv2\n'), ((1543, 1593), 'cv2.drawContours', 'cv2.drawContours', (['img', 'contours', '(0)', '(0, 255, 0)', '(3)'], {}), '(img, contours, 0, (0, 255, 0), 3)\n', (1559, 1593), False, 'import cv2\n'), ((1672, 1688), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (1683, 1688), False, 'import cv2\n'), ((1763, 1783), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (1778, 1783), False, 'import cv2\n'), ((2782, 2804), 'cv2.imshow', 'cv2.imshow', (['"""res"""', 'res'], {}), "('res', res)\n", (2792, 2804), False, 'import cv2\n'), ((2884, 2898), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2895, 2898), False, 'import cv2\n'), ((2899, 2922), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2920, 2922), False, 'import cv2\n'), ((137, 162), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'capImg'], {}), "('img', capImg)\n", (147, 162), False, 'import cv2\n'), ((167, 181), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (178, 181), False, 'import cv2\n'), ((186, 209), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (207, 209), False, 'import cv2\n')]
|
import json
from optimism.JaxConfig import *
from optimism import Mesh
def read_json_mesh(meshFileName):
with open(meshFileName, 'r', encoding='utf-8') as jsonFile:
meshData = json.load(jsonFile)
coordinates = np.array(meshData['coordinates'])
connectivity = np.array(meshData['connectivity'], dtype=int)
nodeSets = {}
for key in meshData['nodeSets']:
nodeSets[key] = np.array(meshData['nodeSets'][key])
sideSets = {}
exodusSideSets = meshData['sideSets']
for key in exodusSideSets:
elements = np.array(exodusSideSets[key][0], dtype=int)
sides = np.array(exodusSideSets[key][1], dtype=int)
sideSets[key] = np.column_stack((elements, sides))
blocks=None
return Mesh.construct_mesh_from_basic_data(coordinates, connectivity,
blocks, nodeSets, sideSets)
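# A hedged sketch of the JSON layout this reader expects, produced by a
# hypothetical helper that is not part of optimism; the one-triangle mesh
# below is purely illustrative.
def write_example_mesh_json(meshFileName):
    meshData = {'coordinates': [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
                'connectivity': [[0, 1, 2]],
                'nodeSets': {'left': [0, 2]},
                'sideSets': {'bottom': [[0], [0]]}}
    with open(meshFileName, 'w', encoding='utf-8') as jsonFile:
        json.dump(meshData, jsonFile)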
|
[
"json.load",
"optimism.Mesh.construct_mesh_from_basic_data"
] |
[((763, 857), 'optimism.Mesh.construct_mesh_from_basic_data', 'Mesh.construct_mesh_from_basic_data', (['coordinates', 'connectivity', 'blocks', 'nodeSets', 'sideSets'], {}), '(coordinates, connectivity, blocks,\n nodeSets, sideSets)\n', (798, 857), False, 'from optimism import Mesh\n'), ((191, 210), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (200, 210), False, 'import json\n')]
|
"""Linux-specific code"""
from pysyte.types import paths
def xdg_home():
"""path to $XDG_CONFIG_HOME
>>> assert xdg_home() == paths.path('~/.config').expand()
"""
return paths.environ_path("XDG_CONFIG_HOME", "~/.config")
def xdg_home_config(filename):
"""path to that file in $XDG_CONFIG_HOME
>>> assert xdg_home_config('fred') == paths.path('~/.config/fred').expand()
"""
return xdg_home() / filename
def xdg_dirs():
"""paths in $XDG_CONFIG_DIRS"""
return paths.environ_paths("XDG_CONFIG_DIRS")
def xdg_homes():
return [xdg_home()]
bash_paste = "xclip -selection clipboard"
bash_copy = "xclip -selection clipboard -o"
|
[
"pysyte.types.paths.environ_paths",
"pysyte.types.paths.environ_path"
] |
[((191, 241), 'pysyte.types.paths.environ_path', 'paths.environ_path', (['"""XDG_CONFIG_HOME"""', '"""~/.config"""'], {}), "('XDG_CONFIG_HOME', '~/.config')\n", (209, 241), False, 'from pysyte.types import paths\n'), ((507, 545), 'pysyte.types.paths.environ_paths', 'paths.environ_paths', (['"""XDG_CONFIG_DIRS"""'], {}), "('XDG_CONFIG_DIRS')\n", (526, 545), False, 'from pysyte.types import paths\n')]
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract Base Class for quantize emulation in custom keras layers."""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class QuantizeEmulatableLayer(object):
"""Abstract Base Class for quantize emulation in custom keras layers.
Custom keras layers which want to implement quantization of their operations
during training should implement this class.
"""
@abc.abstractmethod
def get_quantizable_weights(self):
"""Returns list of quantizable weight tensors.
All the weight tensors which the layer wants to be quantized during
training must be returned by this method.
Returns: List of weight tensors/kernels in the keras layer which must be
quantized during training.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def set_quantizable_weights(self, weights):
"""Sets list of quantizable weight tensors.
This method replaces the existing quantizable weight tensors for
the layer with the specified set of weights.
Arguments:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the layer (i.e. it should match the
output of `get_quantizable_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
"""
raise NotImplementedError('Must be implemented in subclasses.')
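# A minimal illustrative subclass (hypothetical, not part of this module)
# showing the contract: expose the weight tensors to be quantized and accept
# replacements of the same length.
class _ExampleQuantizableLayer(QuantizeEmulatableLayer):
  """Toy implementation that treats a single kernel tensor as quantizable."""
  def __init__(self, kernel):
    self._kernel = kernel
  def get_quantizable_weights(self):
    return [self._kernel]
  def set_quantizable_weights(self, weights):
    if len(weights) != 1:
      raise ValueError('Expected exactly one weight, got %d.' % len(weights))
    self._kernel = weights[0]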
|
[
"six.add_metaclass"
] |
[((788, 818), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (805, 818), False, 'import six\n')]
|
# Standard imports
import sys
# Project imports
from icecreamscrape.cli import cli
from icecreamscrape.webdriver import driver_factory
from icecreamscrape import composites as comps
from icecreamscrape.composites import create_timestamped_dir
def main(args=sys.argv[1:]):
""" Main function. :param: args is used for testing """
user_inputs = cli(args)
url = user_inputs.params.url
active_features = user_inputs.active_features
if len(active_features) > 0:
time_dir = create_timestamped_dir()
with driver_factory(url) as driver:
for feature in active_features:
getattr(sys.modules[comps.__name__],
feature)(driver, time_dir)
def init():
""" Init construction allows for testing """
if __name__ == "__main__":
sys.exit(main())
init()
|
[
"icecreamscrape.cli.cli",
"icecreamscrape.webdriver.driver_factory",
"icecreamscrape.composites.create_timestamped_dir"
] |
[((347, 356), 'icecreamscrape.cli.cli', 'cli', (['args'], {}), '(args)\n', (350, 356), False, 'from icecreamscrape.cli import cli\n'), ((477, 501), 'icecreamscrape.composites.create_timestamped_dir', 'create_timestamped_dir', ([], {}), '()\n', (499, 501), False, 'from icecreamscrape.composites import create_timestamped_dir\n'), ((509, 528), 'icecreamscrape.webdriver.driver_factory', 'driver_factory', (['url'], {}), '(url)\n', (523, 528), False, 'from icecreamscrape.webdriver import driver_factory\n')]
|
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.controllers.mission.mission_unit_test.py is part of The
# RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for testing Mission module algorithms and models."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKMissionRecord
from ramstk.models.dbtables import RAMSTKMissionTable
from tests import (
MockDAO,
UnitTestDeleteMethods,
UnitTestGetterSetterMethods,
UnitTestInsertMethods,
UnitTestSelectMethods,
)
@pytest.mark.usefixtures("test_record_model", "unit_test_table_model")
class TestCreateMissionModels:
"""Class for unit testing Mission model __init__() methods.
Because each table model contains unique attributes, these methods must be
local to the module being tested.
"""
__test__ = True
@pytest.mark.unit
def test_record_model_create(self, test_record_model):
"""Should return a Mission record model instance."""
assert isinstance(test_record_model, RAMSTKMissionRecord)
# Verify class attributes are properly initialized.
assert test_record_model.__tablename__ == "ramstk_mission"
assert test_record_model.revision_id == 1
assert test_record_model.description == "Test mission #1"
assert test_record_model.mission_time == 100.0
assert test_record_model.time_units == "hours"
@pytest.mark.unit
def test_data_manager_create(self, unit_test_table_model):
"""Return a Mission table model instance."""
assert isinstance(unit_test_table_model, RAMSTKMissionTable)
assert isinstance(unit_test_table_model.tree, Tree)
assert isinstance(unit_test_table_model.dao, MockDAO)
assert unit_test_table_model._db_id_colname == "fld_mission_id"
assert unit_test_table_model._db_tablename == "ramstk_mission"
assert unit_test_table_model._tag == "mission"
assert unit_test_table_model._root == 0
assert unit_test_table_model._revision_id == 0
assert pub.isSubscribed(
unit_test_table_model.do_get_attributes, "request_get_mission_attributes"
)
assert pub.isSubscribed(
unit_test_table_model.do_get_tree, "request_get_mission_tree"
)
assert pub.isSubscribed(
unit_test_table_model.do_select_all, "selected_revision"
)
assert pub.isSubscribed(
unit_test_table_model.do_update, "request_update_mission"
)
assert pub.isSubscribed(
unit_test_table_model.do_update_all, "request_update_all_mission"
)
assert pub.isSubscribed(
unit_test_table_model.do_delete, "request_delete_mission"
)
assert pub.isSubscribed(
unit_test_table_model.do_insert, "request_insert_mission"
)
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestSelectMission(UnitTestSelectMethods):
"""Class for unit testing Mission table do_select() and do_select_all()."""
__test__ = True
_record = RAMSTKMissionRecord
_tag = "mission"
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestInsertMission(UnitTestInsertMethods):
"""Class for unit testing Mission table do_insert() method."""
__test__ = True
_next_id = 0
_record = RAMSTKMissionRecord
_tag = "mission"
@pytest.mark.skip(reason="Mission records are non-hierarchical.")
def test_do_insert_child(self, test_attributes, unit_test_table_model):
"""Should not run because Mission records are not hierarchical."""
pass
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestDeleteMission(UnitTestDeleteMethods):
"""Class for unit testing Mission table do_delete() method."""
__test__ = True
_next_id = 0
_record = RAMSTKMissionRecord
_tag = "mission"
@pytest.mark.usefixtures("test_attributes", "test_record_model")
class TestGetterSetterMission(UnitTestGetterSetterMethods):
"""Class for unit testing Mission table methods that get or set."""
__test__ = True
_id_columns = [
"revision_id",
"mission_id",
]
_test_attr = "mission_time"
_test_default_value = 0.0
@pytest.mark.unit
def test_get_record_model_attributes(self, test_record_model):
"""Should return a dict of attribute key:value pairs.
This method must be local because the attributes are different for each
database record model.
"""
_attributes = test_record_model.get_attributes()
assert isinstance(_attributes, dict)
assert _attributes["revision_id"] == 1
assert _attributes["description"] == "Test mission #1"
assert _attributes["mission_time"] == 100.0
assert _attributes["time_units"] == "hours"
|
[
"pytest.mark.skip",
"pytest.mark.usefixtures",
"pubsub.pub.isSubscribed"
] |
[((708, 777), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""test_record_model"""', '"""unit_test_table_model"""'], {}), "('test_record_model', 'unit_test_table_model')\n", (731, 777), False, 'import pytest\n'), ((3035, 3102), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""test_attributes"""', '"""unit_test_table_model"""'], {}), "('test_attributes', 'unit_test_table_model')\n", (3058, 3102), False, 'import pytest\n'), ((3311, 3378), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""test_attributes"""', '"""unit_test_table_model"""'], {}), "('test_attributes', 'unit_test_table_model')\n", (3334, 3378), False, 'import pytest\n'), ((3826, 3893), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""test_attributes"""', '"""unit_test_table_model"""'], {}), "('test_attributes', 'unit_test_table_model')\n", (3849, 3893), False, 'import pytest\n'), ((4106, 4169), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""test_attributes"""', '"""test_record_model"""'], {}), "('test_attributes', 'test_record_model')\n", (4129, 4169), False, 'import pytest\n'), ((3594, 3658), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Mission records are non-hierarchical."""'}), "(reason='Mission records are non-hierarchical.')\n", (3610, 3658), False, 'import pytest\n'), ((2229, 2324), 'pubsub.pub.isSubscribed', 'pub.isSubscribed', (['unit_test_table_model.do_get_attributes', '"""request_get_mission_attributes"""'], {}), "(unit_test_table_model.do_get_attributes,\n 'request_get_mission_attributes')\n", (2245, 2324), False, 'from pubsub import pub\n'), ((2358, 2437), 'pubsub.pub.isSubscribed', 'pub.isSubscribed', (['unit_test_table_model.do_get_tree', '"""request_get_mission_tree"""'], {}), "(unit_test_table_model.do_get_tree, 'request_get_mission_tree')\n", (2374, 2437), False, 'from pubsub import pub\n'), ((2475, 2549), 'pubsub.pub.isSubscribed', 'pub.isSubscribed', (['unit_test_table_model.do_select_all', '"""selected_revision"""'], {}), "(unit_test_table_model.do_select_all, 'selected_revision')\n", (2491, 2549), False, 'from pubsub import pub\n'), ((2587, 2662), 'pubsub.pub.isSubscribed', 'pub.isSubscribed', (['unit_test_table_model.do_update', '"""request_update_mission"""'], {}), "(unit_test_table_model.do_update, 'request_update_mission')\n", (2603, 2662), False, 'from pubsub import pub\n'), ((2700, 2787), 'pubsub.pub.isSubscribed', 'pub.isSubscribed', (['unit_test_table_model.do_update_all', '"""request_update_all_mission"""'], {}), "(unit_test_table_model.do_update_all,\n 'request_update_all_mission')\n", (2716, 2787), False, 'from pubsub import pub\n'), ((2821, 2896), 'pubsub.pub.isSubscribed', 'pub.isSubscribed', (['unit_test_table_model.do_delete', '"""request_delete_mission"""'], {}), "(unit_test_table_model.do_delete, 'request_delete_mission')\n", (2837, 2896), False, 'from pubsub import pub\n'), ((2934, 3009), 'pubsub.pub.isSubscribed', 'pub.isSubscribed', (['unit_test_table_model.do_insert', '"""request_insert_mission"""'], {}), "(unit_test_table_model.do_insert, 'request_insert_mission')\n", (2950, 3009), False, 'from pubsub import pub\n')]
|
from pheasant.renderers.jupyter.jupyter import Jupyter
jupyter = Jupyter()
jupyter.findall("{{3}}3{{5}}")
jupyter.page
|
[
"pheasant.renderers.jupyter.jupyter.Jupyter"
] |
[((66, 75), 'pheasant.renderers.jupyter.jupyter.Jupyter', 'Jupyter', ([], {}), '()\n', (73, 75), False, 'from pheasant.renderers.jupyter.jupyter import Jupyter\n')]
|
from app.app import create_app
from config import BaseConfig
app = create_app(BaseConfig)
|
[
"app.app.create_app"
] |
[((68, 90), 'app.app.create_app', 'create_app', (['BaseConfig'], {}), '(BaseConfig)\n', (78, 90), False, 'from app.app import create_app\n')]
|
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
from unittest import mock
from unittest.mock import MagicMock
import pytest
import pynguin.configuration as config
import pynguin.testcase.defaulttestcase as dtc
import pynguin.testcase.statements.primitivestatements as prim
import pynguin.testcase.testcase as tc
import pynguin.testcase.variable.variablereferenceimpl as vri
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_value(statement_type, test_case_mock, value):
statement = statement_type(test_case_mock, value)
assert statement.value == value
@pytest.mark.parametrize(
"statement_type",
[
pytest.param(prim.IntPrimitiveStatement),
pytest.param(prim.FloatPrimitiveStatement),
pytest.param(prim.StringPrimitiveStatement),
pytest.param(prim.BooleanPrimitiveStatement),
],
)
def test_primitive_statement_value_none(statement_type, test_case_mock):
statement = statement_type(test_case_mock, None)
assert statement.value is not None
@pytest.mark.parametrize(
"statement_type,value,new_value",
[
pytest.param(prim.IntPrimitiveStatement, 42, 23),
pytest.param(prim.FloatPrimitiveStatement, 2.1, 1.2),
pytest.param(prim.StringPrimitiveStatement, "foo", "bar"),
pytest.param(prim.BooleanPrimitiveStatement, True, False),
],
)
def test_primitive_statement_set_value(
statement_type, test_case_mock, value, new_value
):
statement = statement_type(test_case_mock, value)
statement.value = new_value
assert statement.value == new_value
@pytest.mark.parametrize(
"statement_type,test_case,new_test_case,value",
[
pytest.param(
prim.IntPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
42,
),
pytest.param(
prim.FloatPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
42.23,
),
pytest.param(
prim.StringPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
"foo",
),
pytest.param(
prim.BooleanPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
True,
),
],
)
def test_primitive_statement_clone(statement_type, test_case, new_test_case, value):
statement = statement_type(test_case, value)
new_statement = statement.clone(new_test_case)
assert new_statement.test_case == new_test_case
assert (
new_statement.return_value.variable_type == statement.return_value.variable_type
)
assert new_statement.value == statement.value
@pytest.mark.parametrize(
"statement_type,test_case,value,visitor_method",
[
pytest.param(
prim.IntPrimitiveStatement,
MagicMock(tc.TestCase),
42,
"visit_int_primitive_statement",
),
pytest.param(
prim.FloatPrimitiveStatement,
MagicMock(tc.TestCase),
2.1,
"visit_float_primitive_statement",
),
pytest.param(
prim.StringPrimitiveStatement,
MagicMock(tc.TestCase),
"foo",
"visit_string_primitive_statement",
),
pytest.param(
prim.BooleanPrimitiveStatement,
MagicMock(tc.TestCase),
True,
"visit_boolean_primitive_statement",
),
],
)
def test_primitive_statement_accept(statement_type, test_case, value, visitor_method):
stmt = statement_type(test_case, value)
visitor = MagicMock()
stmt.accept(visitor)
getattr(visitor, visitor_method).assert_called_once_with(stmt)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_equals_same(statement_type, value):
test_case = MagicMock(tc.TestCase)
statement = statement_type(test_case, value)
assert statement.__eq__(statement)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_equals_other_type(statement_type, value):
test_case = MagicMock(tc.TestCase)
statement = statement_type(test_case, value)
assert not statement.__eq__(test_case)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_equals_clone(statement_type, value):
test_case = MagicMock(tc.TestCase)
statement = statement_type(test_case, value)
test_case.statements = [statement]
test_case2 = MagicMock(tc.TestCase)
clone = statement.clone(test_case2)
test_case2.statements = [clone]
assert statement.__eq__(clone)
def test_none_statement_equals_clone():
test_case = MagicMock(tc.TestCase)
statement = prim.NoneStatement(test_case, type(None))
test_case.statements = [statement]
test_case2 = MagicMock(tc.TestCase)
clone = statement.clone(test_case2)
test_case2.statements = [clone]
assert statement.__eq__(clone)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_hash(statement_type, value):
statement = statement_type(MagicMock(tc.TestCase), value)
assert statement.__hash__() != 0
def test_int_primitive_statement_randomize_value(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert isinstance(statement.value, int)
def test_float_primitive_statement_randomize_value(test_case_mock):
statement = prim.FloatPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert isinstance(statement.value, float)
def test_bool_primitive_statement_randomize_value(test_case_mock):
statement = prim.BooleanPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert statement.value or not statement.value
def test_string_primitive_statement_randomize_value(test_case_mock):
statement = prim.StringPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert 0 <= len(statement.value) <= config.INSTANCE.string_length
def test_none_statement_randomize_value(test_case_mock):
statement = prim.NoneStatement(test_case_mock, type(None))
statement.randomize_value()
assert statement.value is None
def test_none_statement_delta(test_case_mock):
statement = prim.NoneStatement(test_case_mock, type(None))
statement.delta()
assert statement.value is None
def test_string_primitive_statement_random_deletion(test_case_mock):
sample = list("Test")
result = prim.StringPrimitiveStatement._random_deletion(sample)
assert len(result) <= len(sample)
def test_string_primitive_statement_random_insertion(test_case_mock):
sample = list("Test")
result = prim.StringPrimitiveStatement._random_insertion(sample)
assert len(result) >= len(sample)
def test_string_primitive_statement_random_insertion_empty(test_case_mock):
sample = list("")
result = prim.StringPrimitiveStatement._random_insertion(sample)
assert len(result) >= len(sample)
def test_string_primitive_statement_random_replacement(test_case_mock):
sample = list("Test")
result = prim.StringPrimitiveStatement._random_replacement(sample)
assert len(result) == len(sample)
def test_string_primitive_statement_delta_none(test_case_mock):
value = "t"
statement = prim.StringPrimitiveStatement(test_case_mock, value)
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.side_effect = [1.0, 1.0, 1.0]
statement.delta()
assert statement.value == value
def test_string_primitive_statement_delta_all(test_case_mock):
value = "te"
statement = prim.StringPrimitiveStatement(test_case_mock, value)
with mock.patch("pynguin.utils.randomness.next_char") as char_mock:
char_mock.side_effect = ["a", "b"]
with mock.patch("pynguin.utils.randomness.next_int") as int_mock:
int_mock.return_value = 0
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
deletion = [0.0, 0.0, 1.0]
replacement = [0.0, 0.0]
insertion = [0.0, 0.0, 1.0]
float_mock.side_effect = deletion + replacement + insertion
statement.delta()
assert statement.value == "ba"
def test_int_primitive_statement_delta(test_case_mock):
config.INSTANCE.max_delta = 10
statement = prim.IntPrimitiveStatement(test_case_mock, 1)
with mock.patch("pynguin.utils.randomness.next_gaussian") as gauss_mock:
gauss_mock.return_value = 0.5
statement.delta()
assert statement.value == 6
def test_float_primitive_statement_delta_max(test_case_mock):
config.INSTANCE.max_delta = 10
statement = prim.FloatPrimitiveStatement(test_case_mock, 1.5)
with mock.patch("pynguin.utils.randomness.next_gaussian") as gauss_mock:
gauss_mock.return_value = 0.5
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.return_value = 0.0
statement.delta()
assert statement.value == 6.5
def test_float_primitive_statement_delta_gauss(test_case_mock):
config.INSTANCE.max_delta = 10
statement = prim.FloatPrimitiveStatement(test_case_mock, 1.0)
with mock.patch("pynguin.utils.randomness.next_gaussian") as gauss_mock:
gauss_mock.return_value = 0.5
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.return_value = 1.0 / 3.0
statement.delta()
assert statement.value == 1.5
def test_float_primitive_statement_delta_round(test_case_mock):
statement = prim.FloatPrimitiveStatement(test_case_mock, 1.2345)
with mock.patch("pynguin.utils.randomness.next_int") as int_mock:
int_mock.return_value = 2
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.return_value = 2.0 / 3.0
statement.delta()
assert statement.value == 1.23
def test_boolean_primitive_statement_delta(test_case_mock):
statement = prim.BooleanPrimitiveStatement(test_case_mock, True)
statement.delta()
assert not statement.value
def test_primitive_statement_mutate(test_case_mock):
statement = prim.BooleanPrimitiveStatement(test_case_mock, True)
statement.mutate()
assert not statement.value
def test_primitive_statement_accessible(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
assert statement.accessible_object() is None
def test_primitive_statement_references(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
assert {statement.return_value} == statement.get_variable_references()
def test_primitive_statement_replace(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
new = vri.VariableReferenceImpl(test_case_mock, int)
statement.replace(statement.return_value, new)
assert statement.return_value == new
def test_primitive_statement_replace_ignore(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
new = prim.FloatPrimitiveStatement(test_case_mock, 0).return_value
old = statement.return_value
statement.replace(new, new)
assert statement.return_value == old
def test_primitive_statement_get_position():
test_case = dtc.DefaultTestCase()
statement = prim.IntPrimitiveStatement(test_case, 5)
test_case.add_statement(statement)
assert statement.get_position() == 0
|
[
"pynguin.testcase.variable.variablereferenceimpl.VariableReferenceImpl",
"pynguin.testcase.statements.primitivestatements.FloatPrimitiveStatement",
"pynguin.testcase.defaulttestcase.DefaultTestCase",
"unittest.mock.MagicMock",
"pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement",
"pytest.param",
"pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement._random_replacement",
"pynguin.testcase.statements.primitivestatements.BooleanPrimitiveStatement",
"pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement._random_insertion",
"pynguin.testcase.statements.primitivestatements.IntPrimitiveStatement",
"unittest.mock.patch",
"pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement._random_deletion"
] |
[((4564, 4575), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4573, 4575), False, 'from unittest.mock import MagicMock\n'), ((5053, 5075), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (5062, 5075), False, 'from unittest.mock import MagicMock\n'), ((5555, 5577), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (5564, 5577), False, 'from unittest.mock import MagicMock\n'), ((6056, 6078), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (6065, 6078), False, 'from unittest.mock import MagicMock\n'), ((6184, 6206), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (6193, 6206), False, 'from unittest.mock import MagicMock\n'), ((6376, 6398), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (6385, 6398), False, 'from unittest.mock import MagicMock\n'), ((6513, 6535), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (6522, 6535), False, 'from unittest.mock import MagicMock\n'), ((7192, 7234), 'pynguin.testcase.statements.primitivestatements.IntPrimitiveStatement', 'prim.IntPrimitiveStatement', (['test_case_mock'], {}), '(test_case_mock)\n', (7218, 7234), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((7397, 7441), 'pynguin.testcase.statements.primitivestatements.FloatPrimitiveStatement', 'prim.FloatPrimitiveStatement', (['test_case_mock'], {}), '(test_case_mock)\n', (7425, 7441), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((7605, 7651), 'pynguin.testcase.statements.primitivestatements.BooleanPrimitiveStatement', 'prim.BooleanPrimitiveStatement', (['test_case_mock'], {}), '(test_case_mock)\n', (7635, 7651), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((7821, 7866), 'pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement', 'prim.StringPrimitiveStatement', (['test_case_mock'], {}), '(test_case_mock)\n', (7850, 7866), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((8437, 8491), 'pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement._random_deletion', 'prim.StringPrimitiveStatement._random_deletion', (['sample'], {}), '(sample)\n', (8483, 8491), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((8641, 8696), 'pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement._random_insertion', 'prim.StringPrimitiveStatement._random_insertion', (['sample'], {}), '(sample)\n', (8688, 8696), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((8848, 8903), 'pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement._random_insertion', 'prim.StringPrimitiveStatement._random_insertion', (['sample'], {}), '(sample)\n', (8895, 8903), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((9055, 9112), 'pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement._random_replacement', 'prim.StringPrimitiveStatement._random_replacement', (['sample'], {}), '(sample)\n', (9104, 9112), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((9249, 9301), 'pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement', 'prim.StringPrimitiveStatement', (['test_case_mock', 'value'], {}), '(test_case_mock, value)\n', (9278, 9301), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((9589, 9641), 
'pynguin.testcase.statements.primitivestatements.StringPrimitiveStatement', 'prim.StringPrimitiveStatement', (['test_case_mock', 'value'], {}), '(test_case_mock, value)\n', (9618, 9641), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((10345, 10390), 'pynguin.testcase.statements.primitivestatements.IntPrimitiveStatement', 'prim.IntPrimitiveStatement', (['test_case_mock', '(1)'], {}), '(test_case_mock, 1)\n', (10371, 10390), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((10679, 10728), 'pynguin.testcase.statements.primitivestatements.FloatPrimitiveStatement', 'prim.FloatPrimitiveStatement', (['test_case_mock', '(1.5)'], {}), '(test_case_mock, 1.5)\n', (10707, 10728), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((11153, 11202), 'pynguin.testcase.statements.primitivestatements.FloatPrimitiveStatement', 'prim.FloatPrimitiveStatement', (['test_case_mock', '(1.0)'], {}), '(test_case_mock, 1.0)\n', (11181, 11202), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((11598, 11650), 'pynguin.testcase.statements.primitivestatements.FloatPrimitiveStatement', 'prim.FloatPrimitiveStatement', (['test_case_mock', '(1.2345)'], {}), '(test_case_mock, 1.2345)\n', (11626, 11650), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((12032, 12084), 'pynguin.testcase.statements.primitivestatements.BooleanPrimitiveStatement', 'prim.BooleanPrimitiveStatement', (['test_case_mock', '(True)'], {}), '(test_case_mock, True)\n', (12062, 12084), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((12209, 12261), 'pynguin.testcase.statements.primitivestatements.BooleanPrimitiveStatement', 'prim.BooleanPrimitiveStatement', (['test_case_mock', '(True)'], {}), '(test_case_mock, True)\n', (12239, 12261), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((12391, 12436), 'pynguin.testcase.statements.primitivestatements.IntPrimitiveStatement', 'prim.IntPrimitiveStatement', (['test_case_mock', '(0)'], {}), '(test_case_mock, 0)\n', (12417, 12436), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((12561, 12606), 'pynguin.testcase.statements.primitivestatements.IntPrimitiveStatement', 'prim.IntPrimitiveStatement', (['test_case_mock', '(0)'], {}), '(test_case_mock, 0)\n', (12587, 12606), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((12754, 12799), 'pynguin.testcase.statements.primitivestatements.IntPrimitiveStatement', 'prim.IntPrimitiveStatement', (['test_case_mock', '(0)'], {}), '(test_case_mock, 0)\n', (12780, 12799), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((12810, 12856), 'pynguin.testcase.variable.variablereferenceimpl.VariableReferenceImpl', 'vri.VariableReferenceImpl', (['test_case_mock', 'int'], {}), '(test_case_mock, int)\n', (12835, 12856), True, 'import pynguin.testcase.variable.variablereferenceimpl as vri\n'), ((13028, 13073), 'pynguin.testcase.statements.primitivestatements.IntPrimitiveStatement', 'prim.IntPrimitiveStatement', (['test_case_mock', '(0)'], {}), '(test_case_mock, 0)\n', (13054, 13073), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((13314, 13335), 'pynguin.testcase.defaulttestcase.DefaultTestCase', 'dtc.DefaultTestCase', ([], {}), '()\n', (13333, 13335), True, 'import pynguin.testcase.defaulttestcase as dtc\n'), ((13352, 13392), 'pynguin.testcase.statements.primitivestatements.IntPrimitiveStatement', 
'prim.IntPrimitiveStatement', (['test_case', '(5)'], {}), '(test_case, 5)\n', (13378, 13392), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((1073, 1117), 'pytest.param', 'pytest.param', (['prim.IntPrimitiveStatement', '(42)'], {}), '(prim.IntPrimitiveStatement, 42)\n', (1085, 1117), False, 'import pytest\n'), ((1127, 1176), 'pytest.param', 'pytest.param', (['prim.FloatPrimitiveStatement', '(42.23)'], {}), '(prim.FloatPrimitiveStatement, 42.23)\n', (1139, 1176), False, 'import pytest\n'), ((1186, 1236), 'pytest.param', 'pytest.param', (['prim.StringPrimitiveStatement', '"""foo"""'], {}), "(prim.StringPrimitiveStatement, 'foo')\n", (1198, 1236), False, 'import pytest\n'), ((1246, 1296), 'pytest.param', 'pytest.param', (['prim.BooleanPrimitiveStatement', '(True)'], {}), '(prim.BooleanPrimitiveStatement, True)\n', (1258, 1296), False, 'import pytest\n'), ((1536, 1576), 'pytest.param', 'pytest.param', (['prim.IntPrimitiveStatement'], {}), '(prim.IntPrimitiveStatement)\n', (1548, 1576), False, 'import pytest\n'), ((1586, 1628), 'pytest.param', 'pytest.param', (['prim.FloatPrimitiveStatement'], {}), '(prim.FloatPrimitiveStatement)\n', (1598, 1628), False, 'import pytest\n'), ((1638, 1681), 'pytest.param', 'pytest.param', (['prim.StringPrimitiveStatement'], {}), '(prim.StringPrimitiveStatement)\n', (1650, 1681), False, 'import pytest\n'), ((1691, 1735), 'pytest.param', 'pytest.param', (['prim.BooleanPrimitiveStatement'], {}), '(prim.BooleanPrimitiveStatement)\n', (1703, 1735), False, 'import pytest\n'), ((1991, 2039), 'pytest.param', 'pytest.param', (['prim.IntPrimitiveStatement', '(42)', '(23)'], {}), '(prim.IntPrimitiveStatement, 42, 23)\n', (2003, 2039), False, 'import pytest\n'), ((2049, 2101), 'pytest.param', 'pytest.param', (['prim.FloatPrimitiveStatement', '(2.1)', '(1.2)'], {}), '(prim.FloatPrimitiveStatement, 2.1, 1.2)\n', (2061, 2101), False, 'import pytest\n'), ((2111, 2168), 'pytest.param', 'pytest.param', (['prim.StringPrimitiveStatement', '"""foo"""', '"""bar"""'], {}), "(prim.StringPrimitiveStatement, 'foo', 'bar')\n", (2123, 2168), False, 'import pytest\n'), ((2178, 2235), 'pytest.param', 'pytest.param', (['prim.BooleanPrimitiveStatement', '(True)', '(False)'], {}), '(prim.BooleanPrimitiveStatement, True, False)\n', (2190, 2235), False, 'import pytest\n'), ((4738, 4782), 'pytest.param', 'pytest.param', (['prim.IntPrimitiveStatement', '(42)'], {}), '(prim.IntPrimitiveStatement, 42)\n', (4750, 4782), False, 'import pytest\n'), ((4792, 4841), 'pytest.param', 'pytest.param', (['prim.FloatPrimitiveStatement', '(42.23)'], {}), '(prim.FloatPrimitiveStatement, 42.23)\n', (4804, 4841), False, 'import pytest\n'), ((4851, 4901), 'pytest.param', 'pytest.param', (['prim.StringPrimitiveStatement', '"""foo"""'], {}), "(prim.StringPrimitiveStatement, 'foo')\n", (4863, 4901), False, 'import pytest\n'), ((4911, 4961), 'pytest.param', 'pytest.param', (['prim.BooleanPrimitiveStatement', '(True)'], {}), '(prim.BooleanPrimitiveStatement, True)\n', (4923, 4961), False, 'import pytest\n'), ((5234, 5278), 'pytest.param', 'pytest.param', (['prim.IntPrimitiveStatement', '(42)'], {}), '(prim.IntPrimitiveStatement, 42)\n', (5246, 5278), False, 'import pytest\n'), ((5288, 5337), 'pytest.param', 'pytest.param', (['prim.FloatPrimitiveStatement', '(42.23)'], {}), '(prim.FloatPrimitiveStatement, 42.23)\n', (5300, 5337), False, 'import pytest\n'), ((5347, 5397), 'pytest.param', 'pytest.param', (['prim.StringPrimitiveStatement', '"""foo"""'], {}), "(prim.StringPrimitiveStatement, 
'foo')\n", (5359, 5397), False, 'import pytest\n'), ((5407, 5457), 'pytest.param', 'pytest.param', (['prim.BooleanPrimitiveStatement', '(True)'], {}), '(prim.BooleanPrimitiveStatement, True)\n', (5419, 5457), False, 'import pytest\n'), ((5740, 5784), 'pytest.param', 'pytest.param', (['prim.IntPrimitiveStatement', '(42)'], {}), '(prim.IntPrimitiveStatement, 42)\n', (5752, 5784), False, 'import pytest\n'), ((5794, 5843), 'pytest.param', 'pytest.param', (['prim.FloatPrimitiveStatement', '(42.23)'], {}), '(prim.FloatPrimitiveStatement, 42.23)\n', (5806, 5843), False, 'import pytest\n'), ((5853, 5903), 'pytest.param', 'pytest.param', (['prim.StringPrimitiveStatement', '"""foo"""'], {}), "(prim.StringPrimitiveStatement, 'foo')\n", (5865, 5903), False, 'import pytest\n'), ((5913, 5963), 'pytest.param', 'pytest.param', (['prim.BooleanPrimitiveStatement', '(True)'], {}), '(prim.BooleanPrimitiveStatement, True)\n', (5925, 5963), False, 'import pytest\n'), ((7040, 7062), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (7049, 7062), False, 'from unittest.mock import MagicMock\n'), ((6717, 6761), 'pytest.param', 'pytest.param', (['prim.IntPrimitiveStatement', '(42)'], {}), '(prim.IntPrimitiveStatement, 42)\n', (6729, 6761), False, 'import pytest\n'), ((6771, 6820), 'pytest.param', 'pytest.param', (['prim.FloatPrimitiveStatement', '(42.23)'], {}), '(prim.FloatPrimitiveStatement, 42.23)\n', (6783, 6820), False, 'import pytest\n'), ((6830, 6880), 'pytest.param', 'pytest.param', (['prim.StringPrimitiveStatement', '"""foo"""'], {}), "(prim.StringPrimitiveStatement, 'foo')\n", (6842, 6880), False, 'import pytest\n'), ((6890, 6940), 'pytest.param', 'pytest.param', (['prim.BooleanPrimitiveStatement', '(True)'], {}), '(prim.BooleanPrimitiveStatement, True)\n', (6902, 6940), False, 'import pytest\n'), ((9311, 9360), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_float"""'], {}), "('pynguin.utils.randomness.next_float')\n", (9321, 9360), False, 'from unittest import mock\n'), ((9651, 9699), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_char"""'], {}), "('pynguin.utils.randomness.next_char')\n", (9661, 9699), False, 'from unittest import mock\n'), ((10400, 10452), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_gaussian"""'], {}), "('pynguin.utils.randomness.next_gaussian')\n", (10410, 10452), False, 'from unittest import mock\n'), ((10738, 10790), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_gaussian"""'], {}), "('pynguin.utils.randomness.next_gaussian')\n", (10748, 10790), False, 'from unittest import mock\n'), ((11212, 11264), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_gaussian"""'], {}), "('pynguin.utils.randomness.next_gaussian')\n", (11222, 11264), False, 'from unittest import mock\n'), ((11660, 11707), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_int"""'], {}), "('pynguin.utils.randomness.next_int')\n", (11670, 11707), False, 'from unittest import mock\n'), ((13084, 13131), 'pynguin.testcase.statements.primitivestatements.FloatPrimitiveStatement', 'prim.FloatPrimitiveStatement', (['test_case_mock', '(0)'], {}), '(test_case_mock, 0)\n', (13112, 13131), True, 'import pynguin.testcase.statements.primitivestatements as prim\n'), ((2628, 2650), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (2637, 2650), False, 'from unittest.mock import MagicMock\n'), ((2664, 2686), 
'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (2673, 2686), False, 'from unittest.mock import MagicMock\n'), ((2791, 2813), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (2800, 2813), False, 'from unittest.mock import MagicMock\n'), ((2827, 2849), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (2836, 2849), False, 'from unittest.mock import MagicMock\n'), ((2958, 2980), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (2967, 2980), False, 'from unittest.mock import MagicMock\n'), ((2994, 3016), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (3003, 3016), False, 'from unittest.mock import MagicMock\n'), ((3126, 3148), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (3135, 3148), False, 'from unittest.mock import MagicMock\n'), ((3162, 3184), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (3171, 3184), False, 'from unittest.mock import MagicMock\n'), ((3780, 3802), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (3789, 3802), False, 'from unittest.mock import MagicMock\n'), ((3952, 3974), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (3961, 3974), False, 'from unittest.mock import MagicMock\n'), ((4128, 4150), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (4137, 4150), False, 'from unittest.mock import MagicMock\n'), ((4308, 4330), 'unittest.mock.MagicMock', 'MagicMock', (['tc.TestCase'], {}), '(tc.TestCase)\n', (4317, 4330), False, 'from unittest.mock import MagicMock\n'), ((9770, 9817), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_int"""'], {}), "('pynguin.utils.randomness.next_int')\n", (9780, 9817), False, 'from unittest import mock\n'), ((10857, 10906), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_float"""'], {}), "('pynguin.utils.randomness.next_float')\n", (10867, 10906), False, 'from unittest import mock\n'), ((11331, 11380), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_float"""'], {}), "('pynguin.utils.randomness.next_float')\n", (11341, 11380), False, 'from unittest import mock\n'), ((11768, 11817), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_float"""'], {}), "('pynguin.utils.randomness.next_float')\n", (11778, 11817), False, 'from unittest import mock\n'), ((9886, 9935), 'unittest.mock.patch', 'mock.patch', (['"""pynguin.utils.randomness.next_float"""'], {}), "('pynguin.utils.randomness.next_float')\n", (9896, 9935), False, 'from unittest import mock\n')]
|
from pathlib import Path
from src import constants
from src.data.download.utils.download_dataset_zip import download_dataset_zip
def download_tencent_test(
tmp_dir: Path = None,
tqdm_name: str = None,
tqdm_idx: int = None,
):
"""Download the test set of the Tencent Corpus and extract it to the
appropriate directory."""
download_dataset_zip(
name="tencent_test",
data_url=constants.TENCENT_TEST_URL,
output_dir=constants.TENCENT_TEST_DIR,
extracted_name=constants.TENCENT_TEST_ZIP_FOLDER,
tmp_dir=tmp_dir,
tqdm_name=tqdm_name,
tqdm_idx=tqdm_idx,
)
if __name__ == "__main__":
download_tencent_test(tqdm_name="tencent", tqdm_idx=0)
|
[
"src.data.download.utils.download_dataset_zip.download_dataset_zip"
] |
[((349, 588), 'src.data.download.utils.download_dataset_zip.download_dataset_zip', 'download_dataset_zip', ([], {'name': '"""tencent_test"""', 'data_url': 'constants.TENCENT_TEST_URL', 'output_dir': 'constants.TENCENT_TEST_DIR', 'extracted_name': 'constants.TENCENT_TEST_ZIP_FOLDER', 'tmp_dir': 'tmp_dir', 'tqdm_name': 'tqdm_name', 'tqdm_idx': 'tqdm_idx'}), "(name='tencent_test', data_url=constants.\n TENCENT_TEST_URL, output_dir=constants.TENCENT_TEST_DIR, extracted_name\n =constants.TENCENT_TEST_ZIP_FOLDER, tmp_dir=tmp_dir, tqdm_name=\n tqdm_name, tqdm_idx=tqdm_idx)\n", (369, 588), False, 'from src.data.download.utils.download_dataset_zip import download_dataset_zip\n')]
|