36089
from maya import cmds
import copy
# TODO: Find a way to have different naming for different production.
# Maybe handle it in the rig directly?
class BaseName(object):
"""
    This class handles the naming of objects.
    A name is stored as a list of 'tokens'.
    When resolved, the tokens are joined using a 'separator' (normally an underscore).
    Some specific properties also exist:
- Side: Generally L/R token
- Prefix: Always the first token
- Suffix: Always the last token
You can resolve a BaseName instance from a string.
>>> name = BaseName('l_eye_jnt')
>>> name.resolve()
'l_eye_jnt'
You can build a BaseName instance manually.
>>> name = BaseName(tokens=('eye',), suffix='jnt', side=BaseName.SIDE_L)
>>> name.resolve()
'l_eye_jnt'
You can add tokens at any time.
>>> name.add_tokens('upp')
>>> name.resolve()
'l_eye_upp_jnt'
    You can override a BaseName instance's public properties.
>>> name = BaseName()
>>> name.tokens = ('eye',)
>>> name.resolve()
'eye'
>>> name.suffix = 'jnt'
>>> name.resolve()
'eye_jnt'
>>> name.side = name.SIDE_L
>>> name.resolve()
'l_eye_jnt'
"""
separator = '_'
type_anm = 'anm'
type_anm_grp = 'anm_grp'
type_jnt = 'jnt'
type_rig = 'rig'
type_rig_grp = 'data_grp'
root_anm_name = 'anms'
root_geo_name = 'geos'
root_jnt_name = 'jnts'
root_rig_name = 'data'
root_backup_name = 'backup'
layer_anm_name = 'layer_anm'
layer_rig_name = 'layer_rig'
layer_geo_name = 'layer_geo'
SIDE_L = 'l'
SIDE_R = 'r'
def __init__(self, name=None, tokens=None, prefix=None, suffix=None, side=None):
self.tokens = []
self.prefix = None
self.suffix = None
self.side = None
        if name:
            self.build_from_string(name)
# Apply manual overrides
if tokens:
self.tokens = tokens
if prefix:
self.prefix = prefix
if suffix:
self.suffix = suffix
if side:
self.side = side
def copy(self):
"""
Return a copy of the name object.
"""
inst = self.__class__()
        inst.tokens = copy.copy(self.tokens)
        inst.prefix = self.prefix
        inst.suffix = self.suffix
        inst.side = self.side
        return inst
def rebuild(self, name):
return self.__class__(name, prefix=self.prefix, suffix=self.suffix)
def get_basename(self):
"""
        Each name has one single token that represents its part.
        ex: L_LegUpp_Ik_Ctrl -> LegUpp
        By default this is the first non-side token in the name.
        :return: The part name.
"""
for token in self.tokens:
if not self.get_side_from_token(token):
return token
def remove_extra_tokens(self):
"""
        Remove any token that is not the base token or a side token.
:return:
"""
basename = self.get_basename()
found_base_token = False
new_tokens = []
for token in self.tokens:
if self.get_side_from_token(token):
new_tokens.append(token)
            elif not found_base_token and token == basename:
                new_tokens.append(token)
                found_base_token = True
self.tokens = new_tokens
def build_from_string(self, name):
raw_tokens = self._get_tokens(name)
self.tokens = []
#self.prefix = None
#self.suffix = None
self.side = None
self.add_tokens(*raw_tokens)
def _get_tokens(self, val):
return val.split(self.separator)
def _join_tokens(self, tokens):
return self.separator.join(tokens)
def add_tokens(self, *args):
for arg in args:
for token in arg.split(self.separator):
side = self.get_side_from_token(token)
if side:
self.side = side
else:
self.tokens.append(token)
def add_suffix(self, suffix):
self.tokens.append(suffix)
def add_prefix(self, prefix):
self.tokens.insert(0, prefix)
def get_unique_name(self, name):
if cmds.objExists(name):
i = 1
while cmds.objExists(name + str(i)):
i += 1
return name + str(i)
return name
@classmethod
def get_side_from_token(cls, token):
token_lower = token.lower()
if token_lower == cls.SIDE_L.lower():
return cls.SIDE_L
if token_lower == cls.SIDE_R.lower():
return cls.SIDE_R
def get_tokens(self):
"""
:return: All token without the side tokens.
"""
return [token for token in self.tokens if not self.get_side_from_token(token)]
def resolve(self, *args):
tokens = []
if self.prefix:
tokens.append(self.prefix)
if self.side:
tokens.append(self.side)
tokens.extend(self.tokens)
tokens.extend(args)
if self.suffix:
tokens.append(self.suffix)
name = self._join_tokens(tokens)
# If we have name conflicts, we WILL want to crash.
'''
# Prevent maya from crashing by guarantying that the name is unique.
if cmds.objExists(name):
name_old = name
name = self.get_unique_name(name)
cmds.warning("Name {0} already exist, using {1} instead.".format(
name_old, name
))
'''
return name
def rename(self, obj, *args):
name = self.resolve(*args)
obj.rename(name)
def __repr__(self):
return self.resolve()
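
# A minimal usage sketch (assumes this runs inside Maya, since the module
# imports maya.cmds at the top; the 'arm'/'ik'/'ctrl' tokens are made up):
#
#   name = BaseName('l_arm_ik')
#   name.suffix = 'ctrl'
#   name.resolve()  # -> 'l_arm_ik_ctrl'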
36094
from datetime import datetime
import traceback
import boto3
from botocore.exceptions import ClientError
from ...config import config
from ...log import log
class Submitter():
def __init__(self, event):
self.event = event
def find_instance(self, instance_id, mac_address): # pylint: disable=R0201
# Instance IDs are unique to the region, not the account, so we have to check them all
report_region = config.get('aws', 'region')
ec2instance = None
ec2_client = boto3.client("ec2")
regions = [region["RegionName"] for region in ec2_client.describe_regions()["Regions"]]
for region in regions:
ec2 = boto3.resource("ec2", region_name=region)
try:
ec2instance = ec2.Instance(instance_id)
found = False
# Confirm the mac address matches
for iface in ec2instance.network_interfaces:
det_mac = mac_address.lower().replace(":", "").replace("-", "")
ins_mac = iface.mac_address.lower().replace(":", "").replace("-", "")
if det_mac == ins_mac:
found = True
if found: # pylint: disable=R1723
return region, ec2instance
except ClientError:
continue
except Exception: # pylint: disable=W0703
trace = traceback.format_exc()
log.exception(str(trace))
continue
return report_region, ec2instance
@staticmethod
def send_to_securityhub(manifest):
client = boto3.client('securityhub', region_name=config.get('aws', 'region'))
check_response = {}
found = False
try:
check_response = client.get_findings(Filters={'Id': [{'Value': manifest["Id"], 'Comparison': 'EQUALS'}]})
for _ in check_response["Findings"]:
found = True
except ClientError:
pass
import_response = False
if not found:
try:
import_response = client.batch_import_findings(Findings=[manifest])
except ClientError as err:
# Boto3 issue communicating with SH, throw the error in the log
log.exception(str(err))
return import_response
def submit(self):
log.info("Processing detection: %s", self.event.detect_description)
det_region = config.get('aws', 'region')
send = False
try:
if self.event.instance_id:
det_region, instance = self.find_instance(self.event.instance_id, self.event.device_details["mac_address"])
if instance is None:
log.warning("Instance %s with MAC address %s not found in regions searched. Alert not processed.",
self.event.instance_id, self.event.device_details["mac_address"])
return
try:
for _ in instance.network_interfaces:
# Only send alerts for instances we can find
send = True
except ClientError:
# Not our instance
i_id = self.event.instance_id
mac = self.event.device_details["mac_address"]
log.info("Instance %s with MAC address %s not found in regions searched. Alert not processed.", i_id, mac)
except AttributeError:
# Instance ID was not provided by the detection
log.info("Instance ID not provided by detection. Alert not processed.")
if send:
sh_payload = self.create_payload(det_region)
response = self.send_to_securityhub(sh_payload)
if not response:
log.info("Detection already submitted to Security Hub. Alert not processed.")
else:
if response["SuccessCount"] > 0:
submit_msg = f"Detection submitted to Security Hub. (Request ID: {response['ResponseMetadata']['RequestId']})"
log.info(submit_msg)
def create_payload(self, instance_region):
region = config.get('aws', 'region')
try:
account_id = boto3.client("sts").get_caller_identity().get('Account')
except KeyError:
# Failed to get endpoint_resolver the first time, try it again
account_id = boto3.client("sts").get_caller_identity().get('Account')
severity_product = self.event.severity_value
severity_normalized = severity_product * 20
payload = {
"SchemaVersion": "2018-10-08",
"ProductArn": "arn:aws:securityhub:{}:517716713836:product/crowdstrike/crowdstrike-falcon".format(region),
"AwsAccountId": account_id,
"SourceUrl": self.event.falcon_link,
"GeneratorId": "Falcon Host",
"CreatedAt": datetime.utcfromtimestamp(float(self.event.event_create_time) / 1000.).isoformat() + 'Z',
"UpdatedAt": ((datetime.utcfromtimestamp(datetime.timestamp(datetime.now()))).isoformat() + 'Z'),
"RecordState": "ACTIVE",
"Severity": {"Product": severity_product, "Normalized": severity_normalized}
}
# Instance ID based detail
try:
payload["Id"] = f"{self.event.instance_id}{self.event.event_id}"
payload["Title"] = "Falcon Alert. Instance: %s" % self.event.instance_id
payload["Resources"] = [{"Type": "AwsEc2Instnace", "Id": self.event.instance_id, "Region": instance_region}]
except AttributeError:
payload["Id"] = f"UnknownInstanceID:{self.event.event_id}"
payload["Title"] = "Falcon Alert"
payload["Resources"] = [{"Type": "Other",
"Id": f"UnknownInstanceId:{self.event.event_id}",
"Region": region
}]
# Description
aws_id = ""
if self.event.cloud_provider_account_id:
aws_id = f"| AWS Account for alerting instance: {self.event.cloud_provider_account_id}"
payload["Description"] = f"{self.event.detect_description} {aws_id}"
# TTPs
try:
payload["Types"] = ["Namespace: TTPs",
"Category: %s" % self.event.original_event["event"]["Tactic"],
"Classifier: %s" % self.event.original_event["event"]["Technique"]
]
except KeyError:
payload.pop("Types", None)
# Running process detail
try:
payload["Process"] = {}
payload["Process"]["Name"] = self.event.original_event["event"]["FileName"]
payload["Process"]["Path"] = self.event.original_event["event"]["FilePath"]
except KeyError:
payload.pop("Process", None)
# Network detail
try:
payload['Network'] = self.network_payload()
except KeyError:
pass
return payload
def network_payload(self):
net = {}
net['Direction'] = \
"IN" if self.event.original_event['event']['NetworkAccesses'][0]['ConnectionDirection'] == 0 else 'OUT'
net['Protocol'] = self.event.original_event['event']['NetworkAccesses'][0]['Protocol']
net['SourceIpV4'] = self.event.original_event['event']['NetworkAccesses'][0]['LocalAddress']
net['SourcePort'] = self.event.original_event['event']['NetworkAccesses'][0]['LocalPort']
net['DestinationIpV4'] = self.event.original_event['event']['NetworkAccesses'][0]['RemoteAddress']
net['DestinationPort'] = self.event.original_event['event']['NetworkAccesses'][0]['RemotePort']
return net
class Runtime():
def __init__(self):
log.info("AWS Backend is enabled.")
def is_relevant(self, falcon_event): # pylint: disable=R0201
return falcon_event.cloud_provider == 'AWS_EC2'
def process(self, falcon_event): # pylint: disable=R0201
Submitter(falcon_event).submit()
__all__ = ['Runtime']
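
# Hypothetical wiring sketch (the falcon_event object is supplied by the
# surrounding application and is not defined in this module):
#
#   runtime = Runtime()
#   if runtime.is_relevant(falcon_event):
#       runtime.process(falcon_event)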
36117
from zhixuewang.urls import BASE_URL
class Url:
INFO_URL = f"{BASE_URL}/container/container/student/account/"
CHANGE_PASSWORD_URL = f"{BASE_URL}/portalcenter/home/updatePassword/"
TEST_URL = f"{BASE_URL}/container/container/teacher/teacherAccountNew"
GET_EXAM_URL = f"{BASE_URL}/classreport/class/classReportList/"
GET_AcademicTermTeachingCycle_URL = f"{BASE_URL}/classreport/class/getAcademicTermTeachingCycle/"
GET_REPORT_URL = f"{BASE_URL}/exportpaper/class/getExportStudentInfo"
GET_MARKING_PROGRESS_URL = f"{BASE_URL}/marking/marking/markingProgressDetail"
GET_EXAM_DETAIL_URL = f"{BASE_URL}/scanmuster/cloudRec/scanrecognition"
GET_EXAM_SCHOOLS_URL = f"{BASE_URL}/exam/marking/schoolClass"
GET_EXAM_SUBJECTS_URL = f"{BASE_URL}/configure/class/getSubjectsIncludeSubAndGroup"
    # The paperId must be appended after this URL
# ORIGINAL_PAPER_URL = f"{BASE_URL}/classreport/class/student/checksheet/?userId="
ORIGINAL_PAPER_URL = f"{BASE_URL}/classreport/class/student/checksheet/"
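
# Usage sketch (hypothetical ids; per the note above, the paperId is appended
# as a query parameter, alongside the userId the commented-out variant shows):
#
#   url = f"{Url.ORIGINAL_PAPER_URL}?userId={user_id}&paperId={paper_id}"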
36151
import pytest
from ssz.sedes import Bitvector
def test_bitvector_instantiation_bound():
    # Only the constructor call sits inside the raises block, so setup errors
    # cannot mask the assertion.
    bit_count = 0
    with pytest.raises(ValueError):
        Bitvector(bit_count)
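
def test_bitvector_instantiation_valid():
    # Companion check (assumed behavior): a strictly positive bit count
    # should construct without raising.
    Bitvector(1)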
36168
import unittest
import acpc_python_client as acpc
from tools.constants import Action
from weak_agents.action_tilted_agent import create_agent_strategy, create_agent_strategy_from_trained_strategy, TiltType
from tools.io_util import read_strategy_from_file
from evaluation.exploitability import Exploitability
from tools.game_utils import is_strategies_equal, is_correct_strategy
KUHN_POKER_GAME_FILE_PATH = 'games/kuhn.limit.2p.game'
LEDUC_POKER_GAME_FILE_PATH = 'games/leduc.limit.2p.game'
class WeakAgentsTests(unittest.TestCase):
def test_kuhn_action_tilted_agent_not_crashing(self):
strategy = create_agent_strategy(
KUHN_POKER_GAME_FILE_PATH,
Action.RAISE,
TiltType.ADD,
0.2,
cfr_iterations=20,
cfr_weight_delay=2,
show_progress=False)
self.assertTrue(is_correct_strategy(strategy))
def test_leduc_add_action_tilted_agent_not_crashing(self):
strategy = create_agent_strategy(
LEDUC_POKER_GAME_FILE_PATH,
Action.FOLD,
TiltType.ADD,
0.1,
cfr_iterations=5,
cfr_weight_delay=2,
show_progress=False)
self.assertTrue(is_correct_strategy(strategy))
def test_leduc_multiply_action_tilted_agent_not_crashing(self):
strategy = create_agent_strategy(
LEDUC_POKER_GAME_FILE_PATH,
Action.FOLD,
TiltType.MULTIPLY,
0.1,
cfr_iterations=5,
cfr_weight_delay=2,
show_progress=False)
self.assertTrue(is_correct_strategy(strategy))
def test_kuhn_action_tilted_agent(self):
kuhn_equilibrium, _ = read_strategy_from_file(
KUHN_POKER_GAME_FILE_PATH,
'strategies/kuhn.limit.2p-equilibrium.strategy')
game = acpc.read_game_file(KUHN_POKER_GAME_FILE_PATH)
exploitability = Exploitability(game)
tilted_agent_strategy = create_agent_strategy_from_trained_strategy(
KUHN_POKER_GAME_FILE_PATH,
kuhn_equilibrium,
Action.RAISE,
TiltType.ADD,
0.2)
self.assertTrue(is_correct_strategy(tilted_agent_strategy))
self.assertTrue(not is_strategies_equal(kuhn_equilibrium, tilted_agent_strategy))
equilibrium_exploitability = exploitability.evaluate(kuhn_equilibrium)
raise_add_tilted_exploitability = exploitability.evaluate(tilted_agent_strategy)
self.assertTrue(raise_add_tilted_exploitability > equilibrium_exploitability)
def test_kuhn_action_minus_tilted_agent(self):
kuhn_equilibrium, _ = read_strategy_from_file(
KUHN_POKER_GAME_FILE_PATH,
'strategies/kuhn.limit.2p-equilibrium.strategy')
game = acpc.read_game_file(KUHN_POKER_GAME_FILE_PATH)
exploitability = Exploitability(game)
tilted_agent_strategy = create_agent_strategy_from_trained_strategy(
KUHN_POKER_GAME_FILE_PATH,
kuhn_equilibrium,
Action.CALL,
TiltType.ADD,
-0.5)
self.assertTrue(is_correct_strategy(tilted_agent_strategy))
self.assertTrue(not is_strategies_equal(kuhn_equilibrium, tilted_agent_strategy))
equilibrium_exploitability = exploitability.evaluate(kuhn_equilibrium)
        call_add_tilted_exploitability = exploitability.evaluate(tilted_agent_strategy)
        self.assertTrue(call_add_tilted_exploitability > equilibrium_exploitability)
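
# Both tilted-agent tests encode the same invariant: a strategy tilted away
# from equilibrium should be strictly more exploitable than the equilibrium
# itself.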
36170
import functools
import operator
from collections.abc import Iterable
from typing import overload, Union, TypeVar
T = TypeVar('T')
S = TypeVar('S') # <1>
@overload
def sum(it: Iterable[T]) -> Union[T, int]: ... # <2>
@overload
def sum(it: Iterable[T], /, start: S) -> Union[T, S]: ... # <3>
def sum(it, /, start=0): # <4>
return functools.reduce(operator.add, it, start)
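
# How the two overloads are meant to resolve (a sketch, not part of the
# original listing):
#
#   sum([1, 2, 3])             # -> 6,    matches <2>: Union[int, int]
#   sum(['a', 'b'], start='')  # -> 'ab', matches <3>: Union[str, str]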
36225
from camper.db import BarcampSchema, Barcamp
import datetime
def test_get_empty_registration_form(barcamps, barcamp):
barcamps.save(barcamp)
barcamp = barcamps.by_slug("barcamp")
assert barcamp.registration_form == []
def test_add_to_registration_form(barcamps, barcamp):
barcamps.save(barcamp)
field = {
'name' : 'fullname',
'title' : 'Your full name, please',
'fieldtype' : 'textfield',
'description' : 'enter your full name here',
'required' : False,
}
barcamp = barcamps.by_slug("barcamp")
barcamp.registration_form.append(field)
barcamp.save()
barcamp = barcamps.by_slug("barcamp")
assert len(barcamp.registration_form) == 1
def test_save_registration_data(barcamps, barcamp):
barcamps.save(barcamp)
# create the field
field = {
'name' : 'fullname',
'title' : 'Your full name, please',
'fieldtype' : 'textfield',
'description' : 'enter your full name here',
'required' : False,
}
barcamp = barcamps.by_slug("barcamp")
barcamp.registration_form.append(field)
barcamp.save()
barcamp = barcamps.by_slug("barcamp")
# use the field
assert len(barcamp.registration_form) == 1
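
# The pattern exercised above is the key bit: mutate the document, save(),
# then re-fetch by slug, since only the persisted copy proves the
# registration form survived the round trip.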
36230
from pathlib import Path
from unittest.mock import call
import pytest
from dbt_sugar.core.clients.dbt import DbtProfile
from dbt_sugar.core.config.config import DbtSugarConfig
from dbt_sugar.core.flags import FlagParser
from dbt_sugar.core.main import parser
from dbt_sugar.core.task.audit import AuditTask
from dbt_sugar.core.task.base import COLUMN_NOT_DOCUMENTED
FIXTURE_DIR = Path(__file__).resolve().parent
def __init_descriptions(datafiles):
flag_parser = FlagParser(parser)
config_filepath = Path(FIXTURE_DIR).joinpath("sugar_config.yml")
flag_parser.consume_cli_arguments(
test_cli_args=[
"audit",
"--config-path",
str(config_filepath),
]
)
sugar_config = DbtSugarConfig(flag_parser)
sugar_config.load_config()
profile = DbtProfile(
flags=flag_parser,
profile_name="dbt_sugar_test",
target_name=str(),
profiles_dir=Path(datafiles),
)
profile.read_profile()
audit_task = AuditTask(flag_parser, FIXTURE_DIR, sugar_config=sugar_config, dbt_profile=profile)
audit_task.dbt_definitions = {"columnA": "descriptionA", "columnB": "descriptionB"}
audit_task.repository_path = Path("tests/test_dbt_project/")
return audit_task
@pytest.mark.parametrize(
"dbt_definitions, result",
[
pytest.param(
{"columnA": "descriptionA", "columnB": "descriptionB"},
"100.0",
id="all_columns_documented",
),
pytest.param(
{"columnA": COLUMN_NOT_DOCUMENTED, "columnB": COLUMN_NOT_DOCUMENTED},
"0.0",
id="none_columns_documented",
),
pytest.param(
{"columnA": "descriptionA", "columnB": COLUMN_NOT_DOCUMENTED},
"50.0",
id="half_columns_documented",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_project_total_test_coverage(datafiles, dbt_definitions, result):
audit_task = __init_descriptions(datafiles)
audit_task.dbt_definitions = dbt_definitions
assert audit_task.get_project_total_test_coverage() == result
@pytest.mark.parametrize(
"failures, total, result",
[
pytest.param(
0,
0,
"0.0",
id="calculate_failures_with_0_failures_and_total",
),
pytest.param(
8,
10,
"20.0",
id="calculate_failures",
),
pytest.param(
0,
10,
"100.0",
id="calculate_failures_with_0_failures",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_calculate_coverage_percentage(datafiles, failures, total, result):
audit_task = __init_descriptions(datafiles)
assert audit_task.calculate_coverage_percentage(misses=failures, total=total) == result
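
# Expected formula, inferred from the cases above:
#   coverage = round((total - misses) / total * 100, 1), with total == 0
#   treated as 0.0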
@pytest.mark.parametrize(
"data, total, result",
[
pytest.param(
[],
"0.0",
{},
id="check_results_with_data_being_empty",
),
pytest.param(
["column_A"],
"10.0",
{"column_A": "", "": "", "Total": "10.0"},
id="check_results_with_one_data_element",
),
pytest.param(
["column_A", "column_B"],
"10.0",
{"column_A": "", "column_B": "", "": "", "Total": "10.0"},
id="check_results_with_more_than_one_data_element",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_print_nicely_the_data(datafiles, data, total, result):
audit_task = __init_descriptions(datafiles)
assert audit_task.print_nicely_the_data(data=data, total=total) == result
@pytest.mark.parametrize(
"dbt_tests, model_name, call_input",
[
pytest.param(
{
"dim_company": [
{"name": "id", "tests": []},
{"name": "name", "tests": []},
{"name": "age", "tests": []},
{"name": "address", "tests": ["not_null"]},
{"name": "salary", "tests": ["unique"]},
],
"stg_customers": [{"name": "customer_id", "tests": ["unique", "not_null"]}],
},
"dim_company",
[
call(
columns=["Untested Columns", "% coverage"],
data={"age": "", "id": "", "name": "", "": "", "Total": "40.0"},
title="Test Coverage",
)
],
id="check_test_coverage_calculation",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_model_test_coverage(datafiles, mocker, dbt_tests, model_name, call_input):
create_table = mocker.patch("dbt_sugar.core.task.audit.AuditTask.create_table")
audit_task = __init_descriptions(datafiles)
audit_task.model_name = model_name
audit_task.dbt_tests = dbt_tests
audit_task.get_model_test_coverage()
create_table.assert_has_calls(call_input)
@pytest.mark.parametrize(
"dbt_tests, call_input",
[
pytest.param(
{
"dim_company": [
{"name": "id", "tests": []},
{"name": "name", "tests": []},
{"name": "age", "tests": []},
{"name": "address", "tests": ["not_null"]},
{"name": "salary", "tests": ["unique"]},
],
"stg_customers": [{"name": "customer_id", "tests": ["unique", "not_null"]}],
},
[
call(
columns=["Model Name", "% coverage"],
data={"dim_company": "40.0", "stg_customers": "100.0", "": "", "Total": "50.0"},
title="Test Coverage",
)
],
id="check_test_coverage_calculation",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_project_test_coverage(datafiles, mocker, dbt_tests, call_input):
create_table = mocker.patch("dbt_sugar.core.task.audit.AuditTask.create_table")
audit_task = __init_descriptions(datafiles)
audit_task.dbt_tests = dbt_tests
audit_task.get_project_test_coverage()
create_table.assert_has_calls(call_input)
@pytest.mark.parametrize(
"model_content, model_name, call_input",
[
pytest.param(
{
"version": 2,
"models": [
{
"name": "dim_company",
"description": "aa.",
"columns": [
{"name": "id", "description": "No description for this column."},
{"name": "name", "description": "No description for this column."},
{"name": "age", "description": "No description for this column."},
{
"name": "address",
"description": "No description for this column.",
"tests": ["not_null"],
},
{"name": "salary", "description": "hey.", "tests": ["unique"]},
],
}
],
},
"dim_company",
[
call(
columns=["Undocumented Columns", "% coverage"],
data={"id": "", "name": "", "age": "", "address": "", "": "", "Total": "20.0"},
title="Documentation Coverage",
)
],
id="check_column_description_coverage_calculation",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_model_column_description_coverage(
datafiles, mocker, model_content, model_name, call_input
):
    create_table = mocker.patch("dbt_sugar.core.task.audit.AuditTask.create_table")
    audit_task = __init_descriptions(datafiles)
audit_task.model_content = model_content
audit_task.model_name = model_name
audit_task.get_model_column_description_coverage()
create_table.assert_has_calls(call_input)
36265
import logging
from flask import Response, make_response, request
from microraiden import HTTPHeaders as header
from flask_restful.utils import unpack
from microraiden.channel_manager import (
ChannelManager,
)
from microraiden.exceptions import (
NoOpenChannel,
InvalidBalanceProof,
InvalidBalanceAmount,
InsufficientConfirmations
)
import microraiden.constants as constants
from microraiden.proxy.resources.request_data import RequestData
from functools import wraps
from eth_utils import is_address
log = logging.getLogger(__name__)
class Paywall(object):
def __init__(self,
channel_manager,
light_client_proxy=None
):
super().__init__()
assert isinstance(channel_manager, ChannelManager)
assert is_address(channel_manager.channel_manager_contract.address)
assert is_address(channel_manager.receiver)
self.contract_address = channel_manager.channel_manager_contract.address
self.receiver_address = channel_manager.receiver
self.channel_manager = channel_manager
self.light_client_proxy = light_client_proxy
def access(self, resource, method, *args, **kwargs):
if self.channel_manager.node_online() is False:
return "Ethereum node is not responding", 502
if self.channel_manager.get_eth_balance() < constants.PROXY_BALANCE_LIMIT:
return "Channel manager ETH balance is below limit", 502
try:
data = RequestData(request.headers, request.cookies)
except ValueError as e:
return str(e), 409
accepts_html = (
'text/html' in request.accept_mimetypes and
request.accept_mimetypes.best != '*/*'
)
headers = {}
price = resource.price()
# payment required
if price > 0:
paywall, headers = self.paywall_check(price, data)
if paywall and accepts_html is True:
reply_data = resource.get_paywall(request.path)
return self.reply_webui(reply_data, headers)
elif paywall:
return make_response('', 402, headers)
# all ok, return actual content
resp = method(request.path, *args, **kwargs)
# merge headers, resource headers take precedence
headers_lower = {key.lower(): value for key, value in headers.items()}
lower_to_case = {key.lower(): key for key in headers}
if isinstance(resp, Response):
resource_headers = (key for key, value in resp.headers)
else:
data, code, resource_headers = unpack(resp)
for key in resource_headers:
key_lower = key.lower()
if key_lower in headers_lower:
headers.pop(lower_to_case[key_lower])
if isinstance(resp, Response):
resp.headers.extend(headers)
return resp
else:
            headers.update(resource_headers)
            return make_response(str(data), code, headers)
def paywall_check(self, price, data):
"""Check if the resource can be sent to the client.
Returns (is_paywalled: Bool, http_headers: dict)
"""
headers = self.generate_headers(price)
if not data.balance_signature:
return True, headers
# try to get an existing channel
try:
channel = self.channel_manager.verify_balance_proof(
data.sender_address, data.open_block_number,
data.balance, data.balance_signature)
except InsufficientConfirmations as e:
log.debug('Refused payment: Insufficient confirmations (sender=%s, block=%d)' %
(data.sender_address, data.open_block_number))
headers.update({header.INSUF_CONFS: "1"})
return True, headers
except NoOpenChannel as e:
log.debug('Refused payment: Channel does not exist (sender=%s, block=%d)' %
(data.sender_address, data.open_block_number))
headers.update({header.NONEXISTING_CHANNEL: "1"})
return True, headers
except InvalidBalanceAmount as e:
log.debug('Refused payment: Invalid balance amount: %s (sender=%s, block=%d)' %
(str(e), data.sender_address, data.open_block_number))
            headers.update({header.INVALID_PROOF: "1"})
return True, headers
except InvalidBalanceProof as e:
log.debug('Refused payment: Invalid balance proof: %s (sender=%s, block=%d)' %
(str(e), data.sender_address, data.open_block_number))
            headers.update({header.INVALID_PROOF: "1"})
return True, headers
# set headers to reflect channel state
assert channel.sender is not None
assert channel.balance >= 0
headers.update(
{
header.SENDER_ADDRESS: channel.sender,
header.SENDER_BALANCE: channel.balance
})
if channel.last_signature is not None:
headers.update({header.BALANCE_SIGNATURE: channel.last_signature})
amount_sent = data.balance - channel.balance
if amount_sent != 0 and amount_sent != price:
            headers[header.INVALID_AMOUNT] = "1"
# if difference is 0, it will be handled by channel manager
return True, headers
# set the headers to reflect actual state of a channel
try:
self.channel_manager.register_payment(
channel.sender,
data.open_block_number,
data.balance,
data.balance_signature)
except (InvalidBalanceAmount, InvalidBalanceProof):
# balance sent to the proxy is less than in the previous proof
return True, headers
# all ok, return premium content
return False, headers
    # These headers accompany every paywalled response.
    def generate_headers(self, price: int):
        """Generate basic headers that are sent back for every request."""
        assert price > 0
headers = {
header.GATEWAY_PATH: constants.API_PATH,
header.RECEIVER_ADDRESS: self.receiver_address,
header.CONTRACT_ADDRESS: self.contract_address,
header.TOKEN_ADDRESS: self.channel_manager.get_token_address(),
header.PRICE: price,
'Content-Type': 'application/json'
}
return headers
    def reply_webui(self, reply_data='', headers: dict = None):
        # Avoid a shared mutable default; copy so the caller's dict is untouched.
        headers = dict(headers or {})
        headers.update({
            "Content-Type": "text/html",
        })
reply = make_response(reply_data, 402, headers)
for k, v in headers.items():
if k.startswith('RDN-'):
reply.set_cookie(k, str(v))
return reply
def paywall_decorator(func):
"""Method decorator for Flask's Resource object. It magically makes
every method paywalled.
Example:
class MyPaywalledResource(Resource):
method_decorators = [paywall_decorator]
"""
@wraps(func)
def wrapper(*args, **kwargs):
self = func.__self__ # get instance of the bound method
return self.paywall.access(
self,
func,
*args,
**kwargs
)
return wrapper
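
# Usage sketch (hypothetical resource; price() and get_paywall() are the two
# hooks Paywall.access calls on the resource):
#
#   class MyPaywalledResource(Resource):
#       method_decorators = [paywall_decorator]
#
#       def price(self):
#           return 5
#
#       def get_paywall(self, path):
#           return "<html>Payment required</html>"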
36272
from scipy import linalg
from sklearn.decomposition import PCA
from scipy.optimize import linear_sum_assignment as linear_assignment
import numpy as np
"""
A function that takes a list of clusters, and a list of centroids for each cluster, and outputs the N max closest images in each cluster to its centroids
"""
def closest_to_centroid(clusters,centroids,nb_closest=20):
output = [[] for i in range(len(centroids))]
#print(clusters)
for i in range(len(centroids)):
centroid = centroids[i]
cluster = clusters[i]
        try:
            # Move any CUDA tensors back to the CPU before converting.
            cluster_temp = [x.cpu() if x.is_cuda else x for x in cluster]
        except AttributeError:
            # Plain arrays/lists have no .is_cuda attribute.
            cluster_temp = cluster
        cluster = [list(x) for x in cluster_temp]
nb_components = 7 if len(cluster)>10 else len(cluster) - 1
pca = PCA(n_components=nb_components) #args.sty_dim)
if len(cluster) > nb_closest :
cluster = pca.fit_transform(cluster)
centroid = centroid.reshape(1, -1)
centroid = pca.transform(centroid)
        distances = [linalg.norm(x - centroid) for x in cluster]
        # Sort a copy: sorting in place would also reorder the list used to
        # build the mask below, since both names would point at the same list.
        closest = sorted(distances)[:nb_closest]
        output[i] = [x in closest for x in distances]
return output
def cluster_acc(y_true, y_pred):
"""
    Calculate clustering accuracy. Requires scipy to be installed.
# Arguments
y: true labels, numpy.array with shape `(n_samples,)`
y_pred: predicted labels, numpy.array with shape `(n_samples,)`
# Return
accuracy, in [0,1]
"""
y_true = y_true.astype(np.int64)
assert y_pred.size == y_true.size
D = max(y_pred.max(), y_true.max()) + 1
w = np.zeros((D, D), dtype=np.int64)
for i in range(y_pred.size):
w[y_pred[i], y_true[i]] += 1
ind = linear_assignment(w.max() - w)
indi = list(ind[0])
indj = list(ind[1])
the_sum = sum([w[i, j] for i, j in zip(indi,indj)])
return the_sum * 1.0 / y_pred.size
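
# Minimal sanity check (assumed usage): labels that match up to a relabeling
# of cluster ids should score a perfect 1.0.
if __name__ == "__main__":
    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_pred = np.array([1, 1, 0, 0, 2, 2])  # same partition, clusters renamed
    assert cluster_acc(y_true, y_pred) == 1.0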
36278
import copy
import time
from datetime import datetime
import binascii
import graphviz
import random
from .models.enums.start_location import StartLocation
from .models.enums.goal import Goal
from .models.enums.statue_req import StatueReq
from .models.enums.entrance_shuffle import EntranceShuffle
from .models.enums.enemizer import Enemizer
from .models.enums.logic import Logic
from .models.randomizer_data import RandomizerData
MAX_INVENTORY = 15
PROGRESS_ADJ = [1.5, 1.25, 1, 0.75] # Required items are more likely to be placed in easier modes
MAX_CYCLES = 100
INACCESSIBLE = 9999
class World:
# Assigns item to location
def fill_item(self, item, location=-1,test=False,override_restrictions=False,print_log=False):
if location == -1:
return False
elif self.item_locations[location][2]:
if print_log:
print("ERROR: Attempted to place an item in a full location")
return False
elif item in self.item_locations[location][4] and not override_restrictions:
if print_log:
print("ERROR: Attempt to place item in a restricted location:",[self.item_pool[item][3],self.item_locations[location][9]])
return False
elif test:
return True
self.item_pool[item][0] -= 1
self.item_locations[location][2] = True
self.item_locations[location][3] = item
if print_log:
print(" ",self.item_pool[item][3],"->",self.item_locations[location][9])
if self.is_accessible(self.item_locations[location][0]):
self.items_collected.append(item)
if location in self.open_locations[0]:
self.open_locations[0].remove(location)
elif location in self.open_locations[1]:
self.open_locations[1].remove(location)
self.placement_log.append([item, location])
#if self.item_locations[location][1] == 2:
# self.check_logic()
return True
# Removes an assigned item and returns it to item pool
def unfill_item(self, location=-1, print_log=False):
if location == -1:
return -1
elif not self.item_locations[location][2]:
return -1
item = self.item_locations[location][3]
self.item_locations[location][2] = False
self.item_locations[location][3] = 0
self.item_pool[item][0] += 1
if print_log:
print(" ",self.item_pool[item][3],"<-",self.item_locations[location][9],"removed")
if self.is_accessible(self.item_locations[location][0]):
if item in self.items_collected:
self.items_collected.remove(item)
type = self.item_pool[item][1]
if location not in self.open_locations[type-1]:
self.open_locations[type-1].append(location)
for x in self.placement_log:
if x[1] == location:
self.placement_log.remove(x)
return item
    # Expands the item pool into a flat list of item IDs (one entry per copy), returns list
def list_item_pool(self, type=0, items=[], progress_type=0):
item_list = []
for x in self.item_pool:
if not items or x in items:
if not type or type == self.item_pool[x][1]:
if not progress_type or progress_type == self.item_pool[x][5]:
i = 0
while i < self.item_pool[x][0]:
item_list.append(x)
i += 1
return item_list
# Returns a list of unfilled item locations
def list_item_locations(self):
locations = []
for x in self.item_locations:
locations.append(x)
return locations
# Returns list of graph edges
def list_logic(self):
edges = []
for x in self.logic:
edges.append(x)
return edges
# Checks if one list is contained inside another list
def is_sublist(self, list, sublist):
if sublist == []:
return True
elif sublist == list:
return True
elif len(sublist) > len(list):
return False
l = list[:]
for x in sublist:
if x in l:
l.remove(x)
else:
return False
return True
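
    # e.g. is_sublist([1, 2, 2, 3], [2, 3]) -> True  (multiset containment)
    #      is_sublist([1, 2, 3], [2, 2])    -> False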
# Returns lists of accessible item, ability, and statue locations
def find_open_locations(self):
# Accessible open location for items, abilities, and Mystic Statues
locations = [[], [], [], []]
for x in self.item_locations:
region = self.item_locations[x][0]
type = self.item_locations[x][1]
if self.graph[region][0] and not self.item_locations[x][2]:
locations[type - 1].append(x)
self.open_locations[0] = locations[0][:]
self.open_locations[1] = locations[1][:]
return locations
# Returns graph node of an item location
def location_node(self, location_id=-1,print_log=False):
if location_id not in self.item_locations:
if print_log:
print("ERROR: Invalid item location", location_id)
return False
else:
return self.item_locations[location_id][0]
# Returns whether an item location is already filled with an item
def is_filled(self, location_id=-1,print_log=False):
if location_id not in self.item_locations:
if print_log:
print("ERROR: Invalid item location", location_id)
return False
else:
return self.item_locations[location_id][2]
    # Returns whether the given graph node is currently accessible
def is_accessible(self, node_id=-1):
if node_id not in self.graph:
return False
elif self.graph[node_id][0]:
return True
else:
return False
# Zeroes out accessible flags for all world regions
def unsolve(self,reset_graph=False):
for x in self.graph:
self.graph[x][0] = False
if reset_graph:
self.graph[x][4] = 0
self.graph[x][8].clear()
self.graph[x][9].clear()
self.graph[x][10] = self.graph[x][1][:]
for x in self.logic:
if self.logic[x][0] == 1:
self.logic[x][0] = 0
return True
# Resets collected items and other traversal data
def reset_progress(self,reset_graph=False):
self.visited.clear()
self.items_collected.clear()
self.item_destinations.clear()
self.open_locations = [[],[]]
self.open_edges = []
self.unsolve(reset_graph)
return True
# Finds every accessible node in the graph
# Collects items into self.items_collected, edges into self.open_edges
    def traverse(self, to_visit=None, test=False, print_log=False):
        if print_log:
            print(" Beginning traversal...")
        visited = []
        new_items = []
        # Copy the argument so the caller's list is never mutated, and avoid a
        # mutable default argument.
        to_visit = list(to_visit) if to_visit else [0]
while to_visit:
node = to_visit.pop(0)
visited.append(node)
if print_log:
print(" Visiting:",self.graph[node][5])
# If we haven't been here yet...
if not self.graph[node][0]:
# Get the newly-accessible items and record open item/ability locations
new_items += self.visit_node(node,test,print_log)
# Queue up newly-accessible places to visit
for x in self.graph[node][10]:
if x != node and not self.is_accessible(x) and x not in to_visit+visited:
to_visit.insert(0,x)
if print_log:
print(" -Discovered:",self.graph[x][5])
# If we've run out of places to visit, check if logic has opened up any new nodes
if not to_visit:
open_edges = self.get_open_edges(visited)
bad_edges = []
if print_log:
print(" Ran out of places - updating logic:")
for edge in open_edges:
dest = self.logic[edge][2]
if self.check_edge(edge,[],False) and dest not in to_visit:
self.logic[edge][0] = 1
to_visit.append(dest)
if print_log:
print(" -Discovered:",self.graph[dest][5])
else:
bad_edges.append(edge)
if not test:
self.open_edges = bad_edges
return [visited,new_items]
# Return list of logic edges that originate in an accessible node and end in an inaccessible node
def get_open_edges(self,nodes=[]):
test_edges = self.open_edges[:]
open_edges = []
for x in nodes:
if not self.is_accessible(x):
test_edges += self.graph[x][12]
for edge in test_edges:
origin = self.logic[edge][1]
dest = self.logic[edge][2]
if self.logic[edge][0] >= 0 and not self.is_accessible(dest) and dest not in nodes:
open_edges.append(edge)
return open_edges
# Visit a node, update graph info, return new items collected
def visit_node(self,node,test=False,print_log=False):
if not test and not self.graph[node][0]:
self.graph[node][0] = True
self.visited.append(node)
self.item_destinations += self.graph[node][6]
self.open_edges += self.graph[node][12]
return self.collect_items(node,test,print_log)
# Collect all items in given node
def collect_items(self,node=-1,test=False,print_log=False):
if node not in self.graph:
return False
items_found = []
for location in self.graph[node][11]:
if self.item_locations[location][2]:
items_found.append(self.item_locations[location][3])
if not test:
self.items_collected.append(self.item_locations[location][3])
if print_log:
print(" -Collected:",self.item_pool[self.item_locations[location][3]][3])
elif self.item_locations[location][1] == 1 and not test:
self.open_locations[0].append(location)
if print_log:
print(" -Discovered:",self.item_locations[location][9])
elif self.item_locations[location][1] == 2 and not test:
self.open_locations[1].append(location)
if print_log:
print(" -Discovered:",self.item_locations[location][9])
return items_found
# Returns full list of accessible locations
def accessible_locations(self, item_locations):
accessible = []
for x in item_locations:
region = self.item_locations[x][0]
if self.is_accessible(region):
accessible.append(x)
return accessible
# Returns full list of inaccessible locations
def inaccessible_locations(self, item_locations):
inaccessible = []
for x in item_locations:
region = self.item_locations[x][0]
if not self.is_accessible(region):
inaccessible.append(x)
return inaccessible
# Fill a list of items randomly in a list of locations
def random_fill(self, items=[], item_locations=[], accessible=True, print_log=False):
if not items:
return True
elif not item_locations:
return False
to_place = items[:]
to_fill = item_locations[:]
while to_place:
item = to_place.pop(0)
item_type = self.item_pool[item][1]
placed = False
i = 0
for dest in to_fill:
if not placed:
region = self.item_locations[dest][0]
location_type = self.item_locations[dest][1]
filled = self.item_locations[dest][2]
restrictions = self.item_locations[dest][4]
if not filled and item_type == location_type and item not in restrictions:
if not accessible or region != INACCESSIBLE:
if self.fill_item(item, dest, False, False, print_log):
to_fill.remove(dest)
placed = True
return True
# Place list of items into random accessible locations
def forward_fill(self, items=[], item_locations=[], test=False, override_restrictions=False, print_log=False):
if not items:
return True
elif not item_locations:
if print_log:
print("ERROR: No item locations given")
return False
to_place = items[:]
to_fill =[[],[],[]]
for loc in item_locations:
if not self.item_locations[loc][2] and self.is_accessible(self.item_locations[loc][0]):
loc_type = self.item_locations[loc][1]
to_fill[loc_type-1].append(loc)
quarantine = [[],[],[]]
filled_locations = []
while to_place:
item = to_place.pop(0)
item_type = self.item_pool[item][1]
filled = False
while not filled and to_fill[item_type-1]:
location = to_fill[item_type-1].pop(0)
if self.fill_item(item,location,test,override_restrictions,print_log):
filled = True
filled_locations.append(location)
to_fill[item_type-1] += quarantine[item_type-1]
else:
quarantine[item_type-1].append(location)
items.append(item)
if not filled:
if print_log:
print("ERROR: Not enough room to place items")
return False
return True
# Convert a prerequisite to a list of items needed to fulfill it
def items_needed(self, edge=0):
if not edge:
return []
prereq = []
for req in self.logic[edge][4]:
item = req[0]
ct = req[1]
i = 0
while i < ct:
prereq.append(item)
i += 1
if not self.items_collected:
return prereq
prereq_new = []
items_new = self.items_collected[:]
while prereq:
x = prereq.pop(0)
if x in items_new:
items_new.remove(x)
else:
prereq_new.append(x)
return prereq_new
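
    # e.g. with logic[edge][4] == [[3, 2], [7, 1]] (hypothetical data) and
    # items_collected == [3], items_needed returns [3, 7]: one more copy of
    # item 3, plus item 7.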
# Returns list of item combinations that grant progression
# Returns progression list in the following categories: [[available],[not enough room],[too many inventory items]]
def progression_list(self,open_edges=[]):
if not open_edges:
open_edges = self.get_open_edges()
all_items = self.list_item_pool(1)
#open_locations = self.find_open_locations()
open_locations = len(self.open_locations[0])
prereq_list = [[],[],[]] # [[available],[not enough room],[too many inventory items]]
ds_list = []
for edge in open_edges:
prereq = self.items_needed(edge)
if prereq and prereq not in prereq_list[0] and self.is_sublist(all_items, prereq):
if prereq not in prereq_list[1] and not self.forward_fill(prereq,self.open_locations[0],True,self.logic_mode == "Chaos"):
prereq_list[1].append(prereq)
elif prereq not in prereq_list[2]:
dest = self.logic[edge][2]
traverse_result = self.traverse([dest],True)
new_nodes = traverse_result[0]
start_items_temp = self.items_collected[:] + prereq + traverse_result[1]
item_destinations_temp = self.item_destinations[:]
for x in new_nodes:
item_destinations_temp += self.graph[x][6]
inv_temp = self.get_inventory(start_items_temp,item_destinations_temp)
if len(inv_temp) <= MAX_INVENTORY:
if self.entrance_shuffle == "None" or self.check_ds_access(dest,False,start_items_temp):
prereq_list[0].append(prereq)
else:
ds_list.append(prereq)
else:
prereq_list[2].append(prereq)
if prereq_list == [[],[],[]]:
prereq_list[0] += ds_list
return prereq_list
# Find and clear non-progression item to make room for progression item
def make_room(self, progression_result, print_log=False):
# For inventory bottlenecks, remove one inventory item and try again
if not progression_result[1] and progression_result[2]:
return self.remove_nonprog(1,0,True,print_log)
success = False
for node in self.visited:
if not success:
for x in self.graph[node][11]:
if self.is_filled(x) and self.item_pool[self.item_locations[x][3]][5]>1:
if self.unfill_item(x,print_log):
success = True
return success
    #### THIS IS OLD, OBSOLETE CODE
# non_prog_locations = [[],[]]
# open_locations = len(self.open_locations[0])
# open_abilities = len(self.open_locations[1])
# unfilled = []
# min_prereqs = []
# min_item_ct = 0
# min_ability_ct = 0
# progression_list = progression_result[1][:]
# while progression_list:
# prereq = progression_list.pop(0)
# items_needed = -open_locations
# abilities_needed = -open_abilities
# for x in prereq:
# if self.item_pool[x][1] == 1:
# items_needed += 1
# elif self.item_pool[x][1] == 2:
# abilities_needed += 1
# items_needed = max(0,items_needed)
# abilities_needed = max(0,abilities_needed)
# if not min_prereqs or min_item_ct+min_ability_ct > items_needed + abilities_needed:
# min_prereqs = [prereq]
# min_item_ct = items_needed
# min_ability_ct = abilities_needed
# elif min_prereqs and min_item_ct == items_needed and min_ability_ct == abilities_needed:
# min_prereqs.append(prereq)
#
# if not self.remove_nonprog(min_item_ct,min_ability_ct,False,print_log):
# if print_log:
# print("ERROR: Could not make room")
# return False
#
# return min_prereqs
# Remove an accessible non-progression item to make room for a progression item
def remove_nonprog(self,item_ct=0,ability_ct=0,inv=False,print_log=False):
junk_locations = [[],[]]
quest_locations = [[],[]]
for location in self.item_locations:
if self.item_locations[location][2] and self.is_accessible(self.item_locations[location][0]):
item = self.item_locations[location][3]
type = self.item_pool[item][1]
prog_type = self.item_pool[item][5]
inv_type = self.item_pool[item][4]
if type <= 2:
if prog_type == 2:
quest_locations[type-1].append(location)
elif prog_type == 3:
if not inv or inv_type:
junk_locations[type-1].append(location)
random.shuffle(junk_locations[0])
random.shuffle(junk_locations[1])
random.shuffle(quest_locations[0])
random.shuffle(quest_locations[1])
quest = False
type = 1
locations = junk_locations[0]
count = item_ct
done = False
items_removed = []
while not done:
if not count and type == 1:
                type = 2
count = ability_ct
quest = False
locations = junk_locations[1]
if not count and type == 2:
done = True
else:
if not locations and not quest:
quest = True
locations = quest_locations[type-1]
if not locations:
if print_log:
print("ERROR: Not enough room")
return False
location = locations.pop(0)
items_removed.append(self.unfill_item(location))
count -= 1
if print_log:
print(" Removed these items:",items_removed)
return items_removed
# Converts a progression list into a normalized Monte Carlo distribution
def monte_carlo(self, progression_ls=[], start_items=[]):
if not progression_ls:
return []
progression = progression_ls[:]
items = self.list_item_pool(1)
abilities = self.list_item_pool(2)
all_items = items + abilities
sum_items = len(items)
sum_abilities = len(abilities)
probability = []
monte_carlo = []
sum_prob = 0
sum_edges = 0
probabilities = []
idx = 0
while progression:
current_prereq = progression.pop(0)
prereqs = current_prereq[:]
probability = 1.0
i = 0
j = 0
while prereqs:
item = prereqs.pop(0)
if item in all_items:
if self.item_pool[item][1] == 1:
probability *= float(self.item_pool[item][0]) / float((sum_items - i))
i += 1
elif self.item_pool[item][1] == 2:
probability *= float(self.item_pool[item][0]) / float((sum_abilities - j))
j += 1
if item in self.required_items:
probability *= PROGRESS_ADJ[self.difficulty]
probabilities.append([probability, idx])
sum_prob += probability
sum_edges += 1
idx += 1
prob_adj = 100.0 / sum_prob
rolling_sum = 0.0
for x in probabilities:
x[0] = x[0] * prob_adj + rolling_sum
rolling_sum = x[0]
# print probabilities
return probabilities
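
    # The returned list is a cumulative distribution: each entry is
    # [upper_bound_percent, progression_index], so a roll of
    # random.uniform(0, 100) selects the first entry whose bound exceeds it.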
# Returns a list of map lists, by boss
def get_maps(self):
maps = [[], [], [], [], [], [], []]
for map in self.maps:
boss = self.maps[map][1]
maps[boss].append(map)
maps.pop(0)
return maps
# Randomize map-clearing rewards
def map_rewards(self):
maps = self.get_maps()
# print maps
for area in maps:
random.shuffle(area)
boss_rewards = 4
# Total rewards by type, by level (HP/STR/DEF)
if "Z3 Mode" in self.variant:
rewards_tier1 = [1] * 6 # Expert: 6 HP
rewards_tier2 = [1] * 6 # Advanced: 12 HP
rewards_tier3 = [1] * 6 # Intermediate: 18 HP
rewards_tier4 = [] # Beginner: 18 HP
        else:  # Standard mode: a mix of HP/STR/DEF upgrades
rewards_tier1 = [1,1,1,1,1,1] # Expert: 6/0/0
rewards_tier2 = [1,1,2,2,3,3] # Advanced: 8/2/2
rewards_tier3 = [1,1,2,2,3,3] # Intermediate: 10/4/4
rewards_tier4 = [2,2,2,3,3,3] # Beginner: 10/7/7
# Remove HP upgrades in OHKO
if "OHKO" in self.variant:
for n, i in enumerate(rewards_tier1):
if i == 1:
rewards_tier1[n] = 0
for n, i in enumerate(rewards_tier2):
if i == 1:
rewards_tier2[n] = 0
for n, i in enumerate(rewards_tier3):
if i == 1:
rewards_tier3[n] = 0
for n, i in enumerate(rewards_tier4):
if i == 1:
rewards_tier4[n] = 0
random.shuffle(rewards_tier1)
random.shuffle(rewards_tier2)
random.shuffle(rewards_tier3)
random.shuffle(rewards_tier4)
# Allocate rewards to maps
for area in maps:
random.shuffle(area)
self.maps[area[0]][2] = [rewards_tier1.pop(0),1]
self.maps[area[1]][2] = [rewards_tier2.pop(0),2]
self.maps[area[2]][2] = [rewards_tier3.pop(0),3]
if rewards_tier4:
self.maps[area[3]][2] = [rewards_tier4.pop(0),4]
else:
self.maps[area[3]][2] = [0,4]
# Place Mystic Statues in World
def fill_statues(self, locations=[148, 149, 150, 151, 152, 153]):
if self.statue_req == StatueReq.PLAYER_CHOICE.value:
return self.random_fill([106]*6, locations)
return self.random_fill([100, 101, 102, 103, 104, 105], locations)
def lock_dark_spaces(self,print_log=False):
nodes = []
for edge in self.logic:
if self.logic[edge][0] >-1 and self.logic[edge][3]:
nodes.append(self.logic[edge][1])
for node in nodes:
if not self.check_ds_access(node, True):
if print_log:
print("ERROR: No Dark Space could be accessed ")
return False
else:
found_locked_ds = False
nodes_to_check = self.graph[node][9][:]
random.shuffle(nodes_to_check)
while not found_locked_ds and nodes_to_check:
ds_node = nodes_to_check.pop(0)
ds_loc = self.ds_locations[self.ds_nodes.index(ds_node)]
if self.item_locations[ds_loc][2] and not self.item_locations[ds_loc][3]:
found_locked_ds = True
#if print_log:
# print(" -Found:",self.item_locations[ds_loc][9])
if not found_locked_ds:
self.item_locations[ds_loc][2] = True
if self.item_locations[ds_loc][3]:
self.unfill_item(ds_loc)
if print_log:
print(" -Locked:",self.item_locations[ds_loc][9])
return True
    # Returns an exit's coupled sister exit, or False if the exit is uncoupled
def is_exit_coupled(self,exit,print_log=False):
if exit not in self.exits:
return False
if self.exits[exit][0]:
sister_exit = self.exits[exit][0]
if self.exits[sister_exit][0] == exit:
return sister_exit
else:
if print_log:
print("WARNING: Exits linked incorrectly",exit,sister_exit)
return sister_exit
return False
# Determine an exit's direction (e.g. outside to inside)
def exit_direction(self,exit):
if exit not in self.exits:
return False
origin = self.exits[exit][3]
dest = self.exits[exit][4]
if self.graph[origin][2] == 2:
o_type = 2
else:
o_type = 1
if self.graph[dest][2] == 2:
d_type = 2
else:
d_type = 1
# return (o_type,d_type)
if o_type == 2 and d_type == 2:
return (1,1)
else:
return d_type
# Get lists of unmatched origin/destination exits
# def get_remaining_exits(self):
# exits_remaining = [[],[]]
# for exit in self.exits:
# if self.exits[exit][1] == -1:
# exits_remaining[0].append(exit)
# if self.exits[exit][2] == -1:
# exits_remaining[1].append(exit)
# return exits_remaining
# Link one exit to another
def link_exits(self, origin_exit, dest_exit, print_log=False, check_connections=True, update_graph=True):
if origin_exit not in self.exits:
if print_log:
print("ERROR: Invalid origin (link)", origin_exit)
return False
if dest_exit not in self.exits:
if print_log:
print("ERROR: Invalid destination (link)", dest_exit)
return False
if print_log and self.exits[origin_exit][1] != -1 and origin_exit > 21:
print("WARNING: Origin already linked", origin_exit)
if print_log and self.exits[dest_exit][2] != -1 and dest_exit > 21:
print("WARNING: Destination already linked", dest_exit)
self.exits[origin_exit][1] = dest_exit
self.exits[dest_exit][2] = origin_exit
self.exit_log.append([origin_exit,dest_exit])
if print_log:
print(" Linked",self.exits[origin_exit][10], "-", self.exits[dest_exit][10])
if update_graph and self.exits[origin_exit][5]:
origin = self.exits[origin_exit][3]
dest = self.exits[dest_exit][4]
if dest not in self.graph[origin][1]:
self.graph[origin][1].append(dest)
self.new_connection(origin,dest)
if (origin_exit <= 21 or self.entrance_shuffle != "Uncoupled") and check_connections and self.is_exit_coupled(origin_exit) and self.is_exit_coupled(dest_exit):
new_origin = self.exits[dest_exit][0]
new_dest = self.exits[origin_exit][0]
if new_origin <= 21: # Boss exits
if self.exits[new_origin][5] or new_origin in self.exits_detailed:
self.link_exits(new_origin, new_dest, print_log, False, update_graph)
else:
if self.exits[new_origin][1] != -1 or self.exits[new_dest][2] != -1:
if print_log:
print("WARNING: Return exit already linked:",new_origin,new_dest)
else:
self.link_exits(new_origin, new_dest, print_log, False, update_graph)
return True
# Unlinks two previously linked exits
def unlink_exits(self, origin_exit, dest_exit, print_log=False, check_connections=True, update_graph=True):
if origin_exit not in self.exits:
if print_log:
print("ERROR: Invalid origin (unlink)", origin_exit)
return False
if dest_exit not in self.exits:
if print_log:
print("ERROR: Invalid destination (unlink)", dest_exit)
return False
        if print_log and (self.exits[origin_exit][1] != dest_exit or self.exits[dest_exit][2] != origin_exit):
            print("WARNING: Attempted to unlink exits that are not correctly linked:", origin_exit, dest_exit)
self.exits[origin_exit][1] = -1
self.exits[dest_exit][2] = -1
for x in self.exit_log:
if x[0] == origin_exit:
self.exit_log.remove(x)
if print_log:
print(" Unlinked",self.exits[origin_exit][10], "-", self.exits[dest_exit][10])
if update_graph and self.exits[origin_exit][5]:
origin = self.exits[origin_exit][3]
dest = self.exits[dest_exit][4]
if dest in self.graph[origin][1]:
self.graph[origin][1].remove(dest)
if dest in self.graph[origin][10]:
self.graph[origin][10].remove(dest)
if self.entrance_shuffle != "Uncoupled" and check_connections and self.is_exit_coupled(origin_exit) and self.is_exit_coupled(dest_exit):
new_origin = self.exits[dest_exit][0]
new_dest = self.exits[origin_exit][0]
self.unlink_exits(new_origin, new_dest, print_log, False, update_graph)
if check_connections and update_graph:
self.update_graph(True,True,True,print_log)
return True
def print_exit_log(self,exit_log=[]):
for origin,dest in exit_log:
print(self.exits[origin][10],"-",self.exits[dest][10])
# Returns lists of origin exits and destination exits that open up new nodes
def get_open_exits(self,check_progression=False):
open_exits = [[],[]]
for node in self.graph:
if not check_progression or self.is_accessible(node):
for exit in self.graph[node][14]:
if self.exits[exit][1] == -1:
open_exits[0].append(exit)
if not check_progression or not self.is_accessible(node):
for exit in self.graph[node][15]:
if self.exits[exit][2] == -1:
open_exits[1].append(exit)
return open_exits
# Takes a list of origin and destination exits, returns a suitable match
def find_exit(self,origin_exits_ls=[],dest_exits_ls=[],print_log=False,check_direction=False,check_progression=False,check_ds_access=False,test=False):
if not origin_exits_ls:
if print_log:
print("ERROR: No accessible exits available")
return False
elif not dest_exits_ls:
if print_log:
print("ERROR: No destination exits available")
return False
origin_exits = origin_exits_ls[:]
dest_exits = dest_exits_ls[:]
done = False
quarantine_o = []
while not done and origin_exits:
origin_exit = 0
while not origin_exit and origin_exits:
origin_exit = origin_exits.pop(0)
origin = self.exits[origin_exit][3]
sister_exit = self.exits[origin_exit][0]
if self.exits[origin_exit][1] != -1 or (check_progression and not self.is_accessible(origin)):
origin_exit = 0
if not origin_exit:
if print_log:
print("ERROR: No accessible exits available")
return False
direction = self.exit_direction(origin_exit)
dest_exit = 0
quarantine_d = []
while not done and dest_exits:
try_link = False
while not dest_exit and dest_exits:
dest_exit = dest_exits.pop(0)
dest = self.exits[dest_exit][4]
if self.exits[dest_exit][2] != -1 or (check_progression and self.is_accessible(dest)):
dest_exit = 0
if not dest_exit:
if print_log:
print("ERROR: No destination exits available")
return False
direction_new = self.exit_direction(dest_exit)
if dest_exit != sister_exit and (not check_direction or direction_new == direction):
try_link = True
if self.link_exits(origin_exit,dest_exit,print_log,self.entrance_shuffle != "Uncoupled",True):
if True: # or not check_ds_access or self.check_ds_access(dest):
done = True
origin_final = origin_exit
dest_final = dest_exit
if not done:
quarantine_d.append(dest_exit)
if try_link:
self.unlink_exits(origin_exit,dest_exit,print_log,True,True)
dest_exit = 0
if not done:
quarantine_o.append(origin_exit)
dest_exits += quarantine_d
quarantine_d.clear()
if not done:
if print_log:
print("ERROR: No suitable links could be found - in quarantine:",quarantine_o)
return False
# Clean up O/D lists
        origin_exits += quarantine_o
        # Filter with comprehensions: removing from a list while iterating it
        # skips elements.
        origin_exits = [exit for exit in origin_exits if self.exits[exit][1] == -1]
        dest_exits = [exit for exit in dest_exits if self.exits[exit][2] == -1]
return [origin_final,dest_final,origin_exits,dest_exits]
# Check if you can access one node from another
def check_access(self,origin=-1,dest=-1,check_mutual=False,print_log=False):
if origin not in self.graph or dest not in self.graph:
return False
if self.graph[origin][7] or self.graph[dest][7]:
return False
success = False
if origin == dest or dest in self.graph[origin][10]:
success = True
to_visit = self.graph[origin][10][:]
visited = [origin]
while not success and to_visit:
node = to_visit.pop(0)
visited.append(node)
if not self.graph[node][7] and dest in self.graph[node][10]:
success = True
else:
for x in self.graph[node][10]:
if x not in to_visit+visited:
to_visit.append(x)
if not check_mutual or not success:
return success
return self.check_access(dest,origin,False,print_log)
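# Illustrative use (hedged): mutual accessibility between two nodes, e.g.
#   if self.check_access(node_a, node_b, check_mutual=True):
#       ...  # node_a and node_b can each reach the other
# This is how build_islands() below groups nodes into islands.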
# Build islands, i.e. mutually-accessible nodes
def build_islands(self,print_log=False):
islands = []
visited = []
start_island = []
for node in self.graph:
if node not in visited and self.graph[node][2]:
to_visit = [node]
new_nodes = []
origin_exits = []
dest_exits = []
origin_logic = []
dest_logic = []
is_start = False
is_island = False
while to_visit:
x = to_visit.pop(0)
visited.append(x)
new_nodes.append(x)
if 0 in self.graph[x][8]:
is_start = True
for exit in self.graph[x][14]:
if self.exits[exit][1] == -1:
origin_exits.append(exit)
for exit in self.graph[x][15]:
if self.exits[exit][2] == -1:
dest_exits.append(exit)
for edge in self.graph[x][12]:
if self.logic[edge][0] == 0:
origin_logic.append(edge)
for edge in self.graph[x][13]:
if self.logic[edge][0] == 0:
dest_logic.append(edge)
for y in self.graph[x][10]:
if y not in visited+to_visit:
if self.check_access(x,y,True,print_log):
to_visit.append(y)
island = [new_nodes,origin_exits,dest_exits,origin_logic,dest_logic]
if is_start:
start_island = island
else:
islands.append(island)
return [start_island,islands]
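# Sketch of the return shape (from the code above; names illustrative):
#   start_island, islands = self.build_islands()
#   nodes, origin_exits, dest_exits, origin_logic, dest_logic = islands[0]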
# Entrance randomizer
def shuffle_exits(self,print_log=False):
# Map passages and internal dungeon exits to graph and list all available exits
one_way_exits = []
for x in self.exits:
if self.is_exit_coupled(x) and (not self.exits[x][3] or not self.exits[x][4]): # Map missing O/D data for coupled exits
xprime = self.exits[x][0]
self.exits[x][3] = self.exits[xprime][4]
self.exits[x][4] = self.exits[xprime][3]
if not self.exits[x][1] and (self.exits[x][5] or self.exits[x][6]) and not self.exits[x][7] and (not self.exits[x][8] or self.exits[x][9]):
self.exits[x][1] = -1 # Mark exit for shuffling
self.exits[x][2] = -1
if not self.is_exit_coupled(x):
one_way_exits.append(x)
self.graph[self.exits[x][3]][14].append(x)
self.graph[self.exits[x][4]][15].append(x)
# Preserve Mu key door link
self.link_exits(310,310,print_log)
# Set aside Jeweler's final exit in RJH seeds
if self.goal == "Red Jewel Hunt":
self.link_exits(720,720,print_log)
# If in Coupled mode, map one_way exits first
exit_log = []
if self.entrance_shuffle == "Coupled":
one_way_dest = one_way_exits[:]
random.shuffle(one_way_dest)
while one_way_exits:
exit1 = one_way_exits.pop()
exit2 = one_way_dest.pop()
self.link_exits(exit1, exit2, print_log, False)
exit_log.append([exit1,exit2])
if print_log:
print( "One-way exits mapped")
# Assume all items and abilities
all_items = self.list_item_pool(1) + self.list_item_pool(2)
self.items_collected = all_items
self.update_graph(True,True,True,print_log)
if print_log:
print(" Graph updated. Beginning exit shuffle...")
# for x in self.graph:
# print(x,self.graph[x])
# Build world skeleton with islands
self.unsolve()
island_result = self.build_islands()
start_island = island_result[0]
islands = island_result[1]
islands_built = []
traverse_result = self.traverse()
visited = traverse_result[0]
origin_exits = []
for node in visited:
origin_exits += self.graph[node][14]
if print_log:
# i = 0
# for x in islands:
# i += 1
# print("Island",i,x[1],x[2])
# for y in x[0]:
# print("-",self.graph[y][5])
print(" Assembling islands...")
random.shuffle(islands)
check_direction = True
check_progression = True
quarantine = []
while islands:
island = islands.pop(0)
nodes_new = island[0]
origin_exits_new = island[1]
dest_exits_new = island[2]
# if print_log:
# for y in nodes_new:
# print("-",self.graph[y][5])
if not dest_exits_new or not origin_exits_new or self.is_accessible(nodes_new[0]):
pass # NOT ELIGIBLE: island has no open exits or is already accessible
else:
if (check_progression and not origin_exits_new) or (self.entrance_shuffle == "Coupled" and (len(origin_exits_new) < 2 or len(dest_exits_new) < 2)):
quarantine.append(island)
# if print_log:
# print(" REJECTED")
else:
# if print_log:
# print(" ATTEMPTING...")
random.shuffle(origin_exits)
random.shuffle(dest_exits_new)
result = self.find_exit(origin_exits,dest_exits_new,print_log,check_direction,True)
if not result:
quarantine.append(island)
else:
traverse_result = self.traverse(island[0])
visited += traverse_result[0]
progression_result = self.get_open_exits()
origin_exits = progression_result[0]
check_direction = True
if not islands:
if check_direction:
check_direction = False
islands += quarantine
quarantine.clear()
elif check_progression:
check_progression = False
check_direction = True
islands += quarantine
quarantine.clear()
if print_log:
print(" Island construction complete")
# Check island Dark Space access, map exits accordingly
self.reset_progress()
#self.initialize_ds()
self.update_graph(True,True,True)
island_result = self.build_islands()
islands = island_result[1]
islands_no_ds = []
for island in islands:
if self.is_accessible(island[0][0]) and not self.check_ds_access(island[0][0]):
islands_no_ds.append(island)
if islands_no_ds:
if print_log:
print("Islands with no DS access:")
i = 0
for x in islands_no_ds:
i += 1
print("Island",x)
for y in x[0]:
print("-",self.graph[y][5])
dest_exits_ds = []
for node in self.graph:
if node not in visited and self.check_ds_access(node):
for exit in self.graph[node][15]:
if self.exits[exit][2] == -1:
dest_exits_ds.append(exit)
while islands_no_ds:
island = islands_no_ds.pop(0)
result = self.find_exit(island[1],dest_exits_ds,print_log,check_direction)
if not result:
if print_log:
print("ERROR: Could not find Dark Space access")
return False
else:
dest_exits_ds = result[3]
if print_log:
print(" Dark Space access check successful")
# Clean up the rest of the exits
self.reset_progress()
self.update_graph(True,True,True)
self.traverse()
check_progression = True
check_direction = True
while origin_exits:
progression_result = self.get_open_exits(check_progression)
origin_exits = progression_result[0]
dest_exits = progression_result[1]
random.shuffle(origin_exits)
random.shuffle(dest_exits)
if origin_exits:
result = self.find_exit(origin_exits,dest_exits,print_log,check_direction,check_progression,True,False)
if result:
origin_exit = result[0]
dest_exit = result[1]
dest = self.exits[dest_exit][4]
self.traverse([dest])
elif check_direction:
check_direction = False
elif check_progression:
check_progression = False
check_direction = True
if print_log:
print(" Finished mapping progression exits")
else:
if print_log:
print("WARNING: This shouldn't happen")
origin_exits = []
# Quality check for missing exits
origin_exits = []
dest_exits = []
for exit in self.exits:
if self.exits[exit][1] == -1:
if print_log:
print("How'd we miss this one??", self.exits[exit][10])
origin_exits.append(exit)
if self.exits[exit][2] == -1:
if print_log:
print("This one too??", self.exits[exit][10])
dest_exits.append(exit)
while origin_exits:
origin_exit = origin_exits.pop(0)
if not dest_exits:
if print_log:
print("ERROR: Entrance rando failed")
return False
dest_exit = dest_exits.pop(0)
self.link_exits(origin_exit,dest_exit,print_log,self.entrance_shuffle != "Uncoupled")
# Wrap it up
# self.reset_progress()
# self.update_graph(True,True,True)
if print_log:
print("Entrance rando successful!")
return True
def initialize_ds(self):
# Clear DS access data from graph
for x in self.graph:
self.graph[x][4] = 0
self.graph[x][9].clear()
# Find nodes that contain Dark Spaces
pyramid_ds_id = 130 # Special case for Pyramid DS
self.ds_locations = [pyramid_ds_id]
self.ds_nodes = [self.item_locations[pyramid_ds_id][0]]
self.freedan_locations = self.ds_locations[:]
self.freedan_nodes = self.ds_nodes[:]
for x in self.item_locations:
if self.item_locations[x][1] == 2:
self.ds_locations.append(x)
self.ds_nodes.append(self.item_locations[x][0])
if not self.is_sublist(self.item_locations[x][4], [64, 65, 66]) and self.item_locations[x][3] not in [61,62,63,64,65,66]:
self.freedan_locations.append(x)
self.freedan_nodes.append(self.item_locations[x][0])
return True
# Translates logic and exits to world graph
def update_graph(self,update_logic=True,update_ds=True,update_exits=False,print_log=False):
if print_log:
print("Updating graph...")
if update_exits:
for exit in self.exits:
if exit > 21 or self.exits[exit][5] or exit in self.exits_detailed:
# Check if exit has been shuffled
if self.exits[exit][1] > 0:
new_exit = self.exits[exit][1]
elif self.exits[exit][1] == 0:
new_exit = exit
else:
new_exit = -1
# Get exit origin
if new_exit > 0:
origin = self.exits[exit][3]
if not origin and self.is_exit_coupled(exit):
sister_exit = self.exits[exit][0]
origin = self.exits[sister_exit][4]
self.exits[exit][3] = origin
# Get (new) exit destination
if self.exits[new_exit][2] == 0 or self.exits[new_exit][2] == exit:
dest = self.exits[new_exit][4]
if not dest and self.is_exit_coupled(new_exit):
sister_exit = self.exits[new_exit][0]
dest = self.exits[sister_exit][3]
self.exits[new_exit][4] = dest
# Translate link into world graph
if origin and dest and (dest not in self.graph[origin][1]):
self.graph[origin][1].append(dest)
if print_log:
print(" Exits updated")
# Update logic edges (except those requiring Freedan access)
if update_logic:
for edge in self.logic:
if not self.logic[edge][3]:
self.check_edge(edge)
if print_log:
print(" Logic updated (item/abilities)")
for node in self.graph:
for x in self.graph[node][1]:
if x not in self.graph[node][10]:
self.graph[node][10].append(x)
for y in self.graph[node][10]:
if node not in self.graph[y][8]:
self.graph[y][8].append(node)
for z in self.graph[node][8]:
if node not in self.graph[z][10]:
self.graph[z][10].append(node)
if print_log:
print(" Graph updated")
if update_ds:
# Map DS access to nodes
self.initialize_ds()
self.update_ds_access(self.ds_nodes,1)
for node in self.freedan_nodes:
self.update_ds_access([node],2,[node])
if print_log:
print(" DS access updated")
# Update logic requiring Freedan access
if update_logic:
for edge in self.logic:
if self.logic[edge][3]:
self.check_edge(edge)
if print_log:
print(" Logic updated (DS access)")
#for x in self.graph:
# print(x,self.graph[x][11],self.graph[x][5])
#print(x,self.graph[x][4],self.graph[x][9],self.graph[x][5])
return True
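# Typical full refresh, as used elsewhere in this file after exits change:
#   self.reset_progress()
#   self.update_graph(True, True, True)  # logic + DS access + exits
#   self.traverse()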
# Check whether a node's DS access data needs to be updated
def consider_ds_node(self,node,access_mode=1,ds_nodes=[]):
if access_mode == 2:
if not self.graph[node][2] or self.graph[node][7]:
return False
# True as soon as any DS node is not yet recorded against this node
for x in ds_nodes:
if x not in self.graph[node][9]:
return True
return False
if not self.graph[node][4]:
return True
return False
# Check if a node has Dark Space access
def check_ds_access(self, start_node=-1, need_freedan=False, items=[]):
if start_node not in self.graph:
return False
if not self.graph[start_node][2] or self.graph[start_node][4] == 2 or (self.graph[start_node][4] == 1 and not need_freedan):
return True
elif not items:
return False
else:
to_visit = [start_node]
visited = []
ds_access = False
while not ds_access and to_visit:
node = to_visit.pop(0)
visited.append(node)
if self.check_ds_access(node,need_freedan):
return True
else:
for edge in self.graph[node][12]:
dest = self.logic[edge][2]
if dest not in visited+to_visit and not self.logic[edge][0] and self.check_edge(edge,items,False):
to_visit.append(dest)
return False
# graph_copy = copy.deepcopy(self.graph)
# self.update_graph(False,True,False)
# result = self.check_ds_access(start_node, need_freedan)
# self.graph = graph_copy
# graph_copy = None
# return result
# Update a node's DS access data - recursive for all backwards-accessible nodes
def update_ds_access(self,nodes=[],access_mode=1,ds_nodes=[]):
if not nodes:
return True
to_visit = []
for node in nodes:
if self.graph[node][4] < access_mode:
self.graph[node][4] = access_mode
for ds_node in ds_nodes:
if ds_node not in self.graph[node][9]:
self.graph[node][9].append(ds_node)
for x in self.graph[node][8]:
if self.consider_ds_node(x,access_mode,ds_nodes):
to_visit.append(x)
return self.update_ds_access(to_visit,access_mode,ds_nodes)
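# Access-mode semantics (inferred from check_ds_access above): graph[node][4] == 1
# means some Dark Space is reachable from the node, == 2 means one is reachable
# where Freedan can be summoned; graph[node][9] accumulates those Freedan DS nodes.
# update_graph() seeds this with:
#   self.update_ds_access(self.ds_nodes, 1)
#   for node in self.freedan_nodes:
#       self.update_ds_access([node], 2, [node])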
# Check a logic edge to see if prerequisites have been met
def check_edge(self, edge, items=[], update_graph=True, print_log=False):
success = False
if edge not in self.logic:
if print_log:
print("WARNING: Not a valid logic ID:",edge)
return False
elif self.logic[edge][0] == -1:
return False
elif self.logic[edge][0] > 0:
success = True
req_items = []
for req in self.logic[edge][4]:
req_items += [req[0]] * req[1] # req = [item_id, quantity]
if self.is_sublist(self.items_collected+items, req_items) and (not self.logic[edge][3] or self.check_ds_access(self.logic[edge][1],True)):
success = True
if success and update_graph:
self.open_edge(edge)
return success
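# Requirement format consumed above (hedged reading): self.logic[edge][4] is a
# list of [item_id, quantity] pairs, e.g. [[36, 1], [1, 3]] for "Aura plus
# 3 Red Jewels"; a typical dry check: self.check_edge(edge_id, items, update_graph=False).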
# Open a logic edge and translate results to graph
def open_edge(self, edge=-1, test=False, print_log=False):
if edge not in self.logic:
return False
if self.logic[edge][0] == -1:
if print_log:
print("WARNING: Tried to open an edge that is restricted")
return False
if not self.logic[edge][0] and not test:
self.logic[edge][0] = 1
origin = self.logic[edge][1]
dest = self.logic[edge][2]
return self.new_connection(origin,dest,test)
# Map a new connection (i.e. exit, logic) to graph
def new_connection(self, origin, dest, test=False, print_log=False):
if not test:
# To/from data
if dest not in self.graph[origin][10]:
self.graph[origin][10].append(dest)
if origin not in self.graph[dest][8]:
self.graph[dest][8].append(origin)
# Dark Space access data
if self.graph[dest][4] > self.graph[origin][4]:
self.update_ds_access([origin],self.graph[dest][4],self.graph[dest][9])
# Return list of newly-accessible nodes
if self.is_accessible(origin) and not self.is_accessible(dest):
traverse_result = self.traverse([dest],test,print_log)
return traverse_result[0]
return []
# to_visit = [dest]
# while to_visit:
# node = to_visit.pop(0)
# new_nodes.append(node)
# if not test:
# self.visit_node(node,test,print_log)
# for x in self.graph[node][10]:
# if x != node and x not in to_visit+new_nodes and not self.is_accessible(x):
# to_visit.append(x)
# return new_nodes
def restrict_edge(self, edge=-1):
try:
self.logic[edge][0] = -1
return True
except Exception:
return False
def unrestrict_edge(self, edge=-1):
try:
if self.logic[edge][0] != 1: # Don't close an edge that is already open
self.logic[edge][0] = 0
return True
except Exception:
return False
# Initialize World parameters
def initialize(self,print_log=False):
# Manage required items
if 1 in self.dungeons_req:
self.required_items += [3, 4, 7, 8]
if 2 in self.dungeons_req:
self.required_items += [14]
if 3 in self.dungeons_req:
self.required_items += [18, 19]
if 5 in self.dungeons_req:
self.required_items += [38, 30, 31, 32, 33, 34, 35]
if 6 in self.dungeons_req:
self.required_items += [39]
if self.kara == 1:
self.required_items += [2, 9, 23]
elif self.kara == 2:
self.required_items += [11, 12, 15]
elif self.kara == 4:
self.required_items += [26]
elif self.kara == 5:
self.required_items += [28, 66]
# Update inventory space logic
if 3 in self.dungeons_req:
self.item_pool[19][4] = True
if 5 in self.dungeons_req:
self.item_pool[30][4] = True
self.item_pool[31][4] = True
self.item_pool[32][4] = True
self.item_pool[33][4] = True
self.item_pool[34][4] = True
self.item_pool[35][4] = True
self.item_pool[38][4] = True
# Solid Arm can only be required in Extreme
if self.difficulty < 3:
self.exits[21][4] = self.exits[21][3]
# Allow glitches *********************
if "Allow Glitches" in self.variant:
self.graph[0][1].append(601)
self.graph[61][1].append(62) # Moon Tribe: No ability required
self.graph[181][1].append(182) # Sky Garden: Ramp glitch
self.graph[181][1].append(184)
self.graph[182][1].append(185)
self.graph[222][1].append(221) # Mu: Golem skip
self.logic[268][4][1][1] = 0 # Ankor Wat: Earthquaker not required
self.logic[273][4][0][1] = 0 # Ankor Wat: Glasses not required
self.logic[274][4][0][1] = 0
self.item_locations[124][2] = False # Ankor Wat: Dropdown DS has abilities
self.graph[410][1].append(411) # Pyramid: No ability required
self.item_locations[142][2] = False # Pyramid: Bottom DS can have abilities
if not self.fluteless:
self.graph[182][1].append(183) # Sky Garden: cage glitch
self.item_locations[94][2] = False # Great Wall: Slider glitch
self.graph[294][1].append(295)
# Early Firebird
if self.firebird:
self.graph[0][1].append(602)
self.unrestrict_edge(405)
# Zelda 3 Mode
if "Z3 Mode" in self.variant:
# Update item pool
self.item_pool[1][0] = 29 # Red Jewels
self.item_pool[50][0] = 5 # HP upgrades
self.item_pool[51][0] = 2 # DEF upgrades
self.item_pool[52][0] = 3 # STR upgrades
self.item_pool[55][0] = 12 # HP Pieces
# Open Mode
if "Open Mode" in self.variant:
# Update graph logic
self.logic[30][0] = 2 # Lola's Letter
self.logic[31][0] = 2
self.logic[32][0] = 2
self.logic[33][0] = 2 # Memory Melody
self.logic[36][0] = 2 # Teapot
self.logic[38][0] = 2 # Will
self.logic[39][0] = 2
self.logic[40][0] = 2 # Roast
# Remove travel items from pool
self.item_pool[10][0] = 0 # Large Roast
self.item_pool[13][0] = 0 # Memory Melody
self.item_pool[24][0] = 0 # Will
self.item_pool[25][0] = 0 # Teapot
self.item_pool[37][0] = 0 # Lola's Letter
self.item_pool[6][0] += 4 # Herbs
self.item_pool[0][0] += 1 # Nothing
# Chaos mode -- MAY NOT NEED THIS ANYMORE
# if self.logic_mode == "Chaos":
# # Add "Inaccessible" node to graph
# self.graph[INACCESSIBLE] = [False, [], 0, [0,0,0,b"\x00"], 0, "Inaccessible", [], False, [], [], [], [], [], [], [], []]
#
# # Towns can have Freedan abilities
# for x in self.item_locations:
# if self.item_locations[x][4] == [64, 65, 66]:
# self.item_locations[x][4].clear()
#
# Several locked Dark Spaces can have abilities
# ds_unlock = [74, 94, 124, 142]
#
# if 1 not in self.dungeons_req: # First DS in Inca
# ds_unlock.append(29)
# if self.kara != 1: # DS in Underground Tunnel
# ds_unlock.append(19)
# if self.kara != 5: # DS in Ankor Wat garden
# ds_unlock.append(122)
#
# for x in ds_unlock:
# self.item_locations[x][2] = False
# Red Jewel Hunts change the graph
if self.goal == "Red Jewel Hunt":
self.logic[24][2] = 492
self.logic[25][2] = 492
self.logic[26][2] = 492
self.logic[27][2] = 492
del self.logic[406]
del self.logic[407]
# Change graph logic depending on Kara's location
if self.kara == 1:
self.unrestrict_edge(400)
self.graph[49][6].append(20)
elif self.kara == 2:
self.unrestrict_edge(401)
self.graph[150][6].append(20)
# Change "Sam" to "Samlet"
self.location_text[45] = b"\x63\x80\x8c\x8b\x84\xa4"
elif self.kara == 3:
self.unrestrict_edge(402)
self.graph[270][6].append(20)
elif self.kara == 4:
self.unrestrict_edge(403)
self.graph[345][6].append(20)
elif self.kara == 5:
self.unrestrict_edge(404)
self.graph[391][6].append(20)
# Change logic based on which dungeons are required
for x in self.statues:
self.logic[406][4][x][1] = 1
# Change item pool for "player choice" statue requirement variant
if self.statue_req == StatueReq.PLAYER_CHOICE.value:
self.item_pool[100][0] = 0
self.item_pool[101][0] = 0
self.item_pool[102][0] = 0
self.item_pool[103][0] = 0
self.item_pool[104][0] = 0
self.item_pool[105][0] = 0
self.item_pool[106][0] = 6
# Incorporate item locations and logic edges into world graph
for x in self.item_locations:
self.graph[self.item_locations[x][0]][11].append(x)
for y in self.logic:
if self.logic[y][0] != -1:
self.graph[self.logic[y][1]][12].append(y)
self.graph[self.logic[y][2]][13].append(y)
# Random start location
if self.start_mode != "South Cape":
self.start_loc = self.random_start()
if print_log:
print("Start location:",self.item_locations[self.start_loc][9])
if self.start_loc == 19: # Open Lily's door when starting in Underground Tunnel
self.logic[62][0] = 2
# elif self.start_loc == 30: # Inca ramp can hardlock you -- NEW FIX MAKES THIS OBSOLETE
# self.graph[83][1].append(82)
elif self.start_loc == 47: # Diamond Mine behind fences
self.graph[131][1].append(130)
if self.start_mode != "South Cape" or self.entrance_shuffle != "None":
self.graph[0][1].remove(22)
self.graph[0][1].append(self.item_locations[self.start_loc][0])
# TEMP - grant Psycho Dash at start for fluteless seeds
if self.fluteless:
self.fill_item(61,self.start_loc,False,True,print_log)
# Boss Shuffle
if "Boss Shuffle" in self.variant:
boss_entrance_idx = [1,4,7,10,13,16,19]
boss_exit_idx = [3,6,9,12,15,18,21]
dungeon = 0
if print_log:
print("Boss order: ",self.boss_order)
while dungeon < 7:
boss = self.boss_order[dungeon]
entrance_old = boss_entrance_idx[dungeon]
entrance_new = boss_entrance_idx[boss-1]
exit_old = boss_exit_idx[boss-1]
exit_new = boss_exit_idx[dungeon]
self.link_exits(entrance_old,entrance_new,print_log)
if self.exits[exit_old][5] or exit_old in self.exits_detailed:
self.link_exits(exit_old,exit_new,print_log)
dungeon += 1
# Overworld shuffle
if "Overworld Shuffle" in self.variant:
if not self.shuffle_overworld(print_log):
if print_log:
print("ERROR: Overworld shuffle failed")
return False
# Shuffle exits
if self.entrance_shuffle != "None":
if not self.shuffle_exits(print_log):
if print_log:
print("ERROR: Entrance rando failed")
return False
self.reset_progress(True)
#self.initialize_ds()
self.update_graph(True,True,True)
# Initialize Dark Space information
if self.logic_mode == "Completable":
if not self.lock_dark_spaces(print_log):
if print_log:
print("ERROR: Could not lock Dark Spaces")
return False
return True
# Update item placement logic after abilities are placed
def check_logic(self,location=0):
abilities = [61, 62, 63, 64, 65, 66]
inaccessible_ls = []
# Check for abilities in critical Dark Spaces
if self.item_locations[19][3] in abilities: # Underground Tunnel
inaccessible_ls += [17, 18]
self.restrict_edge(63)
if self.item_locations[29][3] in abilities: # Inca Ruins
inaccessible_ls += [26, 27, 30, 31, 32]
self.restrict_edge(94)
if (self.item_locations[46][3] in abilities and # Diamond Mine
self.item_locations[47][3] in abilities and
self.item_locations[48][3] in abilities):
self.restrict_edge(118)
if (self.item_locations[58][3] in abilities and # Sky Garden
self.item_locations[59][3] in abilities and
self.item_locations[60][3] in abilities):
self.restrict_edge(131)
self.restrict_edge(132)
self.restrict_edge(144)
self.restrict_edge(147)
self.restrict_edge(148)
self.restrict_edge(149)
self.restrict_edge(150)
self.restrict_edge(151)
if self.item_locations[94][3] in abilities: # Great Wall
self.graph[700] = [False, [], 0, [3,15,0,b"\x00"], 0, "Great Wall - Behind Spin", [], False, [], [], [], [], [], [], [], []]
self.logic[700] = [0, 296, 700, False, [[63, 1]]]
self.item_locations[93][0] = 700
self.logic[222][3] = True
if self.item_locations[93][3] in abilities:
inaccessible_ls += [95]
self.restrict_edge(223)
self.restrict_edge(224)
if self.item_locations[122][3] in abilities: # Ankor Wat
inaccessible_ls += [117, 118, 119, 120, 121]
self.restrict_edge(267)
self.restrict_edge(268)
self.restrict_edge(269)
self.restrict_edge(270)
self.restrict_edge(271)
self.restrict_edge(272)
if self.item_locations[142][3] in abilities: # Pyramid
inaccessible_ls += [133,134,136,139,140]
self.restrict_edge(300)
self.restrict_edge(301)
self.restrict_edge(302)
self.restrict_edge(303)
self.restrict_edge(304)
self.restrict_edge(306)
self.restrict_edge(307)
self.restrict_edge(313)
# Change graph node for inaccessible_ls locations
for x in inaccessible_ls:
if x in self.graph[self.item_locations[x][0]][11]:
self.graph[self.item_locations[x][0]][11].remove(x)
self.item_locations[x][0] = INACCESSIBLE
# Simulate inventory
def get_inventory(self,start_items=[],item_destinations=[],new_nodes=[]):
if not start_items:
start_items = self.items_collected[:]
if not item_destinations:
item_destinations = self.item_destinations[:]
else:
item_destinations = item_destinations[:] # Copy so we don't mutate the caller's list
inventory_temp = []
for item in start_items:
if self.item_pool[item][4]:
inventory_temp.append(item)
# negative_inventory = []
# for node in self.graph:
# if self.is_accessible(node) or node in new_nodes:
# negative_inventory += self.graph[node][6]
inventory = []
while inventory_temp:
item = inventory_temp.pop(0)
if item in item_destinations:
item_destinations.remove(item)
else:
inventory.append(item)
return inventory
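# Illustrative (hedged): the inventory is the space-consuming items collected
# minus those already turned in at their destinations, e.g. as used in randomize():
#   if len(self.get_inventory()) > MAX_INVENTORY:
#       ...  # too many quest items held at once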
# Return list of accessible nodes
def list_accessible_nodes(self):
accessible = []
for x in self.graph:
if self.is_accessible(x):
accessible.append(x)
return accessible
def print_accessible_nodes(self):
print("Accessible nodes:")
for x in self.graph:
if self.is_accessible(x):
print("",self.graph[x][5])
def print_inaccessible_nodes(self):
print("Inccessible nodes:")
for x in self.graph:
if not self.is_accessible(x):
print("",self.graph[x][5])
# Takes a random seed and builds out a randomized world
def randomize(self, seed_adj=0, print_log=False):
random.seed(self.seed + seed_adj)
if self.race_mode:
for i in range(random.randint(100, 1000)):
_ = random.randint(0, 10000)
if not self.initialize(print_log):
if print_log:
print("ERROR: Could not initialize world")
return False
if print_log:
print("Initialization complete")
# Initialize and shuffle location list
item_locations = self.list_item_locations()
random.shuffle(item_locations)
# Fill the Mystic Statues and room-clear rewards
self.fill_statues()
self.map_rewards()
# Forward fill progression items with Monte Carlo method
# Continue to place progression items until goal is reached
done = False
goal = False
cycle = 0
place_abilities = True
self.items_collected = self.list_item_pool(1) # Assume all items for ability placement
if print_log:
print("Beginning ability placement...")
while not done:
cycle += 1
if print_log:
print(" Cycle",cycle)
if cycle > MAX_CYCLES:
if print_log:
print("ERROR: Max cycles exceeded")
return False
self.traverse()
if place_abilities:
to_place = self.list_item_pool(2)
if not to_place:
done = True
else:
random.shuffle(to_place)
progress = False
while not progress and to_place:
ability = to_place.pop(0)
progress = self.forward_fill([ability],item_locations,False,self.logic_mode == "Chaos",print_log)
if progress:
self.check_logic()
else:
if print_log:
print("ERROR: Could not place any abilities")
return False
if done:
place_abilities = False
done = False
if print_log:
print(" Finished placing abilities")
print("Beginning item placement...")
# Randomly place non-progression items
self.traverse()
non_prog_items = self.list_item_pool(0, [], 2) + self.list_item_pool(0, [], 3)
for item in non_prog_items:
if item in self.items_collected:
self.items_collected.remove(item)
self.forward_fill(non_prog_items, item_locations, False, self.logic_mode == "Chaos", print_log)
# List and shuffle remaining key items
item_list = self.list_item_pool()
#random.shuffle(item_list)
# Reset graph, prepare for item placement
self.reset_progress(True)
self.update_graph()
else:
if len(self.get_inventory()) > MAX_INVENTORY:
goal = False
if print_log:
print("WARNING: Inventory capacity exceeded")
else:
goal = self.is_accessible(492)
# Get list of new progression options
#if print_log:
# print("Open edges:",self.open_edges)
# print("Open locations:",self.open_locations)
progression_result = self.progression_list()
if print_log:
print("Progression options: {")
print(" ",progression_result[0])
print(" ",progression_result[1])
print(" ",progression_result[2],"}")
progression_list = progression_result[0]
is_progression = (progression_result != [[],[],[]])
done = goal and (self.logic_mode != "Completable" or not is_progression)
if not done:
if not is_progression:
if print_log:
print("ERROR: Couldn't progress any further")
self.print_graph()
return False
progress = False
key = random.uniform(0,100)
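# `key` is a uniform draw in [0,100); monte_carlo() is assumed to return
# cumulative-weight thresholds as (threshold, index) pairs, so the first
# threshold at or above `key` selects the progression option below.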
while not progress and progression_list:
progression_mc = self.monte_carlo(progression_list)
idx = 0
for x in progression_mc:
if key <= x[0] and not idx:
idx = x[1]
items = progression_list.pop(idx)
if self.forward_fill(items, item_locations, False, self.logic_mode == "Chaos", print_log):
progress = True
# if print_log:
# print(" Placed progression items successfully")
if not progress:
if print_log:
print(" No suitable progression found, attempting to make room...")
if not self.make_room(progression_result,print_log):
if print_log:
print("ERROR: Could not find progression")
self.print_graph()
return False
if print_log:
print("Placing junk items...")
junk_items = self.list_item_pool()
#random.shuffle(junk_items)
self.random_fill(junk_items, item_locations, False, print_log)
if print_log:
print("Item placement complete, beginning final traversal...")
self.reset_progress(True)
self.update_graph()
self.traverse([],False,print_log)
if print_log:
locked_ds = [19,29,122]
for x in locked_ds:
if self.item_locations[x][3] in [61, 62, 63, 64, 65, 66]:
print("WARNING:",self.item_locations[x][9],"has an ability")
if self.logic_mode == "Completable" and self.goal != "Red Jewel Hunt":
completed = True
for node in self.graph:
if not self.graph[node][0] and node < 600:
if print_log:
print("Can't reach ",self.graph[node][5])
completed = False
else:
completed = self.graph[492][0]
if not completed:
if print_log:
self.print_graph()
print("ERROR: Seed failed, trying again...")
print("")
return False
if print_log:
print("Writing hints...")
placement_log = self.placement_log[:]
random.shuffle(placement_log)
self.in_game_spoilers(placement_log)
if print_log:
print("Randomization complete!")
return True
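# Hedged end-to-end sketch (rom_path is illustrative; see __init__ and
# write_to_rom elsewhere in this file):
#   world = World(settings)
#   if world.randomize(print_log=True):
#       with open(rom_path, "r+b") as f:
#           world.write_to_rom(f)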
def print_graph(self):
print("Open edges:",self.open_edges)
print("Open locations:",self.open_locations)
for node in self.graph:
print(node,self.graph[node])
# Prepares dataset to give in-game spoilers
def in_game_spoilers(self, placement_log=[]):
for x in placement_log:
item = x[0]
location = x[1]
if location not in self.free_locations and location in self.location_text:
if item in self.required_items or item in self.good_items or location in self.trolly_locations:
spoiler_str = b"\xd3" + self.location_text[location] + b"\xac\x87\x80\xa3\xcb"
spoiler_str += self.item_text_short[item] + b"\xc0"
# No in-game spoilers in Expert mode
if self.difficulty >= 3:
spoiler_str = b"\xd3\x8d\x88\x82\x84\xac\xa4\xa2\xa9\xac\x83\x8e\x83\x8e\x8d\x86\x8e\x4f\xc0"
self.spoilers.append(spoiler_str)
# print item, location
# Builds the spoiler dictionary (settings, item, and ability locations)
def generate_spoiler(self, version=""):
if self.kara == 1:
kara_txt = "Edward's Castle"
elif self.kara == 2:
kara_txt = "Diamond Mine"
elif self.kara == 3:
kara_txt = "Angel Dungeon"
elif self.kara == 4:
kara_txt = "Mt. Kress"
elif self.kara == 5:
kara_txt = "Ankor Wat"
if self.difficulty == 0:
difficulty_txt = "Easy"
elif self.difficulty == 1:
difficulty_txt = "Normal"
elif self.difficulty == 2:
difficulty_txt = "Hard"
elif self.difficulty == 3:
difficulty_txt = "Extreme"
spoiler = dict()
spoiler["version"] = version
spoiler["seed"] = str(self.seed)
spoiler["date"] = str(datetime.utcfromtimestamp(time.time()))
spoiler["goal"] = str(self.goal)
spoiler["entrance_shuffle"] = str(self.entrance_shuffle)
spoiler["start_location"] = self.item_locations[self.start_loc][9].strip()
spoiler["logic"] = str(self.logic_mode)
spoiler["difficulty"] = str(difficulty_txt)
if self.statue_req == StatueReq.PLAYER_CHOICE.value:
spoiler["statues_required"] = self.statues_required
else:
spoiler["statues_required"] = self.statues
spoiler["boss_order"] = self.boss_order
spoiler["kara_location"] = kara_txt
spoiler["jeweler_amounts"] = self.gem
spoiler["inca_tiles"] = self.incatile
spoiler["hieroglyph_order"] = self.hieroglyphs
items = []
for x in self.item_locations:
if x < 500:
item = self.item_locations[x][3]
location_name = self.item_locations[x][9].strip()
item_name = self.item_pool[item][3]
items.append({"location": location_name, "name": item_name})
spoiler["items"] = items
if "Overworld Shuffle" in self.variant:
overworld_links = []
for continent_id, continent_data in self.overworld_menus.items():
continent_name = continent_data[7]
region_name = self.overworld_menus[continent_data[0]][8]
overworld_links.append({"region": region_name, "continent": continent_name})
spoiler["overworld_entrances"] = overworld_links
if self.entrance_shuffle != "None":
exit_links = []
for exit in self.exits:
exit_name = self.exits[exit][10]
linked_exit = self.exits[exit][1]
if not linked_exit:
exit_linked_name = exit_name
else:
exit_linked_name = self.exits[linked_exit][10]
exit_links.append({"entrance": exit_name, "exit": exit_linked_name})
spoiler["exit_links"] = exit_links
self.spoiler = spoiler
#self.complete_graph_visualization()
def complete_graph_visualization(self,print_log=False):
self.graph_viz = graphviz.Digraph(graph_attr=[('concentrate','true'),
('rankdir', 'TB')], strict=True)
graph = self.graph_viz
areas = dict()
area_names = ["Overworld",
"South Cape",
"Edward's Castle",
"Itory Village",
"Moon Tribe",
"Inca Ruins",
"Diamond Coast",
"Freejia",
"Diamond Mine",
"Neil's Cottage",
"Nazca Plain",
"Seaside Palace",
"Mu",
"Angel Village",
"Watermia",
"Great Wall",
"Euro",
"Mt. Kress",
"Native's Village",
"Ankor Wat",
"Dao",
"Pyramid",
"Babel",
"Jeweler's Mansion"]
graph.attr('node', shape='box')
for area_id in range(len(area_names)):
areas[area_id] = list()
for area_id in range(1,len(area_names)):
node_name = f"area_{area_id}"
node_content = area_names[area_id]
#areas[0].append((node_name, node_content))
for region_id, region_data in self.graph.items():
area = region_data[3][1]
node_name = f"region_{region_id}"
node_content = region_data[5]
areas[area].append((node_name, node_content))
for area_id, area_nodes in areas.items():
for node_id, node_content in area_nodes:
graph.node(node_id, node_content)
#with graph.subgraph(name=f"cluster_{area_id}") as c:
# c.attr(label=area_names[area_id],
# color="black")
# for node_id, node_content in area_nodes:
# if area_id != 0:
# c.node(node_id, node_content)
# else:
# graph.node(node_id,node_content)
for region_id, region_data in self.graph.items():
start_area = region_data[3][1]
node_name = f"region_{region_id}"
area_name = f"area_{start_area}"
for accessible_region_id in region_data[1]:
end_area = self.graph[accessible_region_id][3][1]
end_area_name = f"area_{end_area}"
accessible_node_name = f"region_{accessible_region_id}"
graph.edge(node_name, accessible_node_name)
#if start_area != 0 and end_area != 0:
# if start_area != end_area:
# graph.edge(area_name, end_area_name)
# else:
# graph.edge(node_name, accessible_node_name)
#elif start_area != 0:
# graph.edge(area_name, accessible_node_name)
#elif end_area != 0:
# graph.edge(node_name, end_area_name)
#else:
# graph.edge(node_name, accessible_node_name)
for _, logic_data in self.logic.items():
needed_items = logic_data[4] # logic entries are [status, origin, dest, freedan_flag, [[item, qty], ...]]
enough_items = True
for item_id, quantity in needed_items:
existing_quantity = 0
if item_id not in self.item_pool:
if print_log:
print("Missing info about item:", item_id)
else:
existing_quantity = self.item_pool[item_id][0]
for _, location_data in self.item_locations.items():
if location_data[2] and item_id == location_data[3]:
existing_quantity += 1
if existing_quantity < quantity:
enough_items = False
break
if not enough_items:
continue
start_name = f"region_{logic_data[0]}"
dest_name = f"region_{logic_data[1]}"
start_area = self.graph[logic_data[0]][3][1]
end_area = self.graph[logic_data[1]][3][1]
area_name = f"area_{start_area}"
end_area_name = f"area_{end_area}"
graph.edge(start_name, dest_name)
#if start_area != 0 and end_area != 0:
# if start_area != end_area:
# graph.edge(area_name, end_area_name)
# else:
# graph.edge(start_name, dest_name)
#elif start_area != 0:
# graph.edge(area_name, dest_name)
#elif end_area != 0:
# graph.edge(start_name, end_area_name)
#else:
# graph.edge(start_name, dest_name)
per_region_item_node = dict()
item_location_color_map = {
1: "yellow",
2: "blue",
3: "green",
4: "white"
}
graph.attr('node', shape='plaintext')
for itemloc_id, itemloc_data in self.item_locations.items():
# Add Item_location_nodes
location_region = itemloc_data[0]
region_node_name = f"region_{location_region}"
region_item_node_name = f"region_itemnode_{location_region}"
if (itemloc_data[1] != 2 or itemloc_data[3] != 0) and itemloc_data[1] != 4:
if region_item_node_name not in per_region_item_node:
per_region_item_node[region_item_node_name] = []
graph.edge(region_node_name, f"{region_item_node_name}")
per_region_item_node[region_item_node_name].append((itemloc_id))
for region_item_node_name, locations_id in per_region_item_node.items():
node_content = "<<table border='0' cellborder='1' cellspacing='0'>"
for itemloc_id in locations_id:
itemloc_data = self.item_locations[itemloc_id]
item_name = self.item_pool[itemloc_data[3]][3]
location_name = itemloc_data[9]
if ":" in location_name:
location_name = ":".join(location_name.split(':')[1:])
location_type = itemloc_data[1]
node_content += f"""<tr>
<td ALIGN='left' bgcolor='{item_location_color_map[location_type]}'>{location_name.strip()}</td>
<td align='center'>{item_name}</td>
</tr>"""
node_content += "</table>>"
graph.node(region_item_node_name, node_content)
def print_enemy_locations(self, filepath, offset=0):
f = open(filepath, "r+b")
rom = f.read()
for enemy in self.enemies:
print(self.enemies[enemy][3])
done = False
addr = int("c8200", 16) + offset
while not done:
addr = rom.find(self.enemies[enemy][1], addr + 1)
if addr < 0 or addr > int("ce5e4", 16) + offset:
done = True
else:
f.seek(addr)
# f.write(b"\x55\x87\x8a\x05")
print(" ", addr, hex(addr), binascii.hexlify(f.read(4)))
f.close()
# Prints item and ability locations
def print_spoiler(self):
if self.kara == 1:
kara_txt = "Edward's Castle"
elif self.kara == 2:
kara_txt = "Diamond Mine"
elif self.kara == 3:
kara_txt = "Angel Dungeon"
elif self.kara == 4:
kara_txt = "Mt. Kress"
elif self.kara == 5:
kara_txt = "Ankor Wat"
print("")
print("Seed > ", self.seed)
print("Statues Required > ", self.statues)
print("Kara Location > ", kara_txt)
print("Jeweler Reward Amounts > ", self.gem)
print("Inca Tile (column, row) > ", self.incatile)
print("Hieroglyph Order > ", self.hieroglyphs)
print("")
for x in self.item_locations:
item = self.item_locations[x][3]
location_name = self.item_locations[x][9]
item_name = self.item_pool[item][3]
print(location_name, " > ", item_name)
# Modifies game ROM to reflect the current state of the World
def write_to_rom(self, f, rom_offset=0, print_log=False):
# Room-clearing rewards
idx_tier2 = 0
idx_tier3 = 0
idx_tier4 = 0
for map in self.maps:
reward_tier = self.maps[map][2][1]
if reward_tier > 0:
reward = self.maps[map][2][0]
f.seek(int("1aade", 16) + map + rom_offset)
f.write(binascii.unhexlify(format(reward,"02x")))
# Populate player level logic
if reward_tier == 2:
f.seek(int("f4a7", 16) + 4*idx_tier2 + rom_offset)
f.write(binascii.unhexlify(format(map,"02x"))+b"\x03")
idx_tier2 += 1
elif reward_tier == 3:
f.seek(int("f4bf", 16) + 4*idx_tier3 + rom_offset)
f.write(binascii.unhexlify(format(map,"02x"))+b"\x03")
idx_tier3 += 1
elif reward_tier == 4:
f.seek(int("f4d7", 16) + 4*idx_tier4 + rom_offset)
f.write(binascii.unhexlify(format(map,"02x"))+b"\x03")
idx_tier4 += 1
#print("maps done")
# Items and abilities
for x in self.item_locations:
type = self.item_locations[x][1]
# Write items to ROM
if type == 1:
item = self.item_locations[x][3]
# print "Writing item ", item
item_addr = self.item_locations[x][5]
item_code = self.item_pool[item][2]
text1_addr = self.item_locations[x][6]
text2_addr = self.item_locations[x][7]
text3_addr = self.item_locations[x][8]
if item in self.item_text_long:
text_long = self.item_text_long[item]
else:
text_long = ""
if item in self.item_text_short:
text_short = self.item_text_short[item]
else:
text_short = ""
# Write item code to memory
if item_code and item_addr:
f.seek(int(item_addr, 16) + rom_offset)
f.write(item_code)
# Write item text, if appropriate
if text1_addr and text_long:
f.seek(int(text1_addr, 16) + rom_offset)
f.write(text_long)
# f.write(b"\xd3")
# f.write(text_short)
f.write(b"\xc9\x0a\xc0")
# Write "inventory full" item text, if appropriate
if text2_addr and text_long:
f.seek(int(text2_addr, 16) + rom_offset)
# f.write(b"\xd3")
# f.write(text_short)
f.write(text_long)
f.write(b"\xcb\x45\x65\x4b\x4b\x4f\xc9\x0a\xc0") # Just says "FULL!"
# Write jeweler inventory text, if appropriate
if text3_addr and text_short:
f.seek(int(text3_addr, 16) + rom_offset)
f.write(text_short)
# Write abilities to ROM
elif type == 2: # Check if filled
ability = self.item_locations[x][3]
ability_addr = self.item_locations[x][5]
map = self.item_locations[x][8]
# Change Dark Space type in event table
if ability in [61, 62, 63, 64, 65, 66]:
f.seek(int(ability_addr, 16) + rom_offset)
f.write(b"\x05")
# Update ability text table
if ability == 61: # Psycho Dash
# f.seek(int("8eb5a",16)+2*i+rom_offset)
f.seek(int("8eb5a", 16) + rom_offset)
f.write(map)
if ability == 62: # Psycho Slide
f.seek(int("8eb5c", 16) + rom_offset)
f.write(map)
if ability == 63: # Spin Dash
f.seek(int("8eb5e", 16) + rom_offset)
f.write(map)
if ability == 64: # Dark Friar
f.seek(int("8eb60", 16) + rom_offset)
f.write(map)
if ability == 65: # Aura Barrier
f.seek(int("8eb62", 16) + rom_offset)
f.write(map)
if ability == 66: # Earthquaker
f.seek(int("8eb64", 16) + rom_offset)
f.write(map)
#print("items/abilities done")
# Special code for 2-item event in Dao
item1 = self.item_locations[125][3]
item2 = self.item_locations[126][3]
f.seek(int("8fde0", 16) + rom_offset)
f.write(b"\xd3" + self.item_text_short[item1] + b"\xcb")
f.write(self.item_text_short[item2] + b"\xc9\x0a\xcf\xce")
# Write in-game spoilers
i = 0
for addr in self.spoiler_addresses:
f.seek(int(self.spoiler_addresses[addr], 16) + rom_offset)
if i < len(self.spoilers):
f.write(self.spoilers[i])
i += 1
#print("spoilers done")
# Enemizer
if self.enemizer != "None":
# "Fix" Ankor Wat Gorgons so they don't fall from the ceiling
f.seek(int("bb825", 16) + rom_offset)
f.write(b"\x00\x00\x00\x02\x27\x0F\x02\xC1\x4C\xA0\xB8\x6B")
# Run enemizer
self.enemize(f, rom_offset)
# self.parse_maps(f,rom_offset)
# Random start location
if self.start_mode != "South Cape" or self.entrance_shuffle != "None":
# print self.start_loc
map_str = self.item_locations[self.start_loc][8] + self.item_locations[self.start_loc][7]
# Change game start location
f.seek(int("be517", 16) + rom_offset)
f.write(map_str)
# Change Dark Space warp location
f.seek(int("8dbea", 16) + rom_offset)
f.write(map_str)
# Change Dark Space warp text
map_name = self.location_text[self.start_loc]
f.seek(int("8de1f", 16) + rom_offset)
f.write(map_name + b"\x0D\xCB\xAC\x4D\x8E\xCB\xAC\x69\x84\xA3\xCA")
#print("random start done")
# Overworld shuffle
if "Overworld Shuffle" in self.variant:
ow_patch_data = []
for entry in self.overworld_menus:
# Prepare ROM edits
new_entry = self.overworld_menus[entry][0]
f.seek(int(self.overworld_menus[new_entry][4], 16) + rom_offset)
ow_patch_data.append([self.overworld_menus[entry][4], f.read(8)])
f.seek(int(self.overworld_menus[new_entry][6], 16) + rom_offset)
ow_patch_data.append([self.overworld_menus[entry][6], f.read(11)])
ow_patch_data.append([self.overworld_menus[new_entry][5], self.overworld_menus[entry][1]])
for x in ow_patch_data:
f.seek(int(x[0], 16) + rom_offset)
f.write(x[1])
#print("overworld shuffle done")
# Entrance shuffle
er_patch_data = []
for exit in self.exits:
#self.exits[exit][0] = exit #TESTING ONLY
# Prepare ROM edits
new_exit = self.exits[exit][1]
if new_exit and self.exits[exit][5]: # and exit != new_exit:
try:
if self.exits[new_exit][6]:
new_data = self.exits[new_exit][6]
else:
f.seek(int(self.exits[new_exit][5], 16) + rom_offset)
new_data = f.read(8)
er_patch_data.append([self.exits[exit][5], new_data])
except Exception:
if print_log:
print("ERROR: exit data invalid",exit,new_exit)
for exit in self.exits_detailed:
new_exit = self.exits[exit][1]
if new_exit:
map_str = self.exits[new_exit][6]
map_id = map_str[0:1]
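# The 16-bit coords in map_str are assumed to be pixel units; the detailed
# warp entries want 16px tile units, hence the little-endian round trip with //16.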
xcoord = int.to_bytes(int.from_bytes(map_str[1:3], byteorder="little") // 16, 2, byteorder='little')
ycoord = int.to_bytes(int.from_bytes(map_str[3:5], byteorder="little") // 16, 2, byteorder='little')
facedir = map_str[5:6]
camera = map_str[6:8]
# print(map_id,xcoord,ycoord,facedir,camera)
er_patch_data.append([self.exits_detailed[exit][0], map_id])
er_patch_data.append([self.exits_detailed[exit][1], xcoord])
er_patch_data.append([self.exits_detailed[exit][2], ycoord])
if self.exits_detailed[exit][3] != "":
er_patch_data.append([self.exits_detailed[exit][3], facedir])
er_patch_data.append([self.exits_detailed[exit][4], camera])
for x in er_patch_data:
try:
f.seek(int(x[0], 16) + rom_offset)
f.write(x[1])
except Exception:
if print_log:
print("ERROR: Not a valid address", x)
#print("entrance shuffle done")
# Check for additional switches that need to be set
switch_str = []
if self.start_loc == 19: # Open Lily's door when starting in Underground Tunnel
switch_str.append(b"\x02\xcd\x13\x01")
# elif self.start_loc == 30: # Inca ramp can hardlock you -- NEW FIX MAKES THIS OBSOLETE
# switch_str.append(b"\x02\xcd\x0c\x01")
elif self.start_loc == 47: # Diamond Mine behind fences
switch_str.append(b"\x02\xcd\x34\x01\x02\xcd\x35\x01\x02\xcd\x36\x01")
if "Open Mode" in self.variant:
switch_str.append(b"\x02\xcc\x11\x02\xcc\x14\x02\xcc\x1f\x02\xcc\x2a\x02\xcc\x41")
if self.enemizer != "None" and self.enemizer != "Limited":
switch_str.append(b"\x02\xcc\xa0\x02\xcc\xa1")
f.seek(int("1ffb0", 16) + rom_offset)
for x in switch_str:
f.write(x)
f.write(b"\x6b")
#print("switches done")
# Swapped exits
# for exit in self.exits:
# if self.exits[exit][1] > 0:
# to_exit = self.exits[exit][1]
# map_str = self.exits[to_exit][9]
# if self.exits[exit][8] != "":
# f.seek(int(self.exits[exit][8], 16) + rom_offset)
# f.write(map_str)
# else:
# map_id = map_str[0:1]
# xcoord = int.to_bytes(int.from_bytes(map_str[1:3], byteorder="little") // 16, 2, byteorder='little')
# ycoord = int.to_bytes(int.from_bytes(map_str[3:5], byteorder="little") // 16, 2, byteorder='little')
# facedir = map_str[5:6]
# camera = map_str[6:8]
# # print(map_id,xcoord,ycoord,facedir,camera)
#
# f.seek(int(self.exits_detailed[exit][0], 16) + rom_offset)
# f.write(map_id)
# f.seek(int(self.exits_detailed[exit][1], 16) + rom_offset)
# f.write(xcoord)
# f.seek(int(self.exits_detailed[exit][2], 16) + rom_offset)
# f.write(ycoord)
# if self.exits_detailed[exit][3] != "":
# f.seek(int(self.exits_detailed[exit][3], 16) + rom_offset)
# f.write(facedir)
# f.seek(int(self.exits_detailed[exit][4], 16) + rom_offset)
# f.write(camera)
# print "ROM successfully created"
# Print parsed list of map headers
def parse_maps(self, f, rom_offset=0):
f.seek(int("d8000", 16) + rom_offset)
header_lengths = {
b"\x02": 1,
b"\x03": 7,
b"\x04": 6,
b"\x05": 7,
b"\x06": 4,
b"\x0e": 3,
b"\x10": 6,
b"\x11": 5,
b"\x13": 2,
b"\x14": 1,
b"\x15": 1,
b"\x17": 5
}
done = False
addr = 0
map_dataset = {}
anchor_dataset = {}
while not done:
map_id = f.read(2)
print(binascii.hexlify(map_id))
map_headers = []
anchor_headers = []
map_done = False
anchor = False
while not map_done:
map_header = f.read(1)
if map_header == b"\x14":
anchor = True
anchor_id = f.read(1)
map_header += anchor_id
map_headers.append(map_header)
print(binascii.hexlify(map_header))
elif map_header == b"\x00":
map_done = True
print(binascii.hexlify(map_header))
print("")
else:
header_len = header_lengths[map_header]
map_header += f.read(header_len)
map_headers.append(map_header)
print(binascii.hexlify(map_header))
if anchor:
anchor_headers.append(map_header)
map_dataset[map_id] = map_headers # map_dataset was initialized above but never filled; anchor_dataset keys on anchor_id below
if anchor_headers:
anchor_dataset[anchor_id] = anchor_headers
if f.tell() >= int("daffe", 16) + rom_offset:
done = True
# print map_headers
print(anchor_headers)
# Pick random start location
def random_start(self,print_log=False):
locations = []
for loc in self.item_locations:
if (self.start_mode == "Forced Unsafe" and self.item_locations[loc][6] == "Unsafe") or (
self.start_mode != "Forced Unsafe" and self.item_locations[loc][6] == "Safe") or (
self.item_locations[loc][6] == self.start_mode):
locations.append(loc)
if not locations:
if print_log:
print("ERROR: Something is fishy with start locations")
return -1
else:
# print locations
# return 93 # TESTING!
return locations[random.randint(0, len(locations) - 1)]
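# Example (from the filter above): start_mode "Safe" admits only locations
# tagged "Safe" in item_locations[loc][6]; "Forced Unsafe" admits only "Unsafe";
# any other mode matches its own tag directly.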
# Shuffle travel destinations
def shuffle_overworld(self,print_log=False):
new_continents = [[],[],[],[],[]]
# Ensure each continent has at least one travel location
destination_list = [1,6,12,14,16,18]
random.shuffle(destination_list)
for continent in new_continents:
continent.append(destination_list.pop(0))
# Randomly assign the rest of the locations
destination_list += [2,3,4,5,7,8,9,10,11,13,15,17,19]
random.shuffle(destination_list)
new_continents[0] += destination_list[:4]
new_continents[1] += destination_list[4:8]
new_continents[2] += destination_list[8:10]
new_continents[3] += destination_list[10:13]
new_continents[4] += destination_list[-1:]
for continent in new_continents:
random.shuffle(continent)
self.overworld_menus[1][0] = new_continents[0][0]
self.overworld_menus[2][0] = new_continents[0][1]
self.overworld_menus[3][0] = new_continents[0][2]
self.overworld_menus[4][0] = new_continents[0][3]
self.overworld_menus[5][0] = new_continents[0][4]
self.overworld_menus[6][0] = new_continents[1][0]
self.overworld_menus[7][0] = new_continents[1][1]
self.overworld_menus[8][0] = new_continents[1][2]
self.overworld_menus[9][0] = new_continents[1][3]
self.overworld_menus[10][0] = new_continents[1][4]
self.overworld_menus[11][0] = new_continents[2][0]
self.overworld_menus[12][0] = new_continents[2][1]
self.overworld_menus[13][0] = new_continents[2][2]
self.overworld_menus[14][0] = new_continents[3][0]
self.overworld_menus[15][0] = new_continents[3][1]
self.overworld_menus[16][0] = new_continents[3][2]
self.overworld_menus[17][0] = new_continents[3][3]
self.overworld_menus[18][0] = new_continents[4][0]
self.overworld_menus[19][0] = new_continents[4][1]
self.graph[10][1].clear()
self.graph[11][1].clear()
self.graph[12][1].clear()
self.graph[13][1].clear()
self.graph[14][1].clear()
self.graph[10][10].clear()
self.graph[11][10].clear()
self.graph[12][10].clear()
self.graph[13][10].clear()
self.graph[14][10].clear()
# Add new overworld to the graph
for entry in self.overworld_menus:
new_entry = self.overworld_menus[entry][0]
self.graph[self.overworld_menus[entry][2]][1].append(self.overworld_menus[new_entry][3])
self.graph[self.overworld_menus[new_entry][3]][1].remove(self.overworld_menus[new_entry][2])
self.graph[self.overworld_menus[new_entry][3]][1].append(self.overworld_menus[entry][2])
return True
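# Sketch of the rewiring above (hedged reading of the data layout):
# overworld_menus[entry][0] names the menu slot that now occupies `entry`;
# each continent node [2] gains an edge to the new region node [3], and the
# region node is re-pointed back at the continent that now hosts it.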
# Shuffle enemies in ROM
def enemize(self, f, rom_offset=0):
f.seek(0)
rom = f.read()
# test_enemy = 13 # TESTING!
# test_set = self.enemies[test_enemy][0]
complex_enemies = [4, 15, 53, 62, 88] # Enemies with many sprites, or are no fun
max_complex = 5
# Get list of enemysets
enemysets = list(self.enemysets)
# Shuffle enemy stats in Insane
if self.enemizer == "Insane":
insane_enemies = []
insane_templates = []
for enemy in self.enemies:
if self.enemies[enemy][5] and enemy != 102: # Special exception for Zombies
insane_enemies.append(enemy)
insane_templates.append(self.enemies[enemy][2])
random.shuffle(insane_templates)
insane_dictionary = {}
i = 0
for enemy in insane_enemies:
insane_dictionary[enemy] = insane_templates[i]
i += 1
# Randomize enemy spritesets
for map in self.maps:
complex_ct = 0
oldset = self.maps[map][0]
# Determine new enemyset for map
if self.enemizer == "Limited":
sets = [oldset]
elif not self.maps[map][7]:
sets = enemysets[:]
else:
sets = self.maps[map][7][:]
random.shuffle(sets)
newset = sets[0]
# if 10 in sets: # TESTING!
# newset = 10
# newset = test_set # TESTING!
# Gather enemies from old and new sets
old_enemies = []
new_enemies = []
for enemy in self.enemies:
if self.enemies[enemy][0] == oldset:
old_enemies.append(enemy)
if self.enemies[enemy][0] == newset and self.enemies[enemy][5]:
new_enemies.append(enemy)
# Update map header to reflect new enemyset
if self.maps[map][3]:
self.map_patches.append([self.maps[map][3],self.enemysets[newset][0],self.maps[map][4]])
# Randomize each enemy in map
addr_start = self.maps[map][5]
addr_end = self.maps[map][6]
for enemy in old_enemies:
# print self.enemies[enemy][3]
done = False
addr = int(addr_start, 16) + rom_offset
while not done:
addr = rom.find(self.enemies[enemy][1] + self.enemies[enemy][2], addr + 1)
if addr < 0 or addr > int(addr_end, 16) + rom_offset:
done = True
else:
# Pick an enemy from new set
enemytype = self.enemies[enemy][3]
walkable = self.enemies[enemy][4]
new_enemies_tmp = new_enemies[:]
# Get X/Y for special placement exceptions
f.seek(addr - 3)
xcoord = binascii.hexlify(f.read(1))
ycoord = binascii.hexlify(f.read(1))
# 4-Ways cannot be on a #$XF x-coord
if newset == 1 and 13 in new_enemies_tmp:
if xcoord[1] == 102: # hexlify yields ASCII bytes; 102 == ord('f'), i.e. x-coord ends in 0xF
new_enemies_tmp.remove(13)
# Zip Flies can't be too close to map origin
elif newset == 10 and 103 in new_enemies_tmp:
if int(xcoord, 16) <= 4 or int(ycoord, 16) <= 4:
new_enemies_tmp.remove(103)
random.shuffle(new_enemies_tmp)
i = 0
found_enemy = False
# if 13 in new_enemies_tmp: # TESTING!
# new_enemy = 13
# found_enemy = True
while not found_enemy:
new_enemy = new_enemies_tmp[i]
new_enemytype = self.enemies[new_enemy][3]
new_walkable = self.enemies[new_enemy][4]
if walkable or new_enemytype == 3 or walkable == new_walkable or i == len(new_enemies_tmp) - 1:
found_enemy = True
# Limit number of complex enemies per map
if new_enemy in complex_enemies:
complex_ct += 1
if complex_ct >= max_complex:
for enemy_tmp in new_enemies:
if enemy_tmp in complex_enemies:
new_enemies.remove(enemy_tmp)
i -= 1
i += 1
f.seek(addr - 1)
# f.write(b"\x00" + self.enemies[test_enemy][1] + self.enemies[test_enemy][2]) # TESTING!
f.write(b"\x00" + self.enemies[new_enemy][1])
if self.enemizer == "Balanced" and enemy == 102:
f.write(b"\x47")
elif map != 27 and self.enemizer != "Balanced": # Moon Tribe cave enemies retain same template
if self.enemizer == "Insane" and new_enemy != 102: # Again, zombie exception
f.write(insane_dictionary[new_enemy])
else:
f.write(self.enemies[new_enemy][2])
# Disable all non-enemy sprites
if self.enemizer != "Limited":
for sprite in self.nonenemy_sprites:
f.seek(int(self.nonenemy_sprites[sprite][1], 16) + rom_offset + 3)
f.write(b"\x02\xe0")
# Build world
def __init__(self, settings: RandomizerData, statues_required=6, statues=[1,2,3,4,5,6], statue_req=StatueReq.GAME_CHOICE.value, kara=3, gem=[3,5,8,12,20,30,50], incatile=[9,5], hieroglyphs=[1,2,3,4,5,6], boss_order=[1,2,3,4,5,6,7]):
self.seed = settings.seed
self.race_mode = settings.race_mode
self.fluteless = settings.fluteless
self.statues = statues
self.statues_required = statues_required
self.statue_req = statue_req
self.boss_order = boss_order
self.dungeons_req = []
for x in self.statues:
self.dungeons_req.append(self.boss_order[x-1])
gaia_coinflip = random.randint(0, 1)
if settings.goal.value == Goal.RED_JEWEL_HUNT.value:
self.goal = "Red Jewel Hunt"
elif settings.goal.value == Goal.APO_GAIA.value or (settings.goal.value == Goal.RANDOM_GAIA.value and gaia_coinflip):
self.goal = "Apocalypse Gaia"
else:
self.goal = "Dark Gaia"
if settings.logic.value == Logic.COMPLETABLE.value:
self.logic_mode = "Completable"
elif settings.logic.value == Logic.BEATABLE.value:
self.logic_mode = "Beatable"
else:
self.logic_mode = "Chaos"
if settings.entrance_shuffle.value == EntranceShuffle.NONE.value:
self.entrance_shuffle = "None"
elif settings.entrance_shuffle.value == EntranceShuffle.COUPLED.value:
self.entrance_shuffle = "Coupled"
elif settings.entrance_shuffle.value == EntranceShuffle.UNCOUPLED.value:
self.entrance_shuffle = "Uncoupled"
if settings.start_location.value == StartLocation.SOUTH_CAPE.value:
self.start_mode = "South Cape"
elif settings.start_location.value == StartLocation.SAFE.value:
self.start_mode = "Safe"
elif settings.start_location.value == StartLocation.UNSAFE.value:
self.start_mode = "Unsafe"
else:
self.start_mode = "Forced Unsafe"
if settings.enemizer.value == Enemizer.NONE.value:
self.enemizer = "None"
elif settings.enemizer.value == Enemizer.BALANCED.value:
self.enemizer = "Balanced"
elif settings.enemizer.value == Enemizer.LIMITED.value:
self.enemizer = "Limited"
elif settings.enemizer.value == Enemizer.FULL.value:
self.enemizer = "Full"
else:
self.enemizer = "Insane"
if settings.ohko:
self.variant = ["OHKO"]
elif settings.red_jewel_madness:
self.variant = ["RJM"]
else:
self.variant = []
if settings.allow_glitches:
self.variant.append("Allow Glitches")
if settings.boss_shuffle:
self.variant.append("Boss Shuffle")
if settings.overworld_shuffle:
self.variant.append("Overworld Shuffle")
if settings.open_mode:
self.variant.append("Open Mode")
if settings.z3:
self.variant.append("Z3 Mode")
self.firebird = settings.firebird
self.start_loc = 10
# self.level = settings.level.value
self.difficulty = settings.difficulty.value
self.kara = kara
self.gem = gem
self.incatile = incatile
self.hieroglyphs = hieroglyphs
self.placement_log = []
self.exit_log = []
self.spoilers = []
self.required_items = [20, 36]
self.good_items = [10, 13, 24, 25, 37, 62, 63, 64]
self.trolly_locations = [32, 45, 64, 65, 102, 108, 121, 128, 136, 147]
self.free_locations = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 24, 33, 34, 35, 36, 37, 38, 39]
self.map_patches = []
self.visited = []
self.items_collected = []
self.item_destinations = []
self.open_locations = [[],[]]
self.open_edges = []
self.graph_viz = None
        # Initialize item pool; special attacks are treated as "items"
        # Format = { ID: [Quantity, Type code (1=item, 2=ability, 3=statue, 4=other),
        #            ROM Code, Name, TakesInventorySpace,
        #            ProgressionType (1=unlocks new locations, 2=quest item, 3=no progression)] }
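        # For example, entry 1 below, [45, 1, b"\x01", "Red Jewel", False, 1],
        # defines 45 Red Jewels: type 1 (item), ROM code 0x01, taking no
        # inventory space, with progression type 1 (unlocks new locations).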
self.item_pool = {
# Items
0: [2, 1, b"\x00", "Nothing", False, 3],
1: [45, 1, b"\x01", "Red Jewel", False, 1],
2: [1, 1, b"\x02", "Prison Key", True, 1],
3: [1, 1, b"\x03", "Inca Statue A", True, 1],
4: [1, 1, b"\x04", "Inca Statue B", True, 1],
5: [0, 1, b"\x05", "Inca Melody", True, 3],
6: [12, 1, b"\x06", "Herb", False, 3],
7: [1, 1, b"\x07", "Diamond Block", True, 1],
8: [1, 1, b"\x08", "Wind Melody", True, 1],
9: [1, 1, b"\x09", "Lola's Melody", True, 1],
10: [1, 1, b"\x0a", "Large Roast", True, 1],
11: [1, 1, b"\x0b", "Mine Key A", True, 1],
12: [1, 1, b"\x0c", "Mine Key B", True, 1],
13: [1, 1, b"\x0d", "Memory Melody", True, 1],
14: [4, 1, b"\x0e", "Crystal Ball", True, 2],
15: [1, 1, b"\x0f", "Elevator Key", True, 1],
16: [1, 1, b"\x10", "Mu Palace Key", True, 1],
17: [1, 1, b"\x11", "Purification Stone", True, 1],
18: [2, 1, b"\x12", "Statue of Hope", True, 1],
19: [2, 1, b"\x13", "Rama Statue", False, 2],
20: [1, 1, b"\x14", "Magic Dust", True, 2],
21: [0, 1, b"\x15", "Blue Journal", False, 3],
22: [1, 1, b"\x16", "Lance's Letter", False, 3],
23: [1, 1, b"\x17", "Necklace Stones", True, 1],
24: [1, 1, b"\x18", "Will", True, 1],
25: [1, 1, b"\x19", "Teapot", True, 1],
26: [3, 1, b"\x1a", "Mushroom Drops", True, 1],
27: [0, 1, b"\x1b", "Bag of Gold", False, 3],
28: [1, 1, b"\x1c", "Black Glasses", False, 1],
29: [1, 1, b"\x1d", "Gorgon Flower", True, 1],
30: [1, 1, b"\x1e", "Hieroglyph", False, 2],
31: [1, 1, b"\x1f", "Hieroglyph", False, 2],
32: [1, 1, b"\x20", "Hieroglyph", False, 2],
33: [1, 1, b"\x21", "Hieroglyph", False, 2],
34: [1, 1, b"\x22", "Hieroglyph", False, 2],
35: [1, 1, b"\x23", "Hieroglyph", False, 2],
36: [1, 1, b"\x24", "Aura", True, 1],
37: [1, 1, b"\x25", "Lola's Letter", False, 1],
38: [1, 1, b"\x26", "Father's Journal", False, 2],
39: [1, 1, b"\x27", "Crystal Ring", False, 1],
40: [1, 1, b"\x28", "Apple", True, 1],
41: [1, 1, b"\x2e", "2 Red Jewels", False, 1],
42: [1, 1, b"\x2f", "3 Red Jewels", False, 1],
# Status Upgrades
50: [3, 1, b"\x87", "HP Upgrade", False, 3],
51: [1, 1, b"\x89", "DEF Upgrade", False, 3],
52: [2, 1, b"\x88", "STR Upgrade", False, 3],
53: [1, 1, b"\x8a", "Psycho Dash Upgrade", False, 3],
54: [2, 1, b"\x8b", "Dark Friar Upgrade", False, 3],
55: [0, 1, b"\x8c", "Heart Piece", False, 3],
# Abilities
60: [0, 2, "", "Nothing", False, 3],
61: [1, 2, "", "Psycho Dash", False, 1],
62: [1, 2, "", "Psycho Slider", False, 1],
63: [1, 2, "", "Spin Dash", False, 1],
64: [1, 2, "", "Dark Friar", False, 1],
65: [1, 2, "", "Aura Barrier", False, 1],
66: [1, 2, "", "Earthquaker", False, 1],
67: [0, 2, "", "Firebird", False, 1],
# Mystic Statues
100: [1, 3, "", "Mystic Statue 1", False, 2],
101: [1, 3, "", "Mystic Statue 2", False, 2],
102: [1, 3, "", "Mystic Statue 3", False, 2],
103: [1, 3, "", "Mystic Statue 4", False, 2],
104: [1, 3, "", "Mystic Statue 5", False, 2],
105: [1, 3, "", "Mystic Statue 6", False, 2],
106: [0, 3, "", "Mystic Statue", False, 2],
# Event Switches
500: [0, 4, "", "Kara Released", False, 1],
501: [0, 4, "", "Itory: Got Lilly", False, 1],
502: [0, 4, "", "Moon Tribe: Healed Spirits", False, 1],
503: [0, 4, "", "Inca: Beat Castoth", False, 1],
504: [0, 4, "", "Freejia: Found Laborer", False, 1],
505: [0, 4, "", "Neil's: Memory Restored", False, 1],
506: [0, 4, "", "Sky Garden: Map 82 NW Switch", False, 1],
507: [0, 4, "", "Sky Garden: Map 82 NE Switch", False, 1],
            508: [0, 4, "", "Sky Garden: Map 82 SE Switch", False, 1],
509: [0, 4, "", "Sky Garden: Map 84 Switch", False, 1],
510: [0, 4, "", "Seaside: Fountain Purified", False, 1],
511: [0, 4, "", "Mu: Water Lowered 1", False, 1],
512: [0, 4, "", "Mu: Water Lowered 2", False, 1],
513: [0, 4, "", "Angel: Puzzle Complete", False, 1],
514: [0, 4, "", "Mt Kress: Drops Used 1", False, 1],
515: [0, 4, "", "Mt Kress: Drops Used 2", False, 1],
516: [0, 4, "", "Mt Kress: Drops Used 3", False, 1],
517: [0, 4, "", "Pyramid: Hieroglyphs Placed", False, 1],
518: [0, 4, "", "Babel: Castoth Defeated", False, 1],
519: [0, 4, "", "Babel: Viper Defeated", False, 1],
520: [0, 4, "", "Babel: Vampires Defeated", False, 1],
521: [0, 4, "", "Babel: Sand Fanger Defeated", False, 1],
522: [0, 4, "", "Babel: Mummy Queen Defeated", False, 1],
523: [0, 4, "", "Mansion: Solid Arm Defeated", False, 1],
# Misc
600: [0, 4, "", "Freedan Access", False, 1],
601: [0, 4, "", "Glitches", False, 1],
602: [0, 4, "", "Early Firebird", False, 1]
}
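        # Entries with quantity 0 (e.g. 5 "Inca Melody", 67 "Firebird") are not
        # seeded into the pool by default; presumably they exist so their names
        # and codes remain available for lookup.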
# Define Item/Ability/Statue locations
# Format: { ID: [Region, Type (1=item,2=ability,3=statue,4=other), Filled Flag,
# Filled Item, Restricted Items, Item Addr, Text Addr, Text2 Addr,
# Special (map# or inventory addr), Name, Swapped Flag]}
# (For random start, [6]=Type, [7]=XY_spawn_data)
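        # For example, entry 10 below is South Cape's Dark Space: region 21,
        # type 2 (ability), unfilled, with abilities 64-66 (Dark Friar, Aura
        # Barrier, Earthquaker) restricted; for a random start, "Safe" is its
        # Type and the bytes field its XY spawn data, with special byte
        # b"\x01" giving the map number.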
self.item_locations = {
# Jeweler
0: [2, 1, False, 0, [], "8d019", "8d19d", "", "8d260", "Jeweler Reward 1 "],
1: [3, 1, False, 0, [], "8d028", "8d1ba", "", "8d274", "Jeweler Reward 2 "],
2: [4, 1, False, 0, [], "8d037", "8d1d7", "", "8d288", "Jeweler Reward 3 "],
3: [5, 1, False, 0, [], "8d04a", "8d1f4", "", "8d29c", "Jeweler Reward 4 "],
4: [6, 1, False, 0, [], "8d059", "8d211", "", "8d2b0", "Jeweler Reward 5 "],
5: [7, 1, False, 0, [], "8d069", "8d2ea", "", "8d2c4", "Jeweler Reward 6 "],
# South Cape
6: [21, 1, False, 0, [], "F51D", "F52D", "F543", "", "South Cape: Bell Tower "],
7: [20, 1, False, 0, [], "4846e", "48479", "", "", "South Cape: Fisherman "], # text2 was 0c6a1
8: [26, 1, False, 0, [], "F59D", "F5AD", "F5C3", "", "South Cape: Lance's House "],
9: [23, 1, False, 0, [], "499e4", "49be5", "", "", "South Cape: Lola "],
10: [21, 2, False, 0, [64, 65, 66], "c830a", "Safe", b"\xE0\x00\x70\x00\x83\x00\x43", b"\x01", "South Cape: Dark Space "],
# Edward's
11: [30, 1, False, 0, [], "4c214", "4c299", "", "", "Edward's Castle: Hidden Guard "],
12: [30, 1, False, 0, [], "4d0ef", "4d141", "", "", "Edward's Castle: Basement "],
13: [32, 1, False, 0, [], "4d32f", "4d4b1", "", "", "Edward's Prison: Hamlet "], # text 4d5f4?
14: [32, 2, False, 0, [64, 65, 66], "c8637", "", "", b"\x0b", "Edward's Prison: Dark Space "],
# Underground Tunnel
15: [42, 1, False, 0, [], "1AFA9", "", "", "", "Underground Tunnel: Spike's Chest "],
16: [44, 1, False, 0, [], "1AFAE", "", "", "", "Underground Tunnel: Small Room Chest"],
17: [48, 1, False, 0, [], "1AFB3", "", "", "", "Underground Tunnel: Ribber's Chest "],
18: [49, 1, False, 0, [], "F61D", "F62D", "F643", "", "Underground Tunnel: Barrels "],
19: [47, 2, False, 0, [], "c8aa2", "Unsafe", b"\xA0\x00\xD0\x04\x83\x00\x74", b"\x12", "Underground Tunnel: Dark Space "], # Always open
# Itory
20: [51, 1, False, 0, [], "F69D", "F6AD", "F6C3", "", "Itory Village: Logs "],
21: [58, 1, False, 0, [], "4f375", "4f38d", "4f3a8", "", "Itory Village: Cave "],
22: [51, 2, False, 0, [64, 65, 66], "c8b34", "Safe", b"\x30\x04\x90\x00\x83\x00\x35", b"\x15", "Itory Village: Dark Space "],
# Moon Tribe
23: [62, 1, False, 0, [], "4fae1", "4faf9", "4fb16", "", "Moon Tribe: Cave "],
# Inca
24: [71, 1, False, 0, [], "1AFB8", "", "", "", "Inca Ruins: Diamond-Block Chest "],
25: [92, 1, False, 0, [], "1AFC2", "", "", "", "Inca Ruins: Broken Statues Chest "],
26: [83, 1, False, 0, [], "1AFBD", "", "", "", "Inca Ruins: Stone Lord Chest "],
27: [93, 1, False, 0, [], "1AFC6", "", "", "", "Inca Ruins: Slugger Chest "],
28: [76, 1, False, 0, [], "9c5bd", "9c614", "9c637", "", "Inca Ruins: Singing Statue "],
29: [96, 2, False, 0, [], "c9302", "Unsafe", b"\x10\x01\x90\x00\x83\x00\x32", b"\x28", "Inca Ruins: Dark Space 1 "], # Always open
30: [93, 2, False, 0, [], "c923b", "Unsafe", b"\xC0\x01\x50\x01\x83\x00\x32", b"\x26", "Inca Ruins: Dark Space 2 "],
31: [77, 2, False, 0, [], "c8db8", "", "", b"\x1e", "Inca Ruins: Final Dark Space "],
# Gold Ship
32: [100, 1, False, 0, [], "5965e", "5966e", "", "", "Gold Ship: Seth "],
# Diamond Coast
33: [102, 1, False, 0, [], "F71D", "F72D", "F743", "", "Diamond Coast: Jar "],
# Freejia
34: [121, 1, False, 0, [], "F79D", "F7AD", "F7C3", "", "Freejia: Hotel "],
35: [110, 1, False, 0, [], "5b6d8", "5b6e8", "", "", "Freejia: Creepy Guy "],
36: [110, 1, False, 0, [], "5cf9e", "5cfae", "5cfc4", "", "Freejia: Trash Can 1 "],
37: [110, 1, False, 0, [], "5cf3d", "5cf49", "", "", "Freejia: Trash Can 2 "], # text2 was 5cf5b
38: [115, 1, False, 0, [], "5b8b7", "5b962", "5b9ee", "", "Freejia: Snitch "], # text1 was @5b94d
39: [125, 2, False, 0, [64, 65, 66], "c96ce", "Safe", b"\x40\x00\xa0\x00\x83\x00\x11", b"\x34", "Freejia: Dark Space "],
# Diamond Mine
40: [134, 1, False, 0, [], "1AFD0", "", "", "", "Diamond Mine: Chest "],
41: [137, 1, False, 0, [], "5d7e4", "5d819", "5d830", "", "Diamond Mine: Trapped Laborer "],
42: [143, 1, False, 0, [], "aa777", "aa85c", "", "", "Diamond Mine: Laborer w/Elevator Key"], # text1 was aa811
43: [148, 1, False, 0, [], "5d4d2", "5d4eb", "5d506", "", "Diamond Mine: Morgue "],
44: [149, 1, False, 0, [], "aa757", "aa7ef", "", "", "Diamond Mine: Laborer w/Mine Key "], # text1 was aa7b4
45: [150, 1, False, 0, [], "5d2b0", "5d2da", "", "", "Diamond Mine: Sam "],
46: [136, 2, False, 0, [], "c9a87", "Unsafe", b"\xb0\x01\x70\x01\x83\x00\x32", b"\x40", "Diamond Mine: Appearing Dark Space "], # Always open
47: [131, 2, False, 0, [], "c98b0", "Unsafe", b"\xd0\x00\xc0\x00\x83\x00\x61", b"\x3d", "Diamond Mine: Dark Space at Wall "],
48: [142, 2, False, 0, [], "c9b49", "", "", b"\x42", "Diamond Mine: Dark Space behind Wall"],
# Sky Garden
49: [172, 1, False, 0, [], "1AFDD", "", "", "", "Sky Garden: (NE) Platform Chest "],
50: [173, 1, False, 0, [], "1AFD9", "", "", "", "Sky Garden: (NE) Blue Cyber Chest "],
51: [174, 1, False, 0, [], "1AFD5", "", "", "", "Sky Garden: (NE) Statue Chest "],
52: [180, 1, False, 0, [], "1AFE2", "", "", "", "Sky Garden: (SE) Dark Side Chest "],
53: [185, 1, False, 0, [], "1AFE7", "", "", "", "Sky Garden: (SW) Ramp Chest "],
54: [186, 1, False, 0, [], "1AFEC", "", "", "", "Sky Garden: (SW) Dark Side Chest "],
55: [194, 1, False, 0, [], "1AFF1", "", "", "", "Sky Garden: (NW) Top Chest "],
56: [194, 1, False, 0, [], "1AFF5", "", "", "", "Sky Garden: (NW) Bottom Chest "],
57: [170, 2, False, 0, [64, 65, 66], "c9d63", "Safe", b"\x90\x00\x70\x00\x83\x00\x22", b"\x4c", "Sky Garden: Dark Space (Foyer) "],
58: [169, 2, False, 0, [], "ca505", "Unsafe", b"\x70\x00\xa0\x00\x83\x00\x11", b"\x56", "Sky Garden: Dark Space (SE) "], # in the room
59: [183, 2, False, 0, [], "ca173", "", "", b"\x51", "Sky Garden: Dark Space (SW) "],
60: [195, 2, False, 0, [], "ca422", "Unsafe", b"\x20\x00\x70\x00\x83\x00\x44", b"\x54", "Sky Garden: Dark Space (NW) "],
# Seaside Palace
61: [202, 1, False, 0, [], "1AFFF", "", "", "", "Seaside Palace: Side Room Chest "],
62: [200, 1, False, 0, [], "1AFFA", "", "", "", "Seaside Palace: First Area Chest "],
63: [205, 1, False, 0, [], "1B004", "", "", "", "Seaside Palace: Second Area Chest "],
64: [206, 1, False, 0, [], "68af7", "68ea9", "68f02", "", "Seaside Palace: Buffy "],
65: [208, 1, False, 0, [], "6922d", "6939e", "693b7", "", "Seaside Palace: Coffin "], # text1 was 69377
66: [200, 2, False, 0, [64, 65, 66], "ca574", "Safe", b"\xf0\x02\x90\x00\x83\x00\x64", b"\x5a", "Seaside Palace: Dark Space "],
# Mu
67: [217, 1, False, 0, [], "1B012", "", "", "", "Mu: Empty Chest 1 "],
68: [220, 1, False, 0, [], "1B01B", "", "", "", "Mu: Empty Chest 2 "],
69: [225, 1, False, 0, [], "698be", "698d2", "", "", "Mu: Hope Statue 1 "],
70: [236, 1, False, 0, [], "69966", "69975", "", "", "Mu: Hope Statue 2 "],
71: [215, 1, False, 0, [], "1B00D", "", "", "", "Mu: Chest s/o Hope Room 2 "],
72: [214, 1, False, 0, [], "1B009", "", "", "", "Mu: Rama Chest N "],
73: [219, 1, False, 0, [], "1B016", "", "", "", "Mu: Rama Chest E "],
74: [218, 2, False, 0, [], "ca92d", "", "", b"\x60", "Mu: Open Dark Space "], # Always open
75: [228, 2, False, 0, [], "caa99", "", "", b"\x62", "Mu: Slider Dark Space "],
# Angel Village
76: [254, 1, False, 0, [], "F81D", "F82D", "F843", "", "Angel Village: Dance Hall "],
77: [255, 2, False, 0, [64, 65, 66], "caf67", "Safe", b"\x90\x01\xb0\x00\x83\x01\x12", b"\x6c", "Angel Village: Dark Space "],
# Angel Dungeon
78: [265, 1, False, 0, [], "1B020", "", "", "", "Angel Dungeon: Slider Chest "],
79: [271, 1, False, 0, [], "F89D", "F8AD", "F8C3", "", "Angel Dungeon: Ishtar's Room "],
80: [274, 1, False, 0, [], "1B02A", "", "", "", "Angel Dungeon: Puzzle Chest 1 "],
81: [274, 1, False, 0, [], "1B02E", "", "", "", "Angel Dungeon: Puzzle Chest 2 "],
82: [273, 1, False, 0, [], "1B025", "", "", "", "Angel Dungeon: Ishtar's Chest "],
# Watermia
83: [280, 1, False, 0, [], "F91D", "F92D", "F943", "", "Watermia: West Jar "],
85: [286, 1, False, 0, [], "7ad21", "7aede", "", "", "Watermia: Lance "], # text2 was 7afa7
86: [283, 1, False, 0, [], "F99D", "F9AD", "F9C3", "", "Watermia: Gambling House "],
87: [280, 1, False, 0, [], "79248", "79288", "792a1", "", "Watermia: Russian Glass "],
88: [282, 2, False, 0, [64, 65, 66], "cb644", "Safe", b"\x40\x00\xa0\x00\x83\x00\x11", b"\x7c", "Watermia: Dark Space "],
# Great Wall
89: [290, 1, False, 0, [], "7b5c5", "7b5d1", "", "", "Great Wall: Necklace 1 "],
90: [292, 1, False, 0, [], "7b625", "7b631", "", "", "Great Wall: Necklace 2 "],
91: [292, 1, False, 0, [], "1B033", "", "", "", "Great Wall: Chest 1 "],
92: [294, 1, False, 0, [], "1B038", "", "", "", "Great Wall: Chest 2 "],
93: [295, 2, False, 0, [], "cbb11", "Unsafe", b"\x60\x00\xc0\x02\x83\x20\x38", b"\x85", "Great Wall: Archer Dark Space "],
94: [297, 2, False, 0, [], "cbb80", "Unsafe", b"\x50\x01\x80\x04\x83\x00\x63", b"\x86", "Great Wall: Platform Dark Space "], # Always open
95: [300, 2, False, 0, [], "cbc60", "", "", b"\x88", "Great Wall: Appearing Dark Space "],
# Euro
96: [310, 1, False, 0, [], "FA1D", "FA2D", "FA43", "", "Euro: Alley "],
97: [310, 1, False, 0, [], "7c0b3", "7c0f3", "", "", "Euro: Apple Vendor "],
98: [320, 1, False, 0, [], "7e51f", "7e534", "7e54a", "", "Euro: Hidden House "],
99: [323, 1, False, 0, [], "7cd12", "7cd39", "7cd9b", "", "Euro: Store Item 1 "],
100: [323, 1, False, 0, [], "7cdf9", "7ce28", "7ce3e", "", "Euro: Store Item 2 "], # text2 was 7cedd
101: [321, 1, False, 0, [], "FA9D", "FAAD", "FAC3", "", "Euro: Shrine "],
102: [315, 1, False, 0, [], "7df58", "7e10a", "", "", "Euro: Ann "],
103: [325, 2, False, 0, [64, 65, 66], "cc0b0", "Safe", b"\xb0\x00\xb0\x00\x83\x00\x11", b"\x99", "Euro: Dark Space "],
# Mt Temple
104: [336, 1, False, 0, [], "1B03D", "", "", "", "Mt. Temple: Red Jewel Chest "],
105: [338, 1, False, 0, [], "1B042", "", "", "", "Mt. Temple: Drops Chest 1 "],
106: [342, 1, False, 0, [], "1B047", "", "", "", "Mt. Temple: Drops Chest 2 "],
107: [343, 1, False, 0, [], "1B04C", "", "", "", "Mt. Temple: Drops Chest 3 "],
108: [345, 1, False, 0, [], "1B051", "", "", "", "Mt. Temple: Final Chest "],
109: [332, 2, False, 0, [], "cc24f", "Unsafe", b"\xf0\x01\x10\x03\x83\x00\x44", b"\xa1", "Mt. Temple: Dark Space 1 "],
110: [337, 2, False, 0, [], "cc419", "Unsafe", b"\xc0\x07\xc0\x00\x83\x00\x28", b"\xa3", "Mt. Temple: Dark Space 2 "],
111: [343, 2, False, 0, [], "cc7b8", "", "", b"\xa7", "Mt. Temple: Dark Space 3 "],
# Natives'
112: [353, 1, False, 0, [], "FB1D", "FB2D", "FB43", "", "Natives' Village: Statue Room "],
113: [354, 1, False, 0, [], "893af", "8942a", "", "", "Natives' Village: Statue "],
114: [350, 2, False, 0, [64, 65, 66], "cca37", "Safe", b"\xc0\x01\x50\x00\x83\x00\x22", b"\xac", "Natives' Village: Dark Space "],
# Ankor Wat
115: [361, 1, False, 0, [], "1B056", "", "", "", "Ankor Wat: Ramp Chest "],
116: [370, 1, False, 0, [], "1B05B", "", "", "", "Ankor Wat: Flyover Chest "],
117: [378, 1, False, 0, [], "1B060", "", "", "", "Ankor Wat: U-Turn Chest "],
118: [382, 1, False, 0, [], "1B065", "", "", "", "Ankor Wat: Drop Down Chest "],
119: [389, 1, False, 0, [], "1B06A", "", "", "", "Ankor Wat: Forgotten Chest "],
120: [380, 1, False, 0, [], "89fa3", "89fbb", "", "", "Ankor Wat: Glasses Location "], # slow text @89fdc
121: [391, 1, False, 0, [], "89adc", "89af1", "89b07", "", "Ankor Wat: Spirit "], # item was 89b0d, text was 89e2e
122: [372, 2, False, 0, [], "cce92", "Unsafe", b"\x20\x04\x30\x03\x83\x00\x46", b"\xb6", "Ankor Wat: Garden Dark Space "], # Always open
123: [377, 2, False, 0, [], "cd0a2", "", "", b"\xb8", "Ankor Wat: Earthquaker Dark Space "],
124: [383, 2, False, 0, [], "cd1a7", "Unsafe", b"\xb0\x02\xc0\x01\x83\x00\x33", b"\xbb", "Ankor Wat: Drop Down Dark Space "], # Always open
# Dao
125: [400, 1, False, 0, [], "8b1b0", "", "", "", "Dao: Entrance Item 1 "],
126: [400, 1, False, 0, [], "8b1b5", "", "", "", "Dao: Entrance Item 2 "],
127: [400, 1, False, 0, [], "FB9D", "FBAD", "FBC3", "", "Dao: East Grass "],
128: [403, 1, False, 0, [], "8b016", "8b073", "8b090", "", "Dao: Snake Game "],
129: [400, 2, False, 0, [64, 65, 66], "cd3d0", "Safe", b"\x20\x00\x80\x00\x83\x00\x23", b"\xc3", "Dao: Dark Space "],
# Pyramid
130: [411, 1, False, 0, [], "8dcb7", "8e66c", "8e800", "", "Pyramid: Dark Space Top "], # text2 was 8e800
131: [412, 1, False, 0, [], "FC1D", "FC2D", "FC43", "", "Pyramid: Hidden Platform "],
132: [442, 1, False, 0, [], "8c7b2", "8c7c9", "", "", "Pyramid: Hieroglyph 1 "],
133: [422, 1, False, 0, [], "1B06F", "", "", "", "Pyramid: Room 2 Chest "],
134: [443, 1, False, 0, [], "8c879", "8c88c", "", "", "Pyramid: Hieroglyph 2 "],
135: [432, 1, False, 0, [], "1B079", "", "", "", "Pyramid: Room 3 Chest "],
136: [444, 1, False, 0, [], "8c921", "8c934", "", "", "Pyramid: Hieroglyph 3 "],
137: [439, 1, False, 0, [], "1B07E", "", "", "", "Pyramid: Room 4 Chest "],
138: [445, 1, False, 0, [], "8c9c9", "8c9dc", "", "", "Pyramid: Hieroglyph 4 "],
139: [428, 1, False, 0, [], "1B074", "", "", "", "Pyramid: Room 5 Chest "],
140: [446, 1, False, 0, [], "8ca71", "8ca84", "", "", "Pyramid: Hieroglyph 5 "],
141: [447, 1, False, 0, [], "8cb19", "8cb2c", "", "", "Pyramid: Hieroglyph 6 "],
142: [413, 2, True, 0, [], "cd570", "Unsafe", b"\xc0\x01\x90\x03\x83\x00\x44", b"\xcc", "Pyramid: Dark Space Bottom "], # Always open
# Babel
143: [461, 1, False, 0, [], "FC9D", "FCAD", "FCC3", "", "Babel: Pillow "],
144: [461, 1, False, 0, [], "99a4f", "99ae4", "99afe", "", "Babel: Force Field "], # item was 99a61
145: [461, 2, False, 0, [64, 65, 66], "ce09b", "Forced Unsafe", b"\x90\x07\xb0\x01\x83\x10\x28", b"\xdf", "Babel: Dark Space Bottom "],
146: [472, 2, False, 0, [64, 65, 66], "ce159", "Safe", b"\xb0\x02\xb0\x01\x83\x10\x23", b"\xe3", "Babel: Dark Space Top "],
# Jeweler's Mansion
147: [480, 1, False, 0, [], "1B083", "", "", "", "Jeweler's Mansion: Chest "],
# Mystic Statues
148: [101, 3, False, 0, [101, 102, 103, 104, 105], "", "", "", "", "Castoth Prize "],
149: [198, 3, False, 0, [100, 102, 103, 104, 105], "", "", "", "", "Viper Prize "],
150: [244, 3, False, 0, [100, 101, 103, 104, 105], "", "", "", "", "Vampires Prize "],
151: [302, 3, False, 0, [100, 101, 102, 104, 105], "", "", "", "", "Sand Fanger Prize "],
152: [448, 3, False, 0, [100, 101, 102, 103, 105], "", "", "", "", "Mummy Queen Prize "],
153: [479, 3, False, 0, [100, 101, 102, 103, 104], "", "", "", "", "Babel Prize "],
# Event Switches
500: [500, 4, True, 500, [], "", "", "", "", "Kara "],
501: [501, 4, True, 501, [], "", "", "", "", "Lilly "],
502: [502, 4, True, 502, [], "", "", "", "", "Moon Tribe: Spirits Healed "],
503: [503, 4, True, 503, [], "", "", "", "", "Inca: Castoth defeated "],
504: [504, 4, True, 504, [], "", "", "", "", "Freejia: Found Laborer "],
505: [505, 4, True, 505, [], "", "", "", "", "Neil's Memory Restored "],
506: [506, 4, True, 506, [], "", "", "", "", "Sky Garden: Map 82 NW Switch "],
507: [507, 4, True, 507, [], "", "", "", "", "Sky Garden: Map 82 NE Switch "],
508: [508, 4, True, 508, [], "", "", "", "", "Sky Garden: Map 82 SE Switch "],
509: [509, 4, True, 509, [], "", "", "", "", "Sky Garden: Map 84 Switch "],
510: [510, 4, True, 510, [], "", "", "", "", "Seaside: Fountain Purified "],
511: [511, 4, True, 511, [], "", "", "", "", "Mu: Water Lowered 1 "],
512: [512, 4, True, 512, [], "", "", "", "", "Mu: Water Lowered 2 "],
513: [513, 4, True, 513, [], "", "", "", "", "Angel: Puzzle Complete "],
514: [514, 4, True, 514, [], "", "", "", "", "Mt Kress: Drops used 1 "],
515: [515, 4, True, 515, [], "", "", "", "", "Mt Kress: Drops used 2 "],
516: [516, 4, True, 516, [], "", "", "", "", "Mt Kress: Drops used 3 "],
517: [517, 4, True, 517, [], "", "", "", "", "Pyramid: Hieroglyphs placed "],
518: [518, 4, True, 518, [], "", "", "", "", "Babel: Castoth defeated "],
519: [519, 4, True, 519, [], "", "", "", "", "Babel: Viper defeated "],
520: [520, 4, True, 520, [], "", "", "", "", "Babel: Vampires defeated "],
521: [521, 4, True, 521, [], "", "", "", "", "Babel: Sand Fanger defeated "],
522: [522, 4, True, 522, [], "", "", "", "", "Babel: Mummy Queen defeated "],
523: [523, 4, True, 523, [], "", "", "", "", "Mansion: Solid Arm defeated "],
# Misc
600: [600, 4, True, 600, [], "", "", "", "", "Freedan Access "],
601: [601, 4, True, 601, [], "", "", "", "", "Glitches "],
602: [602, 4, True, 602, [], "", "", "", "", "Early Firebird "],
603: [491, 4, True, 67, [], "", "", "", "", "Firebird "]
}
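        # Locations 500+ are pre-filled event flags (Filled Flag True) holding
        # their matching event-switch item; 603 instead holds ability 67 (Firebird).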
# World graph
# Format: { Region ID:
        #          0: Traversed_flag, 1: [AccessibleRegions], 2: type (0=other/misc, 1=exterior, 2=interior), 3: [continentID, areaID, layer, MapID],
# 4: DS_access (0=no_access,1=any_DS,2=form_change_DS),
# 5: RegionName,
# 6: [ItemsToRemove],
# 7: ForceFormChange,
# 8: [AccessibleFromNodes],
# 9: [Accessible_DS_nodes],
# 10: [Accessible_Nodes_w_Logic],
# 11: [item_locations],
# 12: [origin_logic],
# 13: [dest_logic],
# 14: [origin_exits],
# 15: [dest_exits] }
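        # For example, node 10 below ("Overworld: SW Continent") starts
        # untraversed, links to regions 20/30/50/60/63, is type 0 (misc) on
        # continent 1, gives no Dark Space access, and forces a form change.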
self.graph = {
# Game Start
0: [False, [22], 0, [0,0,0,b"\x00"], 0, "Game Start", [], True, [], [], [], [], [], [], [], []],
# Jeweler
1: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Access", [], False, [], [], [], [], [], [], [], []],
2: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 1", [], False, [], [], [], [], [], [], [], []],
3: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 2", [], False, [], [], [], [], [], [], [], []],
4: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 3", [], False, [], [], [], [], [], [], [], []],
5: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 4", [], False, [], [], [], [], [], [], [], []],
6: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 5", [], False, [], [], [], [], [], [], [], []],
7: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 6", [], False, [], [], [], [], [], [], [], []],
8: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 7", [], False, [], [], [], [], [], [], [], []],
# Overworld Menus
10: [False, [20,30,50,60,63], 0, [1,0,0,b"\x00"], 0, "Overworld: SW Continent", [], True, [], [], [], [], [], [], [], []],
11: [False, [102,110,133,160,162], 0, [2,0,0,b"\x00"], 0, "Overworld: SE Continent", [], True, [], [], [], [], [], [], [], []],
12: [False, [250,280,290], 0, [3,0,0,b"\x00"], 0, "Overworld: NE Continent", [], True, [], [], [], [], [], [], [], []],
13: [False, [310,330,350,360], 0, [4,0,0,b"\x00"], 0, "Overworld: N Continent", [], True, [], [], [], [], [], [], [], []],
14: [False, [400,410], 0, [5,0,0,b"\x00"], 0, "Overworld: NW Continent", [], True, [], [], [], [], [], [], [], []],
# Passage Menus
15: [False, [], 0, [0,0,0,b"\x00"], 0, "Passage: Seth", [], True, [], [], [], [], [], [], [], []],
16: [False, [], 0, [0,0,0,b"\x00"], 0, "Passage: Moon Tribe", [], True, [], [], [], [], [], [], [], []],
17: [False, [], 0, [0,0,0,b"\x00"], 0, "Passage: Neil", [], True, [], [], [], [], [], [], [], []],
# South Cape
20: [False, [1,10], 1, [1,1,0,b"\x00"], 0, "South Cape: Main Area", [], False, [], [], [], [], [], [], [], []],
21: [False, [20], 1, [1,1,0,b"\x00"], 0, "South Cape: School Roof", [], False, [], [], [], [], [], [], [], []],
22: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: School", [], False, [], [], [], [], [], [], [], []],
23: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Will's House", [], False, [], [], [], [], [], [], [], []],
24: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: East House", [], False, [], [], [], [], [], [], [], []],
25: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Seth's House", [], False, [], [], [], [], [], [], [], []],
26: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Lance's House", [], False, [], [], [], [], [], [], [], []],
27: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Erik's House", [], False, [], [], [], [], [], [], [], []],
28: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Seaside Cave", [], False, [], [], [], [], [], [], [], []],
# Edward's / Prison
30: [False, [10], 1, [1,2,0,b"\x00"], 0, "Edward's Castle: Main Area", [], False, [], [], [], [], [], [], [], []],
31: [False, [30], 1, [1,2,0,b"\x00"], 0, "Edward's Castle: Behind Guard", [], False, [], [], [], [], [], [], [], []],
32: [False, [], 2, [1,2,0,b"\x00"], 0, "Edward's Prison: Will's Cell", [2], False, [], [], [], [], [], [], [], []],
33: [False, [], 2, [1,2,0,b"\x00"], 0, "Edward's Prison: Prison Main", [2], False, [], [], [], [], [], [], [], []],
# Underground Tunnel
40: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 12", [], False, [], [], [], [], [], [], [], []],
41: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 13", [], False, [], [], [], [], [], [], [], []],
42: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 14", [], False, [], [], [], [], [], [], [], []],
43: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 15", [], False, [], [], [], [], [], [], [], []],
44: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 16", [], False, [], [], [], [], [], [], [], []],
45: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 17 (entrance)", [], False, [], [], [], [], [], [], [], []],
46: [False, [45], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 17 (exit open)", [], False, [], [], [], [], [], [], [], []],
47: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 18 (before bridge)", [], False, [], [], [], [], [], [], [], []],
48: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 18 (after bridge)", [], False, [], [], [], [], [], [], [], []],
49: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Exit", [], True, [], [], [], [], [], [], [], []],
# Itory
50: [False, [10], 1, [1,3,0,b"\x00"], 0, "Itory: Entrance", [9], False, [], [], [], [], [], [], [], []],
51: [False, [50], 1, [1,3,0,b"\x00"], 0, "Itory: Main Area", [], False, [], [], [], [], [], [], [], []],
52: [False, [], 1, [1,3,0,b"\x00"], 0, "Itory: Lilly's Back Porch", [], False, [], [], [], [], [], [], [], []],
53: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: West House", [], False, [], [], [], [], [], [], [], []],
54: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: North House", [], False, [], [], [], [], [], [], [], []],
55: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: Lilly's House", [23], False, [], [], [], [], [], [], [], []],
56: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: Cave", [], False, [], [], [], [], [], [], [], []],
57: [False, [56], 2, [1,3,0,b"\x00"], 0, "Itory: Cave (behind false wall)", [], False, [], [], [], [], [], [], [], []],
58: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: Cave (secret room)", [], False, [], [], [], [], [], [], [], []],
59: [False, [55,501], 0, [1,3,0,b"\x00"], 0, "Itory: Got Lilly", [], False, [], [], [], [], [], [], [], []],
# Moon Tribe / Inca Entrance
60: [False, [10], 1, [1,4,0,b"\x00"], 0, "Moon Tribe: Main Area", [25], False, [], [], [], [], [], [], [], []],
61: [False, [], 2, [1,4,0,b"\x00"], 0, "Moon Tribe: Cave", [], False, [], [], [], [], [], [], [], []],
62: [False, [61], 2, [1,4,0,b"\x00"], 0, "Moon Tribe: Cave (Pedestal)", [], False, [], [], [], [], [], [], [], []],
63: [False, [10], 1, [1,5,0,b"\x00"], 0, "Inca: Entrance", [], False, [], [], [], [], [], [], [], []],
64: [False, [60,502], 0, [1,4,0,b"\x00"], 0, "Moon Tribe: Spirits Awake", [], False, [], [], [], [], [], [], [], []],
# Inca Ruins
70: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (NE)", [], False, [], [], [], [], [], [], [], []],
71: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (NW)", [], False, [], [], [], [], [], [], [], []],
72: [False, [70,73], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (N)", [], False, [], [], [], [], [], [], [], []],
73: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (center)", [], False, [], [], [], [], [], [], [], []],
74: [False, [72], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SW)", [], False, [], [], [], [], [], [], [], []],
75: [False, [72,99], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SE)", [], False, [], [], [], [], [], [], [], []],
76: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (statue head)", [], False, [], [], [], [], [], [], [], []],
77: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 30 (first area)", [3, 4], False, [], [], [], [], [], [], [], []],
78: [False, [77], 2, [1,5,0,b"\x00"], 0, "Inca: Map 30 (second area)", [], False, [], [], [], [], [], [], [], []],
79: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 31", [], False, [], [], [], [], [], [], [], []],
80: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 32 (entrance)", [], False, [], [], [], [], [], [], [], []],
81: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 32 (behind statue)", [], False, [], [], [], [], [], [], [], []],
82: [False, [83], 2, [1,5,0,b"\x00"], 0, "Inca: Map 33 (entrance)", [], False, [], [], [], [], [], [], [], []],
83: [False, [82], 2, [1,5,0,b"\x00"], 0, "Inca: Map 33 (over ramp)", [], False, [], [], [], [], [], [], [], []], # Need to prevent softlocks here
84: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 34", [], False, [], [], [], [], [], [], [], []],
85: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 35 (entrance)", [], False, [], [], [], [], [], [], [], []],
86: [False, [85], 2, [1,5,0,b"\x00"], 0, "Inca: Map 35 (over ramp)", [], False, [], [], [], [], [], [], [], []],
87: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 36 (main)", [8], False, [], [], [], [], [], [], [], []],
88: [False, [87], 2, [1,5,0,b"\x00"], 0, "Inca: Map 36 (exit opened)", [], False, [], [], [], [], [], [], [], []],
89: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 37 (main area)", [7], False, [], [], [], [], [], [], [], []],
90: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 37 (tile bridge)", [], False, [], [], [], [], [], [], [], []], # Check for potential softlock?
91: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (south section)", [], False, [], [], [], [], [], [], [], []],
92: [False, [91], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (behind statues)", [], False, [], [], [], [], [], [], [], []],
93: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (north section)", [], False, [], [], [], [], [], [], [], []],
94: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 39", [], False, [], [], [], [], [], [], [], []],
95: [False, [96], 2, [1,5,0,b"\x00"], 0, "Inca: Map 40 (entrance)", [], False, [], [], [], [], [], [], [], []],
96: [False, [95], 2, [1,5,0,b"\x00"], 0, "Inca: Map 40 (past tiles)", [], False, [], [], [], [], [], [], [], []],
97: [False, [98,503], 2, [1,5,0,b"\x00"], 0, "Inca: Boss Room", [], True, [], [], [], [], [], [], [], []], # might need to add an exit for this
98: [False, [97], 2, [1,5,0,b"\x00"], 0, "Inca: Behind Boss Room", [], False, [], [], [], [], [], [], [], []],
99: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SE door)", [], False, [], [], [], [], [], [], [], []],
# Gold Ship / Diamond Coast
100: [False, [104], 1, [1,5,0,b"\x00"], 0, "Gold Ship: Deck", [], False, [], [], [], [], [], [], [], []],
101: [False, [], 2, [1,5,0,b"\x00"], 0, "Gold Ship: Interior", [], False, [], [], [], [], [], [], [], []],
102: [False, [11], 1, [2,6,0,b"\x00"], 0, "Diamond Coast: Main Area", [], False, [], [], [], [], [], [], [], []],
103: [False, [], 2, [2,6,0,b"\x00"], 0, "Diamond Coast: House", [], False, [], [], [], [], [], [], [], []],
104: [False, [], 0, [1,5,0,b"\x00"], 0, "Gold Ship: Crow's Nest Passage", [], False, [], [], [], [], [], [], [], []],
# Freejia
110: [False, [11], 1, [2,7,0,b"\x00"], 0, "Freejia: Main Area", [], False, [], [], [], [], [], [], [], []],
111: [False, [1, 110], 1, [2,7,0,b"\x00"], 0, "Freejia: 2-story House Roof", [], False, [], [], [], [], [], [], [], []],
112: [False, [], 1, [2,7,0,b"\x00"], 0, "Freejia: Laborer House Roof", [], False, [], [], [], [], [], [], [], []],
113: [False, [110, 114], 1, [2,7,0,b"\x00"], 0, "Freejia: Labor Trade Roof", [], False, [], [], [], [], [], [], [], []],
114: [False, [110, 112], 1, [2,7,0,b"\x00"], 0, "Freejia: Back Alley", [], False, [], [], [], [], [], [], [], []],
115: [False, [110], 0, [2,7,0,b"\x00"], 0, "Freejia: Slaver", [], False, [], [], [], [], [], [], [], []],
116: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: West House", [], False, [], [], [], [], [], [], [], []],
117: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: 2-story House", [], False, [], [], [], [], [], [], [], []],
118: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Lovers' House", [], False, [], [], [], [], [], [], [], []],
119: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (common area)", [], False, [], [], [], [], [], [], [], []],
120: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (west room)", [], False, [], [], [], [], [], [], [], []],
121: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (east room)", [], False, [], [], [], [], [], [], [], []],
122: [False, [504], 2, [2,7,0,b"\x00"], 0, "Freejia: Laborer House", [], False, [], [], [], [], [], [], [], []],
123: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Messy House", [], False, [], [], [], [], [], [], [], []],
124: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Erik House", [], False, [], [], [], [], [], [], [], []],
125: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Dark Space House", [], False, [], [], [], [], [], [], [], []],
126: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Labor Trade House", [], False, [], [], [], [], [], [], [], []],
127: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Labor Market", [], False, [], [], [], [], [], [], [], []],
# Diamond Mine
130: [False, [131], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (entrance)", [], False, [], [], [], [], [], [], [], []],
131: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (behind barriers)", [], False, [], [], [], [], [], [], [], []],
132: [False, [131], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (false wall)", [], False, [], [], [], [], [], [], [], []],
133: [False, [11], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 62", [], False, [], [], [], [], [], [], [], []],
134: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 63 (main)", [], False, [], [], [], [], [], [], [], []],
135: [False, [134], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 63 (elevator)", [], False, [], [], [], [], [], [], [], []],
136: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 64 (main)", [], False, [], [], [], [], [], [], [], []],
137: [False, [136], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 64 (trapped laborer)", [], False, [], [], [], [], [], [], [], []],
138: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 65 (main)", [], False, [], [], [], [], [], [], [], []],
139: [False, [138], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 65 (behind ramp)", [], False, [], [], [], [], [], [], [], []],
140: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (elevator 1)", [], False, [], [], [], [], [], [], [], []],
141: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (elevator 2)", [], False, [], [], [], [], [], [], [], []],
142: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (Dark Space)", [], False, [], [], [], [], [], [], [], []],
143: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (laborer)", [], False, [], [], [], [], [], [], [], []],
144: [False, [145], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 67 (entrance)", [], False, [], [], [], [], [], [], [], []],
145: [False, [144], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 67 (exit)", [], False, [], [], [], [], [], [], [], []], # potential softlock?
146: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 68 (main)", [], False, [], [], [], [], [], [], [], []],
147: [False, [146], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 68 (door open)", [], False, [], [], [], [], [], [], [], []],
148: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 69", [], False, [], [], [], [], [], [], [], []],
149: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 70", [], False, [], [], [], [], [], [], [], []],
150: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 71", [], False, [], [], [], [], [], [], [], []],
# Neil's Cottage / Nazca
160: [False, [11], 2, [2,9,0,b"\x00"], 0, "Neil's Cottage", [13], False, [], [], [], [], [], [], [], []],
161: [False, [17,160,505], 2, [2,9,0,b"\x00"], 0, "Neil's Cottage: Neil", [], False, [], [], [], [], [], [], [], []],
162: [False, [11], 1, [2,10,0,b"\x00"], 0, "Nazca Plain", [], False, [], [], [], [], [], [], [], []],
# Sky Garden
167: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (SE)", [], False, [], [], [], [], [], [], [], []],
168: [False, [181], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (north)", [], False, [], [], [], [], [], [], [], []],
169: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 86 (DS Room)", [], False, [], [], [], [], [], [], [], []],
170: [False, [], 1, [2,10,0,b"\x00"], 0, "Sky Garden: Foyer", [14, 14, 14, 14], False, [], [], [], [], [], [], [], []],
171: [False, [], 1, [2,10,0,b"\x00"], 0, "Sky Garden: Boss Entrance", [], False, [], [], [], [], [], [], [], []],
172: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 77 (main)", [], False, [], [], [], [], [], [], [], []],
173: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 77 (SW)", [], False, [], [], [], [], [], [], [], []],
174: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 77 (SE)", [], False, [], [], [], [], [], [], [], []],
175: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 78", [], False, [], [], [], [], [], [], [], []],
176: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 79 (main)", [], False, [], [], [], [], [], [], [], []],
177: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 79 (center)", [], False, [], [], [], [], [], [], [], []],
178: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 79 (behind barrier)", [], False, [], [], [], [], [], [], [], []],
179: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 80 (north)", [], False, [], [], [], [], [], [], [], []],
180: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 80 (south)", [], False, [], [], [], [], [], [], [], []],
181: [False, [168], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (main)", [], False, [], [], [], [], [], [], [], []],
182: [False, [181], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (west)", [], False, [], [], [], [], [], [], [], []],
183: [False, [182], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (Dark Space cage)", [], False, [], [], [], [], [], [], [], []],
184: [False, [182], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (SE platform)", [], False, [], [], [], [], [], [], [], []],
185: [False, [182], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (SW platform)", [], False, [], [], [], [], [], [], [], []],
186: [False, [506], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (north)", [], False, [], [], [], [], [], [], [], []], # deal with switches
187: [False, [508], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (south)", [], False, [], [], [], [], [], [], [], []],
188: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (NE)", [], False, [], [], [], [], [], [], [], []],
189: [False, [188,507], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (switch cage)", [], False, [], [], [], [], [], [], [], []],
190: [False, [191], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (NE)", [], False, [], [], [], [], [], [], [], []],
191: [False, [190, 192], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (NW)", [], False, [], [], [], [], [], [], [], []],
192: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (center)", [], False, [], [], [], [], [], [], [], []],
193: [False, [194], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (SW)", [], False, [], [], [], [], [], [], [], []],
194: [False, [167], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (chests)", [], False, [], [], [], [], [], [], [], []],
195: [False, [196], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (main)", [], False, [], [], [], [], [], [], [], []],
196: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (NE)", [], False, [], [], [], [], [], [], [], []],
197: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (behind statue)", [], False, [], [], [], [], [], [], [], []],
198: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Boss Room", [], True, [], [], [], [], [], [], [], []],
199: [False, [197,509], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (statue)", [], False, [], [], [], [], [], [], [], []],
# Seaside Palace
200: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1", [16], False, [], [], [], [], [], [], [], []],
201: [False, [200], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 (door unlocked)", [], False, [], [], [], [], [], [], [], []],
202: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 NE Room", [], False, [], [], [], [], [], [], [], []],
203: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 NW Room", [], False, [], [], [], [], [], [], [], []],
204: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 SE Room", [], False, [], [], [], [], [], [], [], []],
205: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 2", [], False, [], [], [], [], [], [], [], []],
206: [False, [200], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Buffy", [], False, [], [], [], [], [], [], [], []],
207: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 2 SW Room", [], False, [], [], [], [], [], [], [], []],
208: [False, [205], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Coffin", [], False, [], [], [], [], [], [], [], []],
209: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Fountain", [17], False, [], [], [], [], [], [], [], []],
210: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Mu Passage", [16], False, [], [], [], [], [], [], [], []],
211: [False, [210], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Mu Passage (door unlocked)", [], False, [], [], [], [], [], [], [], []],
# Mu
212: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 95 (top)", [], False, [], [], [], [], [], [], [], []],
213: [False, [212], 2, [3,12,1,b"\x00"], 0, "Mu: Map 95 (middle E)", [], False, [], [], [], [], [], [], [], []],
214: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 95 (middle W)", [], False, [], [], [], [], [], [], [], []],
215: [False, [213], 2, [3,12,2,b"\x00"], 0, "Mu: Map 95 (bottom E)", [], False, [], [], [], [], [], [], [], []],
216: [False, [214], 2, [3,12,2,b"\x00"], 0, "Mu: Map 95 (bottom W)", [], False, [], [], [], [], [], [], [], []],
217: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 96 (top)", [], False, [], [], [], [], [], [], [], []],
218: [False, [217], 2, [3,12,1,b"\x00"], 0, "Mu: Map 96 (middle)", [], False, [], [], [], [], [], [], [], []],
219: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 96 (bottom)", [], False, [], [], [], [], [], [], [], []],
220: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 97 (top main)", [], False, [], [], [], [], [], [], [], []],
221: [False, [222, 223], 2, [3,12,0,b"\x00"], 0, "Mu: Map 97 (top island)", [], False, [], [], [], [], [], [], [], []],
222: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 97 (middle NE)", [], False, [], [], [], [], [], [], [], []],
223: [False, [221], 2, [3,12,1,b"\x00"], 0, "Mu: Map 97 (middle SW)", [], False, [], [], [], [], [], [], [], []],
224: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 97 (bottom)", [], False, [], [], [], [], [], [], [], []],
225: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 98 (top S)", [], False, [], [], [], [], [], [], [], []],
226: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 98 (top N)", [], False, [], [], [], [], [], [], [], []],
227: [False, [226], 2, [3,12,1,b"\x00"], 0, "Mu: Map 98 (middle E)", [], False, [], [], [], [], [], [], [], []],
228: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 98 (middle W)", [], False, [], [], [], [], [], [], [], []],
229: [False, [227], 2, [3,12,2,b"\x00"], 0, "Mu: Map 98 (bottom E)", [], False, [], [], [], [], [], [], [], []],
230: [False, [228], 2, [3,12,2,b"\x00"], 0, "Mu: Map 98 (bottom W)", [], False, [], [], [], [], [], [], [], []],
231: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 99 (Room of Hope 1)", [18], False, [], [], [], [], [], [], [], []],
232: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 99 (Room of Hope 2)", [18], False, [], [], [], [], [], [], [], []],
233: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 100 (middle E)", [], False, [], [], [], [], [], [], [], []],
234: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 100 (middle W)", [], False, [], [], [], [], [], [], [], []],
235: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 100 (bottom)", [], False, [], [], [], [], [], [], [], []],
236: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 101 (top)", [], False, [], [], [], [], [], [], [], []],
237: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 101 (middle W)", [], False, [], [], [], [], [], [], [], []],
238: [False, [236], 2, [3,12,1,b"\x00"], 0, "Mu: Map 101 (middle E)", [], False, [], [], [], [], [], [], [], []],
239: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 101 (bottom)", [], False, [], [], [], [], [], [], [], []],
240: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 102 (pedestals)", [19, 19], False, [], [], [], [], [], [], [], []],
241: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 102 (statues placed)", [], False, [], [], [], [], [], [], [], []], # might need an exit for this
242: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 102 (statue get)", [], False, [], [], [], [], [], [], [], []],
243: [False, [244], 2, [3,12,0,b"\x00"], 0, "Mu: Boss Room (entryway)", [], False, [], [], [], [], [], [], [], []], # Might need to add an exit for this?
244: [False, [242,243], 2, [3,12,0,b"\x00"], 0, "Mu: Boss Room (main)", [], True, [], [], [], [], [], [], [], []],
245: [False, [212], 2, [3,12,0,b"\x00"], 0, "Mu: Map 95 (top, Slider exit)", [], False, [], [], [], [], [], [], [], []],
246: [False, [226], 2, [3,12,0,b"\x00"], 0, "Mu: Map 98 (top, Slider exit)", [], False, [], [], [], [], [], [], [], []],
247: [False, [231,511], 0, [3,12,0,b"\x00"], 0, "Mu: Water lowered 1", [], False, [], [], [], [], [], [], [], []],
248: [False, [232,512], 0, [3,12,0,b"\x00"], 0, "Mu: Water lowered 2", [], False, [], [], [], [], [], [], [], []],
# Angel Village
250: [False, [12], 1, [3,13,0,b"\x00"], 0, "Angel Village: Outside", [], True, [], [], [], [], [], [], [], []],
251: [False, [1], 2, [3,13,0,b"\x00"], 0, "Angel Village: Underground", [], False, [], [], [], [], [], [], [], []],
252: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Room 1", [], False, [], [], [], [], [], [], [], []],
253: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Room 2", [], False, [], [], [], [], [], [], [], []],
254: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Dance Hall", [], False, [], [], [], [], [], [], [], []],
255: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: DS Room", [], False, [], [], [], [], [], [], [], []],
#256: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Room 3", [], False, [], [], [], [], [], [], [], []],
# Angel Dungeon
260: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 109", [], False, [], [], [], [], [], [], [], []],
261: [False, [278], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 110 (main)", [], False, [], [], [], [], [], [], [], []],
262: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 111", [], False, [], [], [], [], [], [], [], []],
263: [False, [279], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (main)", [], False, [], [], [], [], [], [], [], []],
264: [False, [263], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (slider)", [], False, [], [], [], [], [], [], [], []],
265: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (alcove)", [], False, [], [], [], [], [], [], [], []],
266: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 113", [], False, [], [], [], [], [], [], [], []],
267: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 114 (main)", [], False, [], [], [], [], [], [], [], []],
268: [False, [267], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 114 (slider exit)", [], False, [], [], [], [], [], [], [], []],
269: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 115 (main)", [], False, [], [], [], [], [], [], [], []],
270: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (portrait room)", [], False, [], [], [], [], [], [], [], []],
271: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (side room)", [], False, [], [], [], [], [], [], [], []],
272: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (Ishtar's room)", [], False, [], [], [], [], [], [], [], []],
273: [False, [272], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (Ishtar's chest)", [], False, [], [], [], [], [], [], [], []],
274: [False, [513], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Puzzle Room", [], False, [], [], [], [], [], [], [], []],
275: [False, [265], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (alcove slider)", [], False, [], [], [], [], [], [], [], []],
276: [False, [277], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 115 (slider exit)", [], False, [], [], [], [], [], [], [], []],
277: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 115 (foyer)", [], False, [], [], [], [], [], [], [], []],
278: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 110 (past Draco)", [], False, [], [], [], [], [], [], [], []],
279: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (past Draco)", [], False, [], [], [], [], [], [], [], []],
# Watermia
280: [False, [12], 1, [3,14,0,b"\x00"], 0, "Watermia: Main Area", [24], False, [], [], [], [], [], [], [], []],
#281: [False, [15,280], 0, [3,14,0,b"\x00"], 0, "Watermia: Bridge Man", [], False, [], [], [], [], [], [], [], []],
282: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: DS House", [], False, [], [], [], [], [], [], [], []],
283: [False, [1], 2, [3,14,0,b"\x00"], 0, "Watermia: Gambling House", [], False, [], [], [], [], [], [], [], []],
284: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: West House", [], False, [], [], [], [], [], [], [], []],
285: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: East House", [], False, [], [], [], [], [], [], [], []],
286: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: Lance's House", [], False, [], [], [], [], [], [], [], []],
287: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: NW House", [], False, [], [], [], [], [], [], [], []],
288: [False, [280], 0, [3,14,0,b"\x00"], 0, "Watermia: Stablemaster", [], True, [], [], [], [], [], [], [], []],
# Great Wall
290: [False, [12], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 130", [], False, [], [], [], [], [], [], [], []],
291: [False, [292], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 131 (NW)", [], False, [], [], [], [], [], [], [], []],
292: [False, [293], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 131 (S)", [], False, [], [], [], [], [], [], [], []],
293: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 131 (NE)", [], False, [], [], [], [], [], [], [], []],
294: [False, [296], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 133 (W)", [], False, [], [], [], [], [], [], [], []],
295: [False, [296], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 133 (center)", [], False, [], [], [], [], [], [], [], []],
296: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 133 (E)", [], False, [], [], [], [], [], [], [], []],
297: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 134", [], False, [], [], [], [], [], [], [], []],
298: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 135 (W)", [], False, [], [], [], [], [], [], [], []],
299: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 135 (E)", [], False, [], [], [], [], [], [], [], []],
300: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 136 (W)", [], False, [], [], [], [], [], [], [], []],
301: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 136 (E)", [], False, [], [], [], [], [], [], [], []],
302: [False, [303], 2, [3,15,0,b"\x00"], 0, "Great Wall: Boss Room (entrance)", [], False, [], [], [], [], [], [], [], []],
303: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Boss Room (exit)", [], False, [], [], [], [], [], [], [], []],
# Euro
310: [False, [13], 1, [4,16,0,b"\x00"], 0, "Euro: Main Area", [24], False, [], [], [], [], [], [], [], []],
311: [False, [310], 0, [4,16,0,b"\x00"], 0, "Euro: Stablemaster", [], True, [], [], [], [], [], [], [], []],
312: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Rolek Company", [], False, [], [], [], [], [], [], [], []],
313: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: West House", [], False, [], [], [], [], [], [], [], []],
314: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Rolek Mansion", [40], False, [], [], [], [], [], [], [], []],
315: [False, [314], 0, [4,16,0,b"\x00"], 0, "Euro: Ann", [], False, [], [], [], [], [], [], [], []],
316: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Guest Room", [], False, [], [], [], [], [], [], [], []],
317: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Central House", [], False, [], [], [], [], [], [], [], []],
318: [False, [1], 2, [4,16,0,b"\x00"], 0, "Euro: Jeweler House", [], False, [], [], [], [], [], [], [], []],
319: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Twins House", [], False, [], [], [], [], [], [], [], []],
320: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Hidden House", [], False, [], [], [], [], [], [], [], []],
321: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Shrine", [], False, [], [], [], [], [], [], [], []],
322: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Explorer's House", [], False, [], [], [], [], [], [], [], []],
323: [False, [324], 2, [4,16,0,b"\x00"], 0, "Euro: Store Entrance", [], False, [], [], [], [], [], [], [], []],
324: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Store Exit", [], False, [], [], [], [], [], [], [], []],
325: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Dark Space House", [], False, [], [], [], [], [], [], [], []],
# Mt. Kress
330: [False, [13], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 160", [], False, [], [], [], [], [], [], [], []],
331: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 161 (E)", [], False, [], [], [], [], [], [], [], []],
332: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 161 (W)", [], False, [], [], [], [], [], [], [], []],
333: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (main)", [26], False, [], [], [], [], [], [], [], []],
334: [False, [333], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (S)", [], False, [], [], [], [], [], [], [], []],
335: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (NW)", [], False, [], [], [], [], [], [], [], []],
336: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (SE)", [], False, [], [], [], [], [], [], [], []],
337: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 163", [], False, [], [], [], [], [], [], [], []],
338: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 164", [], False, [], [], [], [], [], [], [], []],
339: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 165 (S)", [26], False, [], [], [], [], [], [], [], []],
340: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 165 (NE)", [26], False, [], [], [], [], [], [], [], []],
341: [False, [338], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 165 (NW)", [], False, [], [], [], [], [], [], [], []],
342: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 166", [], False, [], [], [], [], [], [], [], []],
343: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 167", [], False, [], [], [], [], [], [], [], []],
344: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 168", [], False, [], [], [], [], [], [], [], []],
345: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 169", [], False, [], [], [], [], [], [], [], []],
# Natives' Village
350: [False, [13], 1, [4,18,0,b"\x00"], 0, "Natives' Village: Main Area", [10], False, [], [], [], [], [], [], [], []],
351: [False, [350], 0, [4,18,0,b"\x00"], 0, "Natives' Village: Child Guide", [], True, [], [], [], [], [], [], [], []],
352: [False, [], 2, [4,18,0,b"\x00"], 0, "Natives' Village: West House", [], False, [], [], [], [], [], [], [], []],
353: [False, [], 2, [4,18,0,b"\x00"], 0, "Natives' Village: House w/Statues", [29], False, [], [], [], [], [], [], [], []],
354: [False, [353], 0, [4,18,0,b"\x00"], 0, "Natives' Village: Statues Awake", [], False, [], [], [], [], [], [], [], []],
# Ankor Wat
360: [False, [13], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 176", [], False, [], [], [], [], [], [], [], []],
361: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 177 (E)", [], False, [], [], [], [], [], [], [], []],
362: [False, [361], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 177 (W)", [], False, [], [], [], [], [], [], [], []],
363: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 178 (S)", [], False, [], [], [], [], [], [], [], []],
364: [False, [363], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 178 (center)", [], False, [], [], [], [], [], [], [], []],
365: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 178 (N)", [], False, [], [], [], [], [], [], [], []],
366: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 179 (E)", [], False, [], [], [], [], [], [], [], []],
367: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 179 (W)", [], False, [], [], [], [], [], [], [], []],
368: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 180", [], False, [], [], [], [], [], [], [], []],
369: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 181 (N)", [], False, [], [], [], [], [], [], [], []],
370: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 181 (center)", [], False, [], [], [], [], [], [], [], []],
371: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 181 (S)", [], False, [], [], [], [], [], [], [], []],
372: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 182", [], False, [], [], [], [], [], [], [], []],
373: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 183 (S)", [], False, [], [], [], [], [], [], [], []],
374: [False, [373], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 183 (NW)", [], False, [], [], [], [], [], [], [], []],
375: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 183 (NE)", [], False, [], [], [], [], [], [], [], []],
376: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 184 (S)", [], False, [], [], [], [], [], [], [], []],
377: [False, [376], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 184 (N)", [], False, [], [], [], [], [], [], [], []],
378: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 185", [], False, [], [], [], [], [], [], [], []],
379: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 186 (main)", [], False, [], [], [], [], [], [], [], []],
380: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 186 (NE)", [], False, [], [], [], [], [], [], [], []],
381: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 187 (main)", [], False, [], [], [], [], [], [], [], []],
382: [False, [381], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 187 (chest)", [], False, [], [], [], [], [], [], [], []],
383: [False, [381], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 187 (Dark Space)", [], False, [], [], [], [], [], [], [], []],
384: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (N bright)", [], False, [], [], [], [], [], [], [], []],
385: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (S bright)", [], False, [], [], [], [], [], [], [], []],
386: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 189 (floor S)", [], False, [], [], [], [], [], [], [], []],
387: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 189 (floor N)", [], False, [], [], [], [], [], [], [], []],
388: [False, [386], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 189 (platform)", [], False, [], [], [], [], [], [], [], []],
389: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 190 (E)", [], False, [], [], [], [], [], [], [], []],
390: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 190 (W)", [], False, [], [], [], [], [], [], [], []],
391: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 191", [], False, [], [], [], [], [], [], [], []],
392: [False, [384], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (N)", [], False, [], [], [], [], [], [], [], []],
393: [False, [385], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (S)", [], False, [], [], [], [], [], [], [], []],
# Dao
400: [False, [1,14], 1, [5,20,0,b"\x00"], 0, "Dao: Main Area", [], False, [], [], [], [], [], [], [], []],
401: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: NW House", [], False, [], [], [], [], [], [], [], []],
402: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: Neil's House", [], False, [], [], [], [], [], [], [], []],
403: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: Snake Game", [], False, [], [], [], [], [], [], [], []],
404: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: SW House", [], False, [], [], [], [], [], [], [], []],
405: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: S House", [], False, [], [], [], [], [], [], [], []],
406: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: SE House", [], False, [], [], [], [], [], [], [], []],
# Pyramid
410: [False, [14], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (main)", [], False, [], [], [], [], [], [], [], []],
411: [False, [410], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (behind orbs)", [], False, [], [], [], [], [], [], [], []],
412: [False, [413], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (hidden platform)", [], False, [], [], [], [], [], [], [], []],
413: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (bottom)", [], False, [], [], [], [], [], [], [], []],
414: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (boss entrance)", [], False, [], [], [], [], [], [], [], []],
415: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph room", [30, 31, 32, 33, 34, 35, 38], False, [], [], [], [], [], [], [], []],
416: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 206 (E)", [], False, [], [], [], [], [], [], [], []],
417: [False, [416], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 206 (W)", [], False, [], [], [], [], [], [], [], []],
418: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 207 (NE)", [], False, [], [], [], [], [], [], [], []],
419: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 207 (SW)", [], False, [], [], [], [], [], [], [], []],
420: [False, [421], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 208 (N)", [], False, [], [], [], [], [], [], [], []],
421: [False, [420], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 208 (S)", [], False, [], [], [], [], [], [], [], []],
422: [False, [423], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 209 (W)", [], False, [], [], [], [], [], [], [], []],
423: [False, [422,411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 209 (E)", [], False, [], [], [], [], [], [], [], []],
424: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 210", [], False, [], [], [], [], [], [], [], []],
425: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 211", [], False, [], [], [], [], [], [], [], []],
426: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (N)", [], False, [], [], [], [], [], [], [], []],
427: [False, [426], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (center)", [], False, [], [], [], [], [], [], [], []],
428: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (SE)", [], False, [], [], [], [], [], [], [], []],
429: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (SW)", [], False, [], [], [], [], [], [], [], []],
430: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 213", [], False, [], [], [], [], [], [], [], []],
431: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (NW)", [], False, [], [], [], [], [], [], [], []],
432: [False, [431], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (NE)", [], False, [], [], [], [], [], [], [], []],
433: [False, [431,434], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (SE)", [], False, [], [], [], [], [], [], [], []],
434: [False, [433], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (SW)", [], False, [], [], [], [], [], [], [], []],
435: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 215 (main)", [], False, [], [], [], [], [], [], [], []],
436: [False, [437], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 216 (N)", [], False, [], [], [], [], [], [], [], []],
437: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 216 (S)", [], False, [], [], [], [], [], [], [], []],
438: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 217 (W)", [], False, [], [], [], [], [], [], [], []],
439: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 217 (E)", [], False, [], [], [], [], [], [], [], []],
440: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 219 (W)", [], False, [], [], [], [], [], [], [], []],
441: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 219 (E)", [], False, [], [], [], [], [], [], [], []],
442: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 1", [], False, [], [], [], [], [], [], [], []],
443: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 2", [], False, [], [], [], [], [], [], [], []],
444: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 3", [], False, [], [], [], [], [], [], [], []],
445: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 4", [], False, [], [], [], [], [], [], [], []],
446: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 5", [], False, [], [], [], [], [], [], [], []],
447: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 6", [], False, [], [], [], [], [], [], [], []],
448: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Boss Room", [], True, [], [], [], [], [], [], [], []],
449: [False, [415,517], 0, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyphs Placed", [], False, [], [], [], [], [], [], [], []],
450: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 215 (past Killer 6)", [], False, [], [], [], [], [], [], [], []],
# Babel
460: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Foyer", [], False, [], [], [], [], [], [], [], []],
461: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 223 (bottom)", [], False, [], [], [], [], [], [], [], []],
462: [False, [461], 2, [6,22,0,b"\x00"], 0, "Babel: Map 223 (top)", [], False, [], [], [], [], [], [], [], []],
463: [False, [518,519], 2, [6,22,0,b"\x00"], 0, "Babel: Map 224 (bottom)", [], False, [], [], [], [], [], [], [], []],
464: [False, [520,521], 2, [6,22,0,b"\x00"], 0, "Babel: Map 224 (top)", [], False, [], [], [], [], [], [], [], []],
465: [False, [466], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (SW)", [], False, [], [], [], [], [], [], [], []],
466: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (NW)", [], False, [], [], [], [], [], [], [], []],
467: [False, [468], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (SE)", [], False, [], [], [], [], [], [], [], []],
468: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (NE)", [], False, [], [], [], [], [], [], [], []],
469: [False, [470], 2, [6,22,0,b"\x00"], 0, "Babel: Map 226 (bottom)", [], False, [], [], [], [], [], [], [], []],
470: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 226 (top)", [], False, [], [], [], [], [], [], [], []],
471: [False, [522], 2, [6,22,0,b"\x00"], 0, "Babel: Map 227 (bottom)", [], False, [], [], [], [], [], [], [], []],
472: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 227 (top)", [], False, [], [], [], [], [], [], [], []],
473: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Olman's Room", [], False, [], [], [], [], [], [], [], []],
474: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Castoth", [], False, [], [], [], [], [], [], [], []],
475: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Viper", [], False, [], [], [], [], [], [], [], []],
476: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Vampires", [], False, [], [], [], [], [], [], [], []],
477: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Sand Fanger", [], False, [], [], [], [], [], [], [], []],
478: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Mummy Queen", [], False, [], [], [], [], [], [], [], []],
479: [False, [473], 0, [6,22,0,b"\x00"], 0, "Babel: Statue Get", [], False, [], [], [], [], [], [], [], []],
# Jeweler's Mansion
480: [False, [], 2, [6,23,0,b"\x00"], 0, "Jeweler's Mansion: Main", [], False, [], [], [], [], [], [], [], []],
481: [False, [], 2, [6,23,0,b"\x00"], 0, "Jeweler's Mansion: Behind Psycho Slider", [], False, [], [], [], [], [], [], [], []],
482: [False, [523], 2, [6,23,0,b"\x00"], 0, "Jeweler's Mansion: Solid Arm", [], False, [], [], [], [], [], [], [], []],
# Game End
490: [False, [500], 0, [0,0,0,b"\x00"], 0, "Kara Rescued", [], False, [], [], [], [], [], [], [], []],
491: [False, [], 0, [0,0,0,b"\x00"], 0, "Firebird", [], False, [], [], [], [], [], [], [], []],
492: [False, [491], 0, [0,0,0,b"\x00"], 0, "Dark Gaia/End Game", [], False, [], [], [], [], [], [], [], []],
# Event Switches
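# These pseudo-regions (500-523, plus the 600s under Misc below) model flags
# rather than places; their IDs double as "item" IDs in the requirement lists
# of the logic table below (e.g. [[505, 1]] there means "Neil's Memory
# Restored" must have been reached).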
500: [False, [], 0, [0,0,0,b"\x00"], 0, "Kara ", [], False, [], [], [], [], [], [], [], []],
501: [False, [], 0, [0,0,0,b"\x00"], 0, "Lilly ", [], False, [], [], [], [], [], [], [], []],
502: [False, [], 0, [0,0,0,b"\x00"], 0, "Moon Tribe: Spirits Healed ", [], False, [], [], [], [], [], [], [], []],
503: [False, [], 0, [0,0,0,b"\x00"], 0, "Inca: Castoth defeated ", [], False, [], [], [], [], [], [], [], []],
504: [False, [], 0, [0,0,0,b"\x00"], 0, "Freejia: Found Laborer ", [], False, [], [], [], [], [], [], [], []],
505: [False, [], 0, [0,0,0,b"\x00"], 0, "Neil's Memory Restored ", [], False, [], [], [], [], [], [], [], []],
506: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 82 NW Switch ", [], False, [], [], [], [], [], [], [], []],
507: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 82 NE Switch ", [], False, [], [], [], [], [], [], [], []],
508: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 82 SE Switch ", [], False, [], [], [], [], [], [], [], []],
509: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 84 Switch ", [], False, [], [], [], [], [], [], [], []],
510: [False, [], 0, [0,0,0,b"\x00"], 0, "Seaside: Fountain Purified ", [], False, [], [], [], [], [], [], [], []],
511: [False, [], 0, [0,0,0,b"\x00"], 0, "Mu: Water Lowered 1 ", [], False, [], [], [], [], [], [], [], []],
512: [False, [], 0, [0,0,0,b"\x00"], 0, "Mu: Water Lowered 2 ", [], False, [], [], [], [], [], [], [], []],
513: [False, [], 0, [0,0,0,b"\x00"], 0, "Angel: Puzzle Complete ", [], False, [], [], [], [], [], [], [], []],
514: [False, [333,335], 0, [0,0,0,b"\x00"], 0, "Mt Kress: Drops used 1 ", [], False, [], [], [], [], [], [], [], []],
515: [False, [339,340], 0, [0,0,0,b"\x00"], 0, "Mt Kress: Drops used 2 ", [], False, [], [], [], [], [], [], [], []],
516: [False, [340,341], 0, [0,0,0,b"\x00"], 0, "Mt Kress: Drops used 3 ", [], False, [], [], [], [], [], [], [], []],
517: [False, [], 0, [0,0,0,b"\x00"], 0, "Pyramid: Hieroglyphs placed ", [], False, [], [], [], [], [], [], [], []],
518: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Castoth defeated ", [], False, [], [], [], [], [], [], [], []],
519: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Viper defeated ", [], False, [], [], [], [], [], [], [], []],
520: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Vampires defeated ", [], False, [], [], [], [], [], [], [], []],
521: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Sand Fanger defeated ", [], False, [], [], [], [], [], [], [], []],
522: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Mummy Queen defeated ", [], False, [], [], [], [], [], [], [], []],
523: [False, [], 0, [0,0,0,b"\x00"], 0, "Mansion: Solid Arm defeated ", [], False, [], [], [], [], [], [], [], []],
# Misc
600: [False, [], 0, [0,0,0,b"\x00"], 0, "Freedan Access ", [], False, [], [], [], [], [], [], [], []],
601: [False, [], 0, [0,0,0,b"\x00"], 0, "Glitches ", [], False, [], [], [], [], [], [], [], []],
602: [False, [], 0, [0,0,0,b"\x00"], 0, "Early Firebird ", [], False, [], [], [], [], [], [], [], []],
INACCESSIBLE: [False, [], 0, [0,0,0,b"\x00"], 0, "Inaccessible", [], False, [], [], [], [], [], [], [], []]
}
# Define logical paths in dynamic graph
# Format: { ID: [Status(-1=restricted,0=locked,1=unlocked,2=forced_open), StartRegion, DestRegion, NeedFreedan, [[item1, qty1],[item2,qty2]...]]}
self.logic = {
# Jeweler Rewards
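# Each reward tier gets four equivalent edges: pay the full gem[i] count in
# loose Red Jewels (item 1), or substitute the bundle items (41 = "2 Red
# Jewels", 42 = "3 Red Jewels") to cut the loose count by 2, 3, or 5.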
0: [0, 1, 2, False, [[1, gem[0]]]], # Jeweler Reward 1
1: [0, 1, 2, False, [[1, gem[0] - 2], [41, 1]]],
2: [0, 1, 2, False, [[1, gem[0] - 3], [42, 1]]],
3: [0, 1, 2, False, [[1, gem[0] - 5], [41, 1], [42, 1]]],
4: [0, 2, 3, False, [[1, gem[1]]]], # Jeweler Reward 2
5: [0, 2, 3, False, [[1, gem[1] - 2], [41, 1]]],
6: [0, 2, 3, False, [[1, gem[1] - 3], [42, 1]]],
7: [0, 2, 3, False, [[1, gem[1] - 5], [41, 1], [42, 1]]],
8: [0, 3, 4, False, [[1, gem[2]]]], # Jeweler Reward 3
9: [0, 3, 4, False, [[1, gem[2] - 2], [41, 1]]],
10: [0, 3, 4, False, [[1, gem[2] - 3], [42, 1]]],
11: [0, 3, 4, False, [[1, gem[2] - 5], [41, 1], [42, 1]]],
12: [0, 4, 5, False, [[1, gem[3]]]], # Jeweler Reward 4
13: [0, 4, 5, False, [[1, gem[3] - 2], [41, 1]]],
14: [0, 4, 5, False, [[1, gem[3] - 3], [42, 1]]],
15: [0, 4, 5, False, [[1, gem[3] - 5], [41, 1], [42, 1]]],
16: [0, 5, 6, False, [[1, gem[4]]]], # Jeweler Reward 5
17: [0, 5, 6, False, [[1, gem[4] - 2], [41, 1]]],
18: [0, 5, 6, False, [[1, gem[4] - 3], [42, 1]]],
19: [0, 5, 6, False, [[1, gem[4] - 5], [41, 1], [42, 1]]],
20: [0, 6, 7, False, [[1, gem[5]]]], # Jeweler Reward 6
21: [0, 6, 7, False, [[1, gem[5] - 2], [41, 1]]],
22: [0, 6, 7, False, [[1, gem[5] - 3], [42, 1]]],
23: [0, 6, 7, False, [[1, gem[5] - 5], [41, 1], [42, 1]]],
24: [0, 7, 8, False, [[1, gem[6]]]], # Jeweler Reward 7 (Mansion)
25: [0, 7, 8, False, [[1, gem[6] - 2], [41, 1]]],
26: [0, 7, 8, False, [[1, gem[6] - 3], [42, 1]]],
27: [0, 7, 8, False, [[1, gem[6] - 5], [41, 1], [42, 1]]],
# Inter-Continental Travel
30: [0, 28, 15, False, [[37, 1]]], # South Cape: Erik w/ Lola's Letter
31: [0, 102, 15, False, [[37, 1]]], # Coast: Turbo w/ Lola's Letter
32: [0, 280, 15, False, [[37, 1]]], # Watermia: Bridgeman w/ Lola's Letter
33: [0, 160, 161, False, [[13, 1]]], # Neil's: Neil w/ Memory Melody
34: [0, 314, 17, False, [[505, 1]]], # Euro: Neil w/ Memory restored
35: [0, 402, 17, False, [[505, 1]]], # Dao: Neil w/ Memory restored
36: [0, 60, 64, False, [[25, 1]]], # Moon Tribe healed w/ Teapot
37: [0, 170, 16, False, [[502, 1]]], # Sky Garden: Spirits w/ spirits healed
38: [0, 280, 288, False, [[24, 1]]], # Watermia: Stablemaster w/ Will
39: [0, 310, 311, False, [[24, 1]]], # Euro: Stablemaster w/ Will
40: [0, 350, 351, False, [[10, 1]]], # Natives': Child Guide w/ Large Roast
# Edward's / Tunnel
60: [0, 32, 33, False, [[2, 1]]], # Escape cell w/Prison Key
61: [0, 33, 32, False, [[2, 1]]], # Enter cell w/Prison Key
62: [0, 45, 46, False, [[501, 1]]], # Progression w/ Lilly
63: [0, 47, 48, True, []], # Activate Bridge w/ Freedan
# Itory
70: [0, 50, 51, False, [[9, 1]]], # Town appears w/ Lola's Melody
71: [0, 55, 59, False, [[23, 1]]], # Get Lilly w/ Necklace
72: [0, 56, 57, False, [[61, 1]]], # Cave w/ Psycho Dash
73: [0, 56, 57, False, [[62, 1]]], # Cave w/ Psycho Slide
74: [0, 56, 57, False, [[63, 1]]], # Cave w/ Spin Dash
# Moon Tribe
80: [0, 61, 62, False, [[61, 1]]], # Cave challenge w/ Psycho Dash
81: [0, 61, 62, False, [[62, 1]]], # Cave challenge w/ Psycho Slide
82: [0, 61, 62, False, [[63, 1]]], # Cave challenge w/ Spin Dash
# Inca / Gold Ship / Freejia
89: [0, 72, 99, False, [[601, 1]]], # Map 29 progression w/ glitches
90: [0, 77, 78, False, [[3, 1], [4, 1]]], # Map 30 progression w/ Inca Statues
91: [0, 80, 81, False, [[61, 1]]], # Map 32 progression w/ Psycho Dash
92: [0, 80, 81, False, [[62, 1]]], # Map 32 progression w/ Psycho Slider
93: [0, 80, 81, False, [[63, 1]]], # Map 32 progression w/ Spin Dash
94: [0, 85, 86, True, []], # Map 35 progression w/ Freedan
95: [0, 87, 88, False, [[8, 1]]], # Map 36 progression w/ Wind Melody
96: [0, 89, 90, False, [[7, 1]]], # Map 37 progression w/ Diamond Block
97: [0, 91, 92, False, [[61, 1]]], # Map 38 progression w/ Psycho Dash
98: [0, 91, 92, False, [[62, 1]]], # Map 38 progression w/ Psycho Slider
99: [0, 91, 92, False, [[63, 1]]], # Map 38 progression w/ Spin Dash
#100: [0, 100, 104, False, [[100, 1]]], # Gold Ship progression w/ Statue 1
101: [0, 110, 115, False, [[504, 1]]], # Freejia: Slaver item w/ Laborer Found
# Diamond Mine
110: [0, 131, 132, False, [[61, 1]]], # Map 61 false wall w/ Psycho Dash
111: [0, 131, 132, False, [[62, 1]]], # Map 61 false wall w/ Psycho Slider
112: [0, 131, 132, False, [[63, 1]]], # Map 61 false wall w/ Spin Dash
113: [0, 134, 135, False, [[15, 1]]], # Map 63 progression w/ Elevator Key
114: [0, 136, 137, False, [[61, 1]]], # Map 64 trapped laborer w/ Psycho Dash
115: [0, 136, 137, False, [[62, 1]]], # Map 64 trapped laborer w/ Psycho Slider
116: [0, 136, 137, False, [[63, 1]]], # Map 64 trapped laborer w/ Spin Dash
117: [0, 138, 139, False, [[63, 1]]], # Map 65 progression w/ Spin Dash
118: [0, 138, 139, True, [[64, 1]]], # Map 65 progression w/ Dark Friar
119: [0, 146, 147, False, [[11, 1], [12, 1]]], # Map 68 progression w/ mine keys
# Sky Garden
130: [0, 170, 171, False, [[14, 4]]], # Boss access w/ Crystal Balls
131: [0, 177, 178, True, [[64, 1]]], # Map 79 progression w/ Dark Friar
132: [0, 177, 178, True, [[67, 1]]], # Map 79 progression w/ Firebird
133: [0, 168, 182, False, [[506, 1]]], # Map 81 progression w/ switch 1
134: [0, 182, 183, False, [[507, 1]]], # Map 81 progression w/ switch 2
135: [0, 182, 184, False, [[61, 1]]], # Map 81 progression w/ Psycho Dash
136: [0, 182, 184, False, [[62, 1]]], # Map 81 progression w/ Psycho Slider
137: [0, 182, 184, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
138: [0, 184, 185, False, [[508, 1], [61, 1]]], # Map 81 progression w/ switch 3 & Psycho Dash
139: [0, 184, 185, False, [[508, 1], [62, 1]]], # Map 81 progression w/ switch 3 & Psycho Slider
140: [0, 184, 185, False, [[508, 1], [63, 1]]], # Map 81 progression w/ switch 3 & Spin Dash
141: [0, 181, 182, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
142: [0, 181, 184, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
143: [0, 182, 185, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
144: [0, 188, 189, True, []], # Map 82 progression w/ Freedan
145: [0, 188, 189, False, [[601, 1]]], # Map 82 progression w/ Glitches
146: [0, 192, 190, False, [[63, 1]]], # Map 83 progression w/ Spin Dash
147: [0, 195, 199, True, [[64, 1]]], # Map 84 progression w/ Dark Friar
148: [0, 195, 199, True, [[67, 1]]], # Map 84 progression w/ Firebird
149: [0, 195, 199, True, [[65, 1]]], # Map 84 progression w/ Aura Barrier
150: [0, 197, 199, True, [[64, 1]]], # Map 84 progression w/ Dark Friar
151: [0, 197, 199, True, [[67, 1]]], # Map 84 progression w/ Firebird
152: [0, 170, 16, False, [[502, 1]]], # Moon Tribe passage w/ spirits healed
# Seaside Palace
160: [0, 205, 208, False, [[501, 1]]], # Coffin access w/ Lilly
161: [0, 209, 510, False, [[17, 1]]], # Purify fountain w/stone
162: [0, 200, 206, False, [[510, 1]]], # Buffy access w/ purified fountain
163: [0, 200, 201, False, [[16, 1]]], # Seaside to Mu w/ Mu key
164: [0, 210, 211, False, [[16, 1]]], # Mu to Seaside w/ Mu key
# Mu
170: [0, 212, 245, False, [[62, 1]]], # Map 95 progression w/ Psycho Slider
171: [0, 212, 213, False, [[511, 1]]], # Map 95 progression w/ water lowered 1
172: [0, 213, 215, False, [[512, 1]]], # Map 95 progression w/ water lowered 2
173: [0, 214, 216, False, [[512, 1]]], # Map 95 progression w/ water lowered 2
174: [0, 217, 218, False, [[511, 1]]], # Map 96 progression w/ water lowered 1
175: [0, 222, 221, True, [[511, 1], [64, 1]]], # Map 97 progression w/ water lowered 1 & Friar
176: [0, 222, 221, True, [[511, 1], [67, 1]]], # Map 97 progression w/ water lowered 1 & Firebird
177: [0, 222, 221, False, [[511, 1], [601, 1]]], # Map 97 progression w/ water lowered 1 & glitches
178: [0, 226, 227, False, [[511, 1]]], # Map 98 progression w/ water lowered 1
179: [0, 227, 229, False, [[512, 1]]], # Map 98 progression w/ water lowered 2
180: [0, 228, 230, False, [[512, 1]]], # Map 98 progression w/ water lowered 2
181: [0, 229, 230, False, [[62, 1]]], # Map 98 progression w/ Psycho Slider
182: [0, 230, 229, False, [[62, 1]]], # Map 98 progression w/ Psycho Slider
183: [0, 226, 246, False, [[62, 1]]], # Map 98 progression w/ Psycho Slider
184: [0, 237, 238, False, [[62, 1]]], # Map 101 progression w/ Psycho Slider
185: [0, 240, 241, False, [[19, 2]]], # Map 102 progression w/ Rama Statues
186: [0, 231, 247, False, [[18, 1]]], # Water lowered 1 w/ Hope Statue
187: [0, 232, 248, False, [[18, 2]]], # Water lowered 2 w/ Hope Statues
# Angel Dungeon
210: [0, 263, 264, False, [[62, 1]]], # Map 112 progression w/ Psycho Slider
211: [0, 265, 275, False, [[62, 1]]], # Map 112 backwards progression w/ Psycho Slider
212: [0, 267, 268, False, [[62, 1]]], # Map 114 progression w/ Psycho Slider
213: [0, 277, 276, False, [[62, 1]]], # Map 114 backwards progression w/ Psycho Slider
214: [0, 272, 273, False, [[513, 1]]], # Ishtar's chest w/ puzzle complete
# Great Wall
220: [0, 294, 295, False, [[601, 1]]], # Map 133 progression w/ glitches
221: [0, 296, 295, False, [[63, 1]]], # Map 133 progression w/ Spin Dash
222: [0, 296, 295, True, []], # Map 133 progression w/ Freedan
223: [0, 298, 299, True, [[64, 1]]], # Map 135 progression w/ Friar
224: [0, 298, 299, True, [[67, 1]]], # Map 135 progression w/ Firebird
225: [0, 299, 298, False, [[64, 1], [54, 2]]], # Map 135 progression w/ Friar III
227: [0, 300, 301, False, [[63, 1]]], # Map 136 progression w/ Spin Dash
228: [0, 295, 294, False, [[63, 1]]], # Map 133 progression w/ Spin Dash
# Euro
230: [0, 314, 315, False, [[40, 1]]], # Ann item w/ Apple
# Mt. Temple
240: [0, 331, 332, False, [[63, 1]]], # Map 161 progression w/ Spin Dash
241: [0, 332, 331, False, [[63, 1]]], # Map 161 backwards progression w/ Spin Dash
242: [0, 333, 514, False, [[26, 1]]], # Map 162 progression w/ Mushroom drops 1
243: [0, 335, 514, False, [[26, 1]]], # Map 162 progression w/ Mushroom drops 1 -- IS THIS TRUE?
244: [0, 339, 515, False, [[26, 2]]], # Map 162 progression w/ Mushroom drops 2
245: [0, 340, 515, False, [[26, 2]]], # Map 162 progression w/ Mushroom drops 2 -- IS THIS TRUE?
246: [0, 340, 516, False, [[26, 3]]], # Map 162 progression w/ Mushroom drops 3
247: [0, 341, 516, False, [[26, 3]]], # Map 162 progression w/ Mushroom drops 3 -- IS THIS TRUE?
# Natives'
250: [0, 353, 354, False, [[29, 1]]], # Statues awake w/ Gorgon Flower
# Ankor Wat
260: [-1, 361, 362, True, [[64, 1]]], # Map 177 progression w/ Friar
261: [0, 363, 364, False, [[63, 1]]], # Map 178 progression w/ Spin Dash
262: [0, 364, 365, False, [[62, 1]]], # Map 178 progression w/ Psycho Slider
263: [0, 365, 364, False, [[62, 1]]], # Map 178 progression w/ Psycho Slider
264: [0, 367, 366, False, [[63, 1]]], # Map 179 progression w/ Spin Dash
265: [0, 369, 370, False, [[62, 1]]], # Map 181 progression w/ Psycho Slider
266: [0, 370, 371, False, [[63, 1]]], # Map 181 progression w/ Spin Dash
267: [0, 373, 374, True, [[66, 1]]], # Map 183 progression w/ Earthquaker
268: [0, 373, 374, True, [[64, 1], [54, 2]]], # Map 183 progression w/ upgraded Friar
269: [0, 373, 374, True, [[64, 1], [601, 1]]], # Map 183 progression w/ Friar and glitches
270: [0, 373, 374, True, [[67, 1]]], # Map 183 progression w/ Firebird -- IS THIS TRUE?
271: [0, 376, 377, True, [[64, 1]]], # Map 184 progression w/ Friar
272: [0, 376, 377, True, [[36, 1]]], # Map 184 progression w/ Shadow
273: [0, 384, 392, False, [[28, 1]]], # Map 188 progression w/ Black Glasses
274: [0, 385, 393, False, [[28, 1]]], # Map 188 progression w/ Black Glasses
275: [0, 384, 392, False, [[601, 1]]], # Map 188 progression w/ glitches
276: [0, 385, 393, False, [[601, 1]]], # Map 188 progression w/ glitches
277: [0, 392, 393, False, [[62, 1]]], # Map 188 progression w/ Slider
278: [0, 393, 392, False, [[62, 1]]], # Map 188 progression w/ Slider
279: [0, 386, 387, False, [[62, 1]]], # Map 188 progression w/ Psycho Slider
280: [0, 387, 386, False, [[62, 1]]], # Map 188 progression w/ Psycho Slider
# Pyramid
290: [0, 410, 411, False, [[62, 1]]], # Map 204 progression w/ Slider
291: [0, 410, 411, False, [[63, 1]]], # Map 204 progression w/ Spin
292: [0, 410, 411, False, [[601, 1]]], # Map 204 progression w/ glitches
293: [0, 411, 412, False, [[36, 1]]], # Map 204 progression w/ Aura
294: [0, 411, 413, False, [[36, 1]]], # Map 204 progression w/ Aura
295: [0, 415, 449, False, [[30, 1], [31, 1], [32, 1], [33, 1], [34, 1], [35, 1], [38, 1]]],
# Boss door open w/ Hieroglyphs
296: [0, 416, 417, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
297: [0, 417, 416, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
298: [0, 418, 419, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
299: [0, 419, 418, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
300: [0, 426, 427, True, [[36, 1]]], # Map 212 progression w/ Aura
301: [0, 426, 427, True, [[66, 1]]], # Map 212 progression w/ Earthquaker
302: [0, 427, 428, True, [[36, 1]]], # Map 212 progression w/ Aura
303: [0, 427, 429, True, [[36, 1]]], # Map 212 progression w/ Aura
304: [0, 427, 429, True, [[66, 1]]], # Map 212 progression w/ Earthquaker
305: [0, 431, 432, False, [[63, 1]]], # Map 214 progression w/ Spin Dash
306: [0, 431, 434, True, [[36, 1]]], # Map 214 progression w/ Aura
307: [0, 431, 433, True, [[64, 1]]], # Map 214 progression w/ Friar
308: [0, 438, 439, False, [[63, 1]]], # Map 217 progression w/ Spin Dash
309: [0, 439, 438, False, [[63, 1]]], # Map 217 progression w/ Spin Dash
310: [0, 440, 441, False, [[63, 1]]], # Map 219 progression w/ Spin Dash
311: [0, 441, 440, False, [[63, 1]]], # Map 219 progression w/ Spin Dash
312: [0, 435, 450, False, [[6, 6], [50, 2], [51, 1], [52, 1]]],
# Killer 6 w/ herbs and upgrades
313: [0, 435, 450, True, [[64, 1], [54, 1]]],
# Killer 6 w/ Friar II
314: [0, 411, 414, False, [[517, 1]]], # Pyramid to boss w/hieroglyphs placed
# Babel / Mansion
320: [0, 461, 462, False, [[36, 1], [39, 1]]], # Map 223 progression w/ Aura and Ring
321: [0, 473, 479, False, [[522, 1]]], # Olman statue w/ Mummy Queen 2
322: [0, 473, 479, False, [[523, 1]]], # Olman statue w/ Solid Arm
323: [0, 480, 481, False, [[62, 1]]], # Mansion progression w/ Slider
# Endgame / Misc
400: [-1, 49, 490, False, [[20, 1]]], # Rescue Kara from Edward's w/ Magic Dust
401: [-1, 150, 490, False, [[20, 1]]], # Rescue Kara from Mine w/ Magic Dust
402: [-1, 270, 490, False, [[20, 1]]], # Rescue Kara from Angel w/ Magic Dust
403: [-1, 345, 490, False, [[20, 1]]], # Rescue Kara from Mt. Temple w/ Magic Dust
404: [-1, 391, 490, False, [[20, 1]]], # Rescue Kara from Ankor Wat w/ Magic Dust
405: [0, 490, 491, False, [[36, 1], [39, 1], [602, 1]]], # Early Firebird w/ Kara, Aura and Ring
406: [0, 490, 492, False, [[36, 1], [100, 0], [101, 0], [102, 0], [103, 0], [104, 0], [105, 0]]],
# Beat Game w/Mystic Statues and Aura
407: [0, 490, 492, False, [[36, 1], [106, self.statues_required]]] # Beat Game w/Mystic Statues and Aura (player choice variant)
}
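# A minimal sketch (illustrative, not part of the solver) of how one of the
# logic edges above can be checked against a player inventory. `inventory`
# is a hypothetical {item_id: quantity} dict; the Status field, Freedan
# access, and graph traversal are the real solver's job, so only the
# item-requirement check is shown.
def _edge_requirements_met(edge, inventory):
    status, start, dest, need_freedan, requirements = edge
    # Every [item, qty] pair must be met; an empty list always passes.
    return all(inventory.get(item, 0) >= qty for item, qty in requirements)
# e.g. _edge_requirements_met(self.logic[90], {3: 1, 4: 1}) is True
# (both Inca Statues open Map 30).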
# Define addresses for in-game spoiler text
self.spoiler_addresses = {
0: "4caf5", # Edward's Castle guard, top floor (4c947)
1: "4e9ff", # Itory elder (4e929)
2: "58ac0", # Gold Ship queen (589ff)
3: "5ad6b", # Man at Diamond Coast (5ab5c)
# 4: "5bfde", # Freejia laborer (5bfaa)
5: "69167", # Seaside Palace empty coffin (68feb)
6: "6dc97", # Ishtar's apprentice (6dc50)
7: "79c81", # Watermia, Kara's journal (79bf5)
8: "7d892", # Euro: Erasquez (7d79e)
9: "89b2a", # Ankor Wat, spirit (89abf)
10: "8ad0c", # Dao: girl with note (8acc5)
11: "99b8f" # Babel: spirit (99b2e)
}
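# The spoiler addresses above are stored as hex strings; a sketch of turning
# one into an integer ROM offset for patching:
# int(self.spoiler_addresses[0], 16) == 0x4caf5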
# Define location text for in-game format
self.location_text = {
0: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
1: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
2: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
3: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
4: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
5: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
6: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
7: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
8: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
9: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
10: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
11: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x42\x80\xa3\xa4\x8b\x84", # "Edward's Castle"
12: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x42\x80\xa3\xa4\x8b\x84", # "Edward's Castle"
13: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x60\xa2\x88\xa3\x8e\x8d", # "Edward's Prison"
14: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x60\xa2\x88\xa3\x8e\x8d", # "Edward's Prison"
15: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
16: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
17: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
18: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
19: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
20: b"\x48\xa4\x8e\xa2\xa9", # "Itory"
21: b"\x48\xa4\x8e\xa2\xa9", # "Itory"
22: b"\x48\xa4\x8e\xa2\xa9", # "Itory"
23: b"\x4c\x8e\x8e\x8d\xac\x64\xa2\x88\x81\x84", # "Moon Tribe"
24: b"\x48\x8d\x82\x80", # "Inca"
25: b"\x48\x8d\x82\x80", # "Inca"
26: b"\x48\x8d\x82\x80", # "Inca"
27: b"\x48\x8d\x82\x80", # "Inca"
28: b"\x63\x88\x8d\x86\x88\x8d\x86\xac\xa3\xa4\x80\xa4\xa5\x84", # "Singing Statue"
29: b"\x48\x8d\x82\x80", # "Inca"
30: b"\x48\x8d\x82\x80", # "Inca"
31: b"\x48\x8d\x82\x80", # "Inca"
32: b"\x46\x8e\x8b\x83\xac\x63\x87\x88\xa0", # "Gold Ship"
33: b"\xd6\x0e\x42\x8e\x80\xa3\xa4", # "Diamond Coast"
34: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
35: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
36: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
37: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
38: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
39: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
40: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
41: b"\x4b\x80\x81\x8e\xa2\x84\xa2", # "Laborer"
42: b"\x4b\x80\x81\x8e\xa2\x84\xa2", # "Laborer"
43: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
44: b"\x4b\x80\x81\x8e\xa2\x84\xa2", # "Laborer"
45: b"\x63\x80\x8c", # "Sam"
46: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
47: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
48: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
49: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
50: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
51: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
52: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
53: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
54: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
55: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
56: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
57: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
58: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
59: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
60: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
61: b"\xd7\x32\xd7\x93", # "Seaside Palace"
62: b"\xd7\x32\xd7\x93", # "Seaside Palace"
63: b"\xd7\x32\xd7\x93", # "Seaside Palace"
64: b"\x41\xa5\x85\x85\xa9", # "Buffy"
65: b"\x42\x8e\x85\x85\x88\x8d", # "Coffin"
66: b"\xd7\x32\xd7\x93", # "Seaside Palace"
67: b"\x4c\xa5", # "Mu"
68: b"\x4c\xa5", # "Mu"
69: b"\x4c\xa5", # "Mu"
70: b"\x4c\xa5", # "Mu"
71: b"\x4c\xa5", # "Mu"
72: b"\x4c\xa5", # "Mu"
73: b"\x4c\xa5", # "Mu"
74: b"\x4c\xa5", # "Mu"
75: b"\x4c\xa5", # "Mu"
76: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
77: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
78: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
79: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
80: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
81: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
82: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
83: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
84: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
85: b"\x4b\x80\x8d\x82\x84", # "Lance"
86: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
87: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
88: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
89: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
90: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
91: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
92: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
93: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
94: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
95: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
96: b"\x44\xa5\xa2\x8e", # "Euro"
97: b"\x44\xa5\xa2\x8e", # "Euro"
98: b"\x44\xa5\xa2\x8e", # "Euro"
99: b"\x44\xa5\xa2\x8e", # "Euro"
100: b"\x44\xa5\xa2\x8e", # "Euro"
101: b"\x44\xa5\xa2\x8e", # "Euro"
102: b"\x40\x8d\x8d", # "Ann"
103: b"\x44\xa5\xa2\x8e", # "Euro"
104: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
105: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
106: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
107: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
108: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3\xac\x6e\x84\x8d\x83\x6f", # "Mt. Kress (end)"
109: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
110: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
111: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
112: b"\xd7\x21\x66\x88\x8b\x8b\x80\x86\x84", # "Native Village"
113: b"\x63\xa4\x80\xa4\xa5\x84", # "Statue"
114: b"\xd7\x21\x66\x88\x8b\x8b\x80\x86\x84", # "Native Village"
115: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
116: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
117: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
118: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
119: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
120: b"\x63\x87\xa2\xa5\x81\x81\x84\xa2", # "Shrubber"
121: b"\x63\xa0\x88\xa2\x88\xa4", # "Spirit"
122: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
123: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
124: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
125: b"\x43\x80\x8e", # "Dao"
126: b"\x43\x80\x8e", # "Dao"
127: b"\x43\x80\x8e", # "Dao"
128: b"\x63\x8d\x80\x8a\x84\xac\x86\x80\x8c\x84", # "Snake Game"
129: b"\x43\x80\x8e", # "Dao"
130: b"\x46\x80\x88\x80", # "Gaia"
131: b"\xd6\x3f", # "Pyramid"
132: b"\xd6\x3f", # "Pyramid"
133: b"\xd6\x3f", # "Pyramid"
134: b"\xd6\x3f", # "Pyramid"
135: b"\xd6\x3f", # "Pyramid"
136: b"\x4a\x88\x8b\x8b\x84\xa2\xac\x26", # "Killer 6"
137: b"\xd6\x3f", # "Pyramid"
138: b"\xd6\x3f", # "Pyramid"
139: b"\xd6\x3f", # "Pyramid"
140: b"\xd6\x3f", # "Pyramid"
141: b"\xd6\x3f", # "Pyramid"
142: b"\xd6\x3f", # "Pyramid"
143: b"\x41\x80\x81\x84\x8b", # "Babel"
144: b"\x41\x80\x81\x84\x8b", # "Babel"
145: b"\x41\x80\x81\x84\x8b", # "Babel"
146: b"\x41\x80\x81\x84\x8b", # "Babel"
147: b"\x49\x84\xa7\x84\x8b\x84\xa2\x0e\xa3\xac\x4c\x80\x8d\xa3\x88\x8e\x8d", # "Jeweler's Mansion"
148: "", # "Castoth"
149: "", # "Viper"
150: "", # "Vampires"
151: "", # "<NAME>"
152: "", # "Mummy Queen"
153: "" # "Olman"
}
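# The annotated strings above imply a simple text encoding: 'A'-'O' map to
# 0x40-0x4E, 'P'-'Z' to 0x60-0x6A, 'a'-'o' to 0x80-0x8E, 'p'-'z' to
# 0xA0-0xAA, digits appear to map to 0x20-0x29, and space is 0xAC; the
# \xd6/\xd7 pairs look like two-byte dictionary tokens for common words.
# A minimal encoder sketch inferred from (and checkable against) the table
# above; punctuation and the dictionary tokens are deliberately not handled:
def _encode_text(s):
    out = bytearray()
    for ch in s:
        if "A" <= ch <= "O":
            out.append(0x40 + ord(ch) - ord("A"))
        elif "P" <= ch <= "Z":
            out.append(0x60 + ord(ch) - ord("P"))
        elif "a" <= ch <= "o":
            out.append(0x80 + ord(ch) - ord("a"))
        elif "p" <= ch <= "z":
            out.append(0xA0 + ord(ch) - ord("p"))
        elif "0" <= ch <= "9":
            out.append(0x20 + ord(ch) - ord("0"))
        elif ch == " ":
            out.append(0xAC)
        else:
            raise ValueError("unmapped character: " + ch)
    return bytes(out)
# e.g. _encode_text("South Cape") == b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84"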
# Define long item text for in-game format
self.item_text_long = {
0: b"\xd3\xd6\x1d\x8d\x8e\xa4\x87\x88\x8d\x86\x4f\xac\xac\xac\xac\xac\xac\xac\xac",
1: b"\xd3\xd6\x1d\x80\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\x4f\xac\xac\xac\xac",
2: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x60\xa2\x88\xa3\x8e\x8d\xac\x4a\x84\xa9\x4f\xac",
3: b"\xd3\xd6\x1d\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x40\x4f\xac\xac",
4: b"\xd3\xd6\x1d\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x41\x4f\xac\xac",
5: "",
6: b"\xd3\xd6\x1d\x80\x8d\xac\x87\x84\xa2\x81\x4f\xac\xac\xac\xac\xac\xac\xac\xac",
7: b"\xd3\x64\x87\x84\xac\x43\x88\x80\x8c\x8e\x8d\x83\xac\x41\x8b\x8e\x82\x8a\x4f",
8: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x67\x88\x8d\x83\xac\x4c\x84\x8b\x8e\x83\xa9\x4f",
9: b"\xd3\xd6\x1d\x4b\x8e\x8b\x80\x0e\xa3\xac\x4c\x84\x8b\x8e\x83\xa9\x4f\xac\xac",
10: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x4b\x80\xa2\x86\x84\xac\x62\x8e\x80\xa3\xa4\x4f",
11: b"\xd3\xd6\x1d\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x40\x4f\xac\xac\xac\xac\xac",
12: b"\xd3\xd6\x1d\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x41\x4f\xac\xac\xac\xac\xac",
13: b"\xd3\x64\x87\x84\xac\x4c\x84\x8c\x8e\xa2\xa9\xac\x4c\x84\x8b\x8e\x83\xa9\x4f",
14: b"\xd3\xd6\x1d\x80\xac\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x41\x80\x8b\x8b\x4f\xac",
15: b"\xd3\x64\x87\x84\xac\x44\x8b\x84\xa6\x80\xa4\x8e\xa2\xac\x4a\x84\xa9\x4f\xac",
16: b"\xd3\x64\x87\x84\xac\x4c\xa5\xac\x60\x80\x8b\x80\x82\x84\xac\x4a\x84\xa9\x4f",
17: b"\xd3\x64\x87\x84\xac\x60\xa5\xa2\x88\xa4\xa9\xac\x63\xa4\x8e\x8d\x84\x4f\xac",
18: b"\xd3\x40\xac\x63\xa4\x80\xa4\xa5\x84\xac\x8e\x85\xac\x47\x8e\xa0\x84\x4f\xac",
19: b"\xd3\xd6\x1d\x80\xac\x62\x80\x8c\x80\xac\x63\xa4\x80\xa4\xa5\x84\x4f\xac\xac",
20: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x4c\x80\x86\x88\x82\xac\x43\xa5\xa3\xa4\x4f\xac",
21: "",
22: b"\xd3\xd6\x1d\x4b\x80\x8d\x82\x84\x0e\xa3\xac\x4b\x84\xa4\xa4\x84\xa2\x4f\xac",
23: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x4d\x84\x82\x8a\x8b\x80\x82\x84\x4f\xac\xac\xac",
24: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x67\x88\x8b\x8b\x4f\xac\xac\xac\xac\xac\xac\xac",
25: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x64\x84\x80\xa0\x8e\xa4\x4f\xac\xac\xac\xac\xac",
26: b"\xd3\xd6\x1d\x4c\xa5\xa3\x87\xa2\x8e\x8e\x8c\xac\x43\xa2\x8e\xa0\xa3\x4f\xac",
27: "",
28: b"\xd3\x64\x87\x84\xac\x41\x8b\x80\x82\x8a\xac\x46\x8b\x80\xa3\xa3\x84\xa3\x4f",
29: b"\xd3\x64\x87\x84\xac\x46\x8e\xa2\x86\x8e\x8d\xac\x45\x8b\x8e\xa7\x84\xa2\x4f",
30: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
31: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
32: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
33: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
34: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
35: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
36: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x40\xa5\xa2\x80\x4f\xac\xac\xac\xac\xac\xac\xac",
37: b"\xd3\xd6\x1d\x4b\x8e\x8b\x80\x0e\xa3\xac\x4b\x84\xa4\xa4\x84\xa2\x4f\xac\xac",
38: b"\xd3\xd6\x1d\x45\x80\xa4\x87\x84\xa2\x0e\xa3\xac\x49\x8e\xa5\xa2\x8d\x80\x8b",
39: b"\xd3\x64\x87\x84\xac\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x62\x88\x8d\x86\x4f\xac",
40: b"\xd3\xd6\x1d\x80\x8d\xac\x40\xa0\xa0\x8b\x84\x4f\xac\xac\xac\xac\xac\xac\xac",
41: b"\xd3\xd6\x1d\x22\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\x4f\xac\xac\xac",
42: b"\xd3\xd6\x1d\x23\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\x4f\xac\xac\xac",
50: b"\xd3\xd6\x1d\x80\x8d\xac\x47\x60\xac\xa5\xa0\x86\xa2\x80\x83\x84\x4f\xac\xac",
51: b"\xd3\xd6\x1d\x80\xac\x43\x44\x45\xac\xa5\xa0\x86\xa2\x80\x83\x84\x4f\xac\xac",
52: b"\xd3\xd6\x1d\x80\xac\x63\x64\x62\xac\xa5\xa0\x86\xa2\x80\x83\x84\x4f\xac\xac",
53: b"\xd3\xd6\x3c\x43\x80\xa3\x87\xac\x88\xa3\xac\x88\x8c\xa0\xa2\x8e\xa6\x84\x83",
54: b"\xd3\x45\xa2\x88\x80\xa2\xac\x88\xa3\xac\x88\x8c\xa0\xa2\x8e\xa6\x84\x83\x4f",
55: b"\xd3\xd6\x1d\x80\xac\x47\x84\x80\xa2\xa4\xac\x60\x88\x84\x82\x84\x4f\xac\xac"
}
# Define short item text for in-game format
# Currently only used in Jeweler's inventory
self.item_text_short = {
0: b"\x4d\x8e\xa4\x87\x88\x8d\x86\xac\xac\xac\xac\xac\xac",
1: b"\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xac\xac\xac\xac",
2: b"\x60\xa2\x88\xa3\x8e\x8d\xac\x4a\x84\xa9\xac\xac\xac",
3: b"\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x40",
4: b"\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x41",
5: "",
6: b"\x47\x84\xa2\x81\xac\xac\xac\xac\xac\xac\xac\xac\xac",
7: b"\x43\x88\x80\x8c\x8e\x8d\x83\xac\x41\x8b\x8e\x82\x8a",
8: b"\x67\x88\x8d\x83\xac\x4c\x84\x8b\x8e\x83\xa9\xac\xac",
9: b"\x4b\x8e\x8b\x80\x0e\xa3\xac\x4c\x84\x8b\x8e\x83\xa9",
10: b"\x4b\x80\xa2\x86\x84\xac\x62\x8e\x80\xa3\xa4\xac\xac",
11: b"\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x40\xac\xac\xac",
12: b"\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x41\xac\xac\xac",
13: b"\x4c\x84\x8c\x8e\xa2\xa9\xac\x4c\x84\x8b\x8e\x83\xa9",
14: b"\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x41\x80\x8b\x8b\xac",
15: b"\x44\x8b\x84\xa6\x80\xa4\x8e\xa2\xac\x4a\x84\xa9\xac",
16: b"\x4c\xa5\xac\x60\x80\x8b\x80\x82\x84\xac\x4a\x84\xa9",
17: b"\x60\xa5\xa2\x88\xa4\xa9\xac\x63\xa4\x8e\x8d\x84\xac",
18: b"\x47\x8e\xa0\x84\xac\x63\xa4\x80\xa4\xa5\x84\xac\xac",
19: b"\x62\x80\x8c\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\xac",
20: b"\x4c\x80\x86\x88\x82\xac\x43\xa5\xa3\xa4\xac\xac\xac",
21: "",
22: b"\x4b\x80\x8d\x82\x84\xac\x4b\x84\xa4\xa4\x84\xa2\xac",
23: b"\x4d\x84\x82\x8a\x8b\x80\x82\x84\xac\xac\xac\xac\xac",
24: b"\x67\x88\x8b\x8b\xac\xac\xac\xac\xac\xac\xac\xac\xac",
25: b"\x64\x84\x80\xa0\x8e\xa4\xac\xac\xac\xac\xac\xac\xac",
26: b"\x63\x87\xa2\x8e\x8e\x8c\xac\x43\xa2\x8e\xa0\xa3\xac",
27: "",
28: b"\x41\x8b\x80\x82\x8a\xac\x46\x8b\x80\xa3\xa3\x84\xa3",
29: b"\x46\x8e\xa2\x86\x8e\x8d\xac\x45\x8b\x8e\xa7\x84\xa2",
30: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
31: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
32: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
33: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
34: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
35: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
36: b"\x40\xa5\xa2\x80\xac\xac\xac\xac\xac\xac\xac\xac\xac",
37: b"\x4b\x8e\x8b\x80\x0e\xa3\xac\x4b\x84\xa4\xa4\x84\xa2",
38: b"\x49\x8e\xa5\xa2\x8d\x80\x8b\xac\xac\xac\xac\xac\xac",
39: b"\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x62\x88\x8d\x86\xac",
40: b"\x40\xa0\xa0\x8b\x84\xac\xac\xac\xac\xac\xac\xac\xac",
41: b"\x22\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\xac",
42: b"\x23\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\xac",
50: b"\x47\x60\xac\x65\xa0\x86\xa2\x80\x83\x84\xac\xac\xac",
51: b"\x43\x44\x45\xac\x65\xa0\x86\xa2\x80\x83\x84\xac\xac",
52: b"\x63\x64\x62\xac\x65\xa0\x86\xa2\x80\x83\x84\xac\xac",
53: b"\x43\x80\xa3\x87\xac\x65\xa0\x86\xa2\x80\x83\x84\xac",
54: b"\x45\xa2\x88\x80\xa2\xac\x65\xa0\x86\xa2\x80\x83\x84",
55: b"\x47\x84\x80\xa2\xa4\xac\x60\x88\x84\x82\x84\xac\xac",
61: b"\xd6\x3c\x43\x80\xa3\x87",
62: b"\xd6\x3c\x63\x8b\x88\x83\x84\xa2",
63: b"\xd7\x31\x43\x80\xa3\x87",
64: b"\xd6\x0c\x45\xa2\x88\x80\xa2",
65: b"\xd6\x03\x41\x80\xa2\xa2\x88\x84\xa2",
66: b"\x44\x80\xa2\xa4\x87\xa1\xa5\x80\x8a\x84\xa2"
}
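# Both item-text tables appear to be fixed-width for the in-game menus: the
# long strings above pad to 19 bytes with 0xAC (space) after their \xd3
# header, the short strings to 13 bytes, while entries 61-66 lean on the
# \xd6/\xd7 dictionary tokens and so vary in length.
# len(self.item_text_long[1]) == 19 and len(self.item_text_short[1]) == 13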
# Database of enemy groups and spritesets
# FORMAT: { ID: [ROM_Location, HeaderCode, HeaderData, Name]}
self.enemysets = {
0: [b"\x03\x00\x10\x10\xEC\x59\xCD\x01\x04\x00\x60\xA0\x8C\x75\xDE\x10\xD0\x21\x00\x47\xED\x9F", "Underground Tunnel"],
1: [b"\x03\x00\x10\x10\xBC\x33\xC2\x01\x04\x00\x60\xA0\x0C\x77\xDE\x10\x2A\x0F\x00\xE6\x08\xD5", "Inca Ruins (Mud Monster and Larva)"],
2: [b"\x03\x00\x10\x10\x23\x4D\xC2\x01\x04\x00\x60\xA0\xCC\x77\xDE\x10\x36\x23\x00\x24\x45\xCC", "Inca Ruins (Statues)"],
3: [b"\x03\x00\x10\x10\x16\x5C\xCC\x01\x04\x00\x60\xA0\xCC\x7A\xDE\x10\x30\x29\x00\xBE\x2F\xCB", "Diamond Mine"],
4: [b"\x03\x00\x10\x10\x62\x3D\xCF\x01\x04\x00\x60\xA0\x4C\x7C\xDE\x10\x54\x1D\x00\xEF\xEE\x9E", "Sky Garden (top)"],
5: [b"\x03\x00\x10\x10\x62\x3D\xCF\x01\x04\x00\x60\xA0\x0C\x7D\xDE\x10\x54\x1D\x00\xEF\xEE\x9E", "Sky Garden (bottom)"],
6: [b"\x03\x00\x10\x10\x2D\x2E\xCC\x01\x04\x00\x60\xA0\x00\x00\xDF\x10\x16\x1C\x00\x41\x36\xD1", "Mu"],
7: [b"\x03\x00\x10\x10\xD1\x14\xCF\x01\x04\x00\x60\xA0\x40\x02\xDF\x10\x7F\x0F\x00\x2C\x2B\xD5", "Angel Dungeon"],
8: [b"\x03\x00\x10\x10\x6D\x13\xD0\x01\x04\x00\x60\xA0\x40\x05\xDF\x10\xFF\x16\x00\xF7\xF3\x99", "Great Wall"],
9: [b"\x03\x00\x10\x10\x00\x00\xD0\x01\x04\x00\x60\xA0\x40\x08\xDF\x10\x70\x0E\x00\x5C\x4D\xD8", "Mt. Kress"],
10: [b"\x03\x00\x10\x10\xEA\x15\xCE\x01\x04\x00\x70\x90\x53\x55\xDE\x10\xD5\x14\x00\x08\x73\xCC", "Ankor Wat (outside)"],
11: [b"\x03\x00\x10\x10\x81\x6A\xC1\x01\x04\x00\x70\x90\x13\x57\xDE\x10\x57\x10\x00\x5F\x39\xD4", "Ankor Wat (inside)"],
12: [b"\x03\x00\x10\x10\x0d\x18\xcb\x01\x04\x00\x60\x90\x80\x0a\xdf\x10\xfb\x13\x00\x0e\x67\xd1", "Pyramid"],
13: [b"\x03\x00\x10\x10\x16\x5C\xCC\x01\x04\x00\x60\xA0\xC0\x0C\xDF\x10\x30\x29\x00\xBE\x2F\xCB", "Jeweler's Mansion"]
}
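# Every spriteset header above is the same 22-byte blob, so a sanity check
# like the following holds for each entry:
# all(len(v[0]) == 22 for v in self.enemysets.values())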
# Enemy map database
# FORMAT: { ID: [EnemySet, RewardBoss(0 for no reward), Reward[type, tier], SearchHeader,
# SpritesetOffset, EventAddrLow, EventAddrHigh, RestrictedEnemysets]}
# ROM address for room reward table is mapID + $1aade
self.maps = {
# For now, no one can have enemyset 10 (Ankor Wat outside)
# Underground Tunnel
12: [0, 1, [0,0], b"\x0C\x00\x02\x05\x03", 4, "c867a", "c86ac", []],
13: [0, 1, [0,0], b"\x0D\x00\x02\x03\x03", 4, "c86ac", "c875c", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]],
14: [0, 1, [0,0], b"\x0E\x00\x02\x03\x03", 4, "c875c", "c8847", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]], # Weird 4way issues
15: [0, 1, [0,0], b"\x0F\x00\x02\x03\x03", 4, "c8847", "c8935", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
18: [0, 1, [0,0], b"\x12\x00\x02\x03\x03", 4, "c8986", "c8aa9", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]], # Spike balls
# Inca Ruins
27: [1, 0, [0,0], b"\x1B\x00\x02\x05\x03", 4, "c8c33", "c8c87", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]], # Moon Tribe cave
29: [1, 1, [0,0], b"\x1D\x00\x02\x0F\x03", 4, "c8cc4", "c8d85", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
32: [1, 1, [0,0], b"\x20\x00\x02\x08\x03", 4, "c8e16", "c8e75", []], # Broken statue
33: [2, 1, [0,0], b"\x21\x00\x02\x08\x03", 4, "c8e75", "c8f57", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]], # Floor switch
34: [2, 1, [0,0], b"\x22\x00\x02\x08\x03", 4, "c8f57", "c9029", []], # Floor switch
35: [2, 1, [0,0], b"\x23\x00\x02\x0A\x03", 4, "c9029", "c90d5", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]],
37: [1, 1, [0,0], b"\x25\x00\x02\x08\x03", 4, "c90f3", "c91a0", [1]], # Diamond block
38: [1, 1, [0,0], b"\x26\x00\x02\x08\x03", 4, "c91a0", "c9242", []], # Broken statues
39: [1, 1, [0,0], b"\x27\x00\x02\x0A\x03", 4, "c9242", "c92f2", []],
40: [1, 1, [0,0], b"\x28\x00\x02\x08\x03", 4, "c92f2", "c935f", [1]], # Falling blocks
# Diamond Mine
61: [3, 2, [0,0], b"\x3D\x00\x02\x08\x03", 4, "c9836", "c98b7", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
62: [3, 2, [0,0], b"\x3E\x00\x02\x08\x03", 4, "c98b7", "c991a", []],
63: [3, 2, [0,0], b"\x3F\x00\x02\x05\x03", 4, "c991a", "c9a41", []],
64: [3, 2, [0,0], b"\x40\x00\x02\x08\x03", 4, "c9a41", "c9a95", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]], # Trapped laborer (??)
65: [3, 2, [0,0], b"\x41\x00\x02\x00\x03", 4, "c9a95", "c9b39", [0, 2, 3, 4, 5, 11]], # Stationary Grundit
69: [3, 2, [0,0], b"\x45\x00\x02\x08\x03", 4, "c9ba1", "c9bf4", []],
70: [3, 2, [0,0], b"\x46\x00\x02\x08\x03", 4, "c9bf4", "c9c5c", [3, 13]],
# Sky Garden
77: [4, 2, [0,0], b"\x4D\x00\x02\x12\x03", 4, "c9db3", "c9e92", []],
78: [5, 2, [0,0], b"\x4E\x00\x02\x10\x03", 4, "c9e92", "c9f53", []],
79: [4, 2, [0,0], b"\x4F\x00\x02\x12\x03", 4, "c9f53", "ca01a", [4, 5]],
80: [5, 2, [0,0], b"\x50\x00\x02\x10\x03", 4, "ca01a", "ca0cb", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
81: [4, 2, [0,0], b"\x51\x00\x02\x12\x03", 4, "ca0cb", "ca192", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
82: [5, 2, [0,0], b"\x52\x00\x02\x10\x03", 4, "ca192", "ca247", [4, 5]],
83: [4, 2, [0,0], b"\x53\x00\x02\x12\x03", 4, "ca247", "ca335", [4, 5]],
84: [5, 2, [0,0], b"\x54\x00\x02\x12\x03", 4, "ca335", "ca43b", [4, 5]],
# Mu
# 92: [6,0,0,b"\x5C\x00\x02\x15\x03",4,[]], # Seaside Palace
95: [6, 3, [0,0], b"\x5F\x00\x02\x14\x03", 4, "ca71b", "ca7ed", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
96: [6, 3, [0,0], b"\x60\x00\x02\x14\x03", 4, "ca7ed", "ca934", [6]],
97: [6, 3, [0,0], b"\x61\x00\x02\x14\x03", 4, "ca934", "caa7b", [6]],
98: [6, 3, [0,0], b"\x62\x00\x02\x14\x03", 4, "caa7b", "cab28", []],
100: [6, 3, [0,0], b"\x64\x00\x02\x14\x03", 4, "cab4b", "cabd4", []],
101: [6, 3, [0,0], b"\x65\x00\x02\x14\x03", 4, "cabd4", "cacc3", [6]],
# Angel Dungeon
109: [7, 3, [0,0], b"\x6D\x00\x02\x16\x03", 4, "caf6e", "cb04b", [7, 8, 9, 10]], # Add 10's back in once flies are fixed
110: [7, 3, [0,0], b"\x6E\x00\x02\x18\x03", 4, "cb04b", "cb13e", [7, 8, 9, 10]],
111: [7, 3, [0,0], b"\x6F\x00\x02\x1B\x03", 4, "cb13e", "cb1ae", [7, 8, 9, 10]],
112: [7, 3, [0,0], b"\x70\x00\x02\x16\x03", 4, "cb1ae", "cb258", [7, 8, 9, 10]],
113: [7, 3, [0,0], b"\x71\x00\x02\x18\x03", 4, "cb258", "cb29e", [7, 8, 9, 10]],
114: [7, 3, [0,0], b"\x72\x00\x02\x18\x03", 4, "cb29e", "cb355", [7, 8, 9, 10]],
# Great Wall
130: [8, 4, [0,0], b"\x82\x00\x02\x1D\x03", 4, "cb6c1", "cb845", [8, 9, 10]], # Add 10's back in once flies are fixed
131: [8, 4, [0,0], b"\x83\x00\x02\x1D\x03", 4, "cb845", "cb966", [7, 8, 9, 10]],
133: [8, 4, [0,0], b"\x85\x00\x02\x1D\x03", 4, "cb97d", "cbb18", [8, 9, 10]],
134: [8, 4, [0,0], b"\x86\x00\x02\x1D\x03", 4, "cbb18", "cbb87", [7, 8, 9, 10]],
135: [8, 4, [0,0], b"\x87\x00\x02\x1D\x03", 4, "cbb87", "cbc3b", [8]],
136: [8, 4, [0,0], b"\x88\x00\x02\x1D\x03", 4, "cbc3b", "cbd0a", [7, 8, 9]],
# Mt Temple
160: [9, 4, [0,0], b"\xA0\x00\x02\x20\x03", 4, "cc18c", "cc21c", []],
161: [9, 4, [0,0], b"\xA1\x00\x02\x20\x03", 4, "cc21c", "cc335", [7, 8, 9, 10]],
162: [9, 4, [0,0], b"\xA2\x00\x02\x20\x03", 4, "cc335", "cc3df", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 13]], # Drops
163: [9, 4, [0,0], b"\xA3\x00\x02\x20\x03", 4, "cc3df", "cc4f7", []],
164: [9, 4, [0,0], b"\xA4\x00\x02\x20\x03", 4, "cc4f7", "cc5f8", [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13]],
165: [9, 4, [0,0], b"\xA5\x00\x02\x20\x03", 4, "cc5f8", "cc703", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 13]], # Drops
166: [9, 4, [0,0], b"\xA6\x00\x02\x20\x03", 4, "cc703", "cc7a1", []],
167: [9, 4, [0,0], b"\xA7\x00\x02\x20\x03", 4, "cc7a1", "cc9a3", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]],
168: [9, 4, [0,0], b"\xA8\x00\x02\x20\x03", 4, "cc9a3", "cca02", [7, 8, 9, 10]],
# Ankor Wat
176: [10, 6, [0,0], b"\xB0\x00\x02\x2C\x03", 4, "ccb1b", "ccbd8", []],
177: [11, 6, [0,0], b"\xB1\x00\x02\x08\x03", 4, "ccbd8", "ccca5", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]],
178: [11, 6, [0,0], b"\xB2\x00\x02\x08\x03", 4, "ccca5", "ccd26", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 13]],
179: [11, 6, [0,0], b"\xB3\x00\x02\x08\x03", 4, "ccd26", "ccd83", []],
180: [11, 6, [0,0], b"\xB4\x00\x02\x08\x03", 4, "ccd83", "ccdd7", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 13]],
181: [11, 6, [0,0], b"\xB5\x00\x02\x08\x03", 4, "ccdd7", "cce7b", []],
182: [10, 6, [0,0], b"\xB6\x00\x02\x2C\x03", 4, "cce7b", "cd005", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]],
183: [11, 6, [0,0], b"\xB7\x00\x02\x08\x03", 4, "cd005", "cd092", []], # Earthquaker Golem
184: [11, 6, [0,0], b"\xB8\x00\x02\x08\x03", 4, "cd092", "cd0df", [0, 1, 3, 4, 5, 7, 8, 9, 11, 13]],
185: [11, 6, [0,0], b"\xB9\x00\x02\x08\x03", 4, "cd0df", "cd137", []],
186: [10, 6, [0,0], b"\xBA\x00\x02\x2C\x03", 4, "cd137", "cd197", []],
187: [11, 6, [0,0], b"\xBB\x00\x02\x08\x03", 4, "cd197", "cd1f4", []],
188: [11, 6, [0,0], b"\xBC\x00\x02\x24\x03", 4, "cd1f4", "cd29a", []],
189: [11, 6, [0,0], b"\xBD\x00\x02\x08\x03", 4, "cd29a", "cd339", []],
190: [11, 6, [0,0], b"\xBE\x00\x02\x08\x03", 4, "cd339", "cd392", []],
# Pyramid
204: [12, 5, [0,0], b"\xCC\x00\x02\x08\x03", 4, "cd539", "cd58c", []],
206: [12, 5, [0,0], b"\xCE\x00\x02\x08\x03", 4, "cd5c6", "cd650", []],
207: [12, 5, [0,0], b"\xCF\x00\x02\x08\x03", 4, "cd650", "cd6f3", []],
208: [12, 5, [0,0], b"\xD0\x00\x02\x08\x03", 4, "cd6f3", "cd752", []],
209: [12, 5, [0,0], b"\xD1\x00\x02\x08\x03", 4, "cd752", "cd81b", []],
210: [12, 5, [0,0], b"\xD2\x00\x02\x08\x03", 4, "cd81b", "cd8f1", []],
211: [12, 5, [0,0], b"\xD3\x00\x02\x08\x03", 4, "cd8f1", "cd9a1", []],
212: [12, 5, [0,0], b"\xD4\x00\x02\x08\x03", 4, "cd9a1", "cda80", []],
213: [12, 5, [0,0], b"\xD5\x00\x02\x08\x03", 4, "cda80", "cdb4b", []],
214: [12, 5, [0,0], b"\xD6\x00\x02\x26\x03", 4, "cdb4b", "cdc1e", []],
215: [12, 5, [0,0], b"\xD7\x00\x02\x28\x03", 4, "cdc1e", "cdcfd", [0, 2, 3, 4, 5, 6, 8, 9, 11, 12, 13]],
216: [12, 5, [0,0], b"\xD8\x00\x02\x08\x03", 4, "cdcfd", "cde4f", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]],
217: [12, 5, [0,0], b"\xD9\x00\x02\x26\x03", 4, "cde4f", "cdf3c", []],
219: [12, 5, [0,0], b"\xDB\x00\x02\x26\x03", 4, "cdf76", "ce010", [0, 4, 5, 8, 9, 11, 12]], # Spike elevators
# Jeweler's Mansion
233: [13, 0, [0,0], b"\xE9\x00\x02\x22\x03", 4, "ce224", "ce3a6", [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 12, 13]]
}
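# A small sketch tying the comments above together (illustrative only): the
# reward-table address comes straight from the mapID + $1aade formula, and a
# room's RestrictedEnemysets list (index 7; an empty list appears to mean
# "unrestricted") limits which spritesets it may be rerolled to.
def _reward_table_addr(map_id):
    # Per the comment above the table: a room's reward entry is mapID + $1aade.
    return map_id + 0x1AADE

def _allowed_enemysets(map_entry, all_set_ids):
    restricted = map_entry[7]
    return list(all_set_ids) if not restricted else list(restricted)
# e.g. hex(_reward_table_addr(12)) == '0x1aaea'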
# Database of enemy types
# FORMAT: { ID: [Enemyset, Event addr, VanillaTemplate,
# Type(1=stationary,2=walking,3=flying),OnWalkableTile,CanBeRandom,Name]}
self.enemies = {
# Underground Tunnel
0: [0, b"\x55\x87\x8a", b"\x05", 2, True, True, "Bat"], # a8755
1: [0, b"\x6c\x82\x8a", b"\x01", 2, True, True, "Ribber"],
2: [0, b"\x00\x80\x8a", b"\x02", 1, False, True, "Canal Worm"],
3: [0, b"\xf7\x85\x8a", b"\x03", 2, True, False, "King Bat"],
4: [0, b"\x76\x84\x8a", b"\x10", 2, True, True, "Skull Chaser"],
5: [0, b"\xff\x86\x8a", b"\x04", 2, True, False, "Bat Minion 1"],
6: [0, b"\x9a\x86\x8a", b"\x04", 2, True, False, "Bat Minion 2"],
7: [0, b"\x69\x86\x8a", b"\x04", 2, True, False, "Bat Minion 3"],
8: [0, b"\xcb\x86\x8a", b"\x04", 2, True, False, "Bat Minion 4"],
# Inca Ruins
10: [1, b"\xb7\x8d\x8a", b"\x0b", 2, True, True, "Slugger"],
11: [1, b"\xb6\x8e\x8a", b"\x0b", 2, True, False, "Scuttlebug"],
12: [1, b"\x1b\x8b\x8a", b"\x0a", 2, True, True, "Mudpit"],
13: [1, b"\x70\x8c\x8a", b"\x0c", 1, True, True, "Four Way"],
14: [2, b"\xee\x97\x8a", b"\x0f", 2, True, True, "Splop"],
15: [2, b"\xbc\x98\x8a", b"\x0e", 3, False, True, "Whirligig"],
16: [2, b"\xc2\x95\x8a", b"\x0d", 2, True, False, "Stone Lord R"], # shoots fire
17: [2, b"\xb3\x95\x8a", b"\x0d", 2, True, True, "Stone Lord D"], # shoots fire
18: [2, b"\xb8\x95\x8a", b"\x0d", 2, True, False, "Stone Lord U"], # shoots fire
19: [2, b"\xbd\x95\x8a", b"\x0d", 2, True, False, "Stone Lord L"], # shoots fire
20: [2, b"\x70\x90\x8a", b"\x0d", 2, True, False, "Stone Guard R"], # throws spears
21: [2, b"\x6b\x90\x8a", b"\x0d", 2, True, False, "Stone Guard L"], # throws spears
22: [2, b"\x61\x90\x8a", b"\x0d", 2, True, True, "Stone Guard D"], # throws spears
23: [2, b"\xc3\x99\x8a", b"\x0e", 1, False, False, "Whirligig (stationary)"],
# Diamond Mine
30: [3, b"\xca\xaa\x8a", b"\x18", 2, True, True, "Flayzer 1"],
31: [3, b"\x54\xaa\x8a", b"\x18", 2, True, False, "Flayzer 2"],
32: [3, b"\x8a\xaa\x8a", b"\x18", 2, True, False, "Flayzer 3"],
33: [3, b"\x03\xb1\x8a", b"\x19", 2, True, True, "Eye Stalker"],
34: [3, b"\xb3\xb0\x8a", b"\x19", 2, True, False, "Eye Stalker (stone)"],
35: [3, b"\xf5\xaf\x8a", b"\x1a", 1, True, True, "Grundit"],
# 36: [3,b"\xf5\xa4\x8a",b"\x1a","Grundit (stationary)"], # Can't randomize this guy
# Sky Garden
40: [4, b"\xb0\xb4\x8a", b"\x1d", 2, True, True, "Blue Cyber"],
41: [4, b"\x20\xc5\x8a", b"\x1b", 2, True, True, "Dynapede 1"],
42: [4, b"\x33\xc5\x8a", b"\x1b", 2, True, False, "Dynapede 2"],
43: [5, b"\xb0\xb8\x8a", b"\x1e", 2, True, True, "Red Cyber"],
44: [5, b"\x16\xc8\x8a", b"\x1c", 2, True, True, "Nitropede"],
# Mu
50: [6, b"\xcc\xe6\x8a", b"\x2b", 2, True, True, "Slipper"],
51: [6, b"\x5c\xe4\x8a", b"\x2a", 2, True, True, "Skuddle"],
52: [6, b"\x9e\xdd\x8a", b"\x28", 2, True, True, "Cyclops"],
53: [6, b"\x6e\xe2\x8a", b"\x29", 3, True, True, "Flasher"],
54: [6, b"\x07\xde\x8a", b"\x28", 2, True, False, "Cyclops (asleep)"],
55: [6, b"\xf4\xe6\x8a", b"\x2b", 2, True, True, "Slipper (falling)"],
# Angel Dungeon
60: [7, b"\x9f\xee\x8a", b"\x2d", 3, False, True, "Dive Bat"],
61: [7, b"\x51\xea\x8a", b"\x2c", 2, True, True, "Steelbones"],
62: [7, b"\x33\xef\x8a", b"\x2e", 1, True, True, "Draco"], # False for now...
63: [7, b"\xc7\xf0\x8a", b"\x2e", 1, True, True, "Ramskull"],
# Great Wall
70: [8, b"\x55\x91\x8b", b"\x33", 2, True, True, "Archer 1"],
71: [8, b"\xfe\x8e\x8b", b"\x33", 2, True, False, "Archer Statue"],
72: [8, b"\xbe\x8d\x8b", b"\x34", 2, True, True, "Eyesore"],
73: [8, b"\x70\x8c\x8b", b"\x35", 3, False, True, "Fire Bug 1"],
74: [8, b"\x70\x8c\x8b", b"\x33", 3, False, False, "Fire Bug 2"],
75: [8, b"\x23\x94\x8b", b"\x32", 2, True, True, "Asp"],
76: [8, b"\x65\x91\x8b", b"\x33", 2, True, False, "Archer 2"],
77: [8, b"\x77\x91\x8b", b"\x33", 2, True, False, "Archer 3"],
78: [8, b"\x72\x8f\x8b", b"\x46", 2, True, False, "Archer Statue (switch) 1"],
79: [8, b"\x4f\x8f\x8b", b"\x33", 2, True, False, "Archer Statue (switch) 2"],
# Mt. Kress
80: [9, b"\xac\x9b\x8b", b"\x3e", 3, True, True, "Skulker (N/S)"],
81: [9, b"\x4e\x9c\x8b", b"\x3e", 3, True, True, "Skulker (E/W)"],
82: [9, b"\x44\x9c\x8b", b"\x3e", 3, True, False, "Skulker (E/W)"],
83: [9, b"\xa2\x9b\x8b", b"\x3e", 3, True, False, "Skulker (E/W)"],
84: [9, b"\x8b\x9e\x8b", b"\x3d", 3, False, True, "Yorrick (E/W)"],
85: [9, b"\x53\x9f\x8b", b"\x3d", 3, False, False, "Yorrick (E/W)"],
86: [9, b"\x0f\x9d\x8b", b"\x3d", 3, False, True, "Yorrick (N/S)"],
87: [9, b"\xcd\x9d\x8b", b"\x3d", 3, False, False, "Yorrick (N/S)"],
88: [9, b"\x3b\x98\x8b", b"\x3f", 3, False, True, "Fire Sprite"],
89: [9, b"\xcf\xa0\x8b", b"\x3c", 2, True, True, "Acid Splasher"],
90: [9, b"\xa1\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary E)"],
91: [9, b"\x75\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary W)"],
92: [9, b"\x49\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary S)"],
93: [9, b"\x1d\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary N)"],
# Ankor Wat
100: [10, b"\xd7\xb1\x8b", b"\x49", 2, True, True, "Shrubber"],
101: [10, b"\xb4\xb1\x8b", b"\x49", 2, True, False, "Shrubber 2"],
102: [10, b"\x75\xb2\x8b", b"\x46", 2, True, True, "Zombie"],
103: [10, b"\x4f\xaf\x8b", b"\x4a", 3, True, True, "Zip Fly"], # False for now...
104: [11, b"\x8d\xbd\x8b", b"\x42", 3, True, True, "Goldcap"],
105: [11, b"\x25\xb8\x8b", b"\x45", 2, True, True, "Gorgon"],
106: [11, b"\x17\xb8\x8b", b"\x45", 2, True, False, "Gorgon (jump down)"],
107: [11, b"\xbb\xbf\x8b", b"\x43", 2, True, False, "Frenzie"],
108: [11, b"\xd0\xbf\x8b", b"\x43", 2, True, True, "Frenzie 2"],
109: [11, b"\x66\xbb\x8b", b"\x44", 1, False, True, "Wall Walker"],
110: [11, b"\x66\xbb\x8b", b"\x3a", 1, False, False, "Wall Walker 2"],
111: [11, b"\x5c\xbb\x8b", b"\x44", 1, False, False, "Wall Walker 3"],
112: [11, b"\x5c\xbb\x8b", b"\x3a", 1, False, False, "Wall Walker 4"],
113: [11, b"\xaf\x99\x88", b"\x45", 2, True, False, "Gorgon (block)"],
# Pyramid
120: [12, b"\x5f\xc6\x8b", b"\x4f", 1, True, True, "Mystic Ball (stationary)"],
121: [12, b"\xfc\xc5\x8b", b"\x4f", 2, True, True, "Mystic Ball"],
122: [12, b"\xa3\xc5\x8b", b"\x4f", 2, True, True, "Mystic Ball"],
123: [12, b"\x9d\xc3\x8b", b"\x4e", 2, True, True, "Tuts"],
124: [12, b"\x98\xc7\x8b", b"\x51", 1, True, True, "Blaster"],
125: [12, b"\x84\xc1\x8b", b"\x4c", 2, True, False, "Haunt (stationary)"],
126: [12, b"\xa7\xc1\x8b", b"\x4c", 2, True, True, "Haunt"],
# Babel Tower
# 130: [14,b"\xd7\x99\x8a",b"\x5a","Castoth (boss)"],
# 131: [14,b"\xd5\xd0\x8a",b"\x5b","Viper (boss)"],
# 132: [14,b"\x50\xf1\x8a",b"\x5c","Vampire (boss)"],
# 133: [14,b"\x9c\xf1\x8a",b"\x5c","Vampire (boss)"],
# 134: [14,b"\x00\x80\x8b",b"\x5d","Sand Fanger (boss)"],
# 135: [14,b"\x1a\xa6\x8b",b"\x5e","Mummy Queen (boss)"],
# Jeweler's Mansion
140: [13, b"\xca\xaa\x8a", b"\x61", 2, True, True, "Flayzer"],
141: [13, b"\xf5\xaf\x8a", b"\x63", 1, True, True, "Grundit"],
142: [13, b"\xd8\xb0\x8a", b"\x62", 2, True, False, "Eye Stalker 1"],
143: [13, b"\x03\xb1\x8a", b"\x62", 2, True, True, "Eye Stalker 2"]
# Bosses
# 24: [15,b"\x03\x9b\x8a",b"\x14","Castoth (boss)"],
# 45: [15,b"\x6f\xd1\x8a",b"\x27","Viper (boss)"],
# 55: [15,b"\xf7\xf1\x8a",b"\x2f","Vampire (boss)"],
# 56: [15,b"\xc8\xf3\x8a",b"\x30","Vampire (boss)"],
# 79: [15,b"\x5c\x81\x8b",b"\x36","Sand Fanger (boss)"],
# 128: [15,b"\xb6\xa6\x8b",b"\x50","Mummy Queen (boss)"],
# 143: [15,b"\x09\xf7\x88",b"\x5f","Solid Arm (boss)"],
# 140: [15,b"\xaa\xee\x8c",b"\x54","Dark Gaia"]
}
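# Illustrative read of one enemy entry per the FORMAT above (hypothetical
# local names, not part of the original data):
#   enemyset, event_addr, template, etype, walkable, random_ok, name = self.enemies[0]
#   # -> 0, b"\x55\x87\x8a", b"\x05", 2 (walking), True, True, "Bat"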
# Database of non-enemy sprites to disable in enemizer
# FORMAT: { ID: [Enemyset, Event addr, Name]}
self.nonenemy_sprites = {
# Underground Tunnel
0: [0, "a8835", "Movable statue"],
1: [0, "a87ce", "Falling spear 1"],
2: [0, "a87c3", "Falling spear 2"],
3: [0, "a8aae", "Spike ball 1"],
4: [0, "a8a0f", "Spike ball 2"],
5: [0, "a8a7d", "Spike ball 3"],
6: [0, "a8a46", "Spike ball 4"],
7: [0, "a89de", "Spike ball 5"],
# Inca Ruins
10: [1, "9c26f", "Skeleton 1"],
11: [1, "9c798", "Skeleton 2"],
# 12: [1,"9c89d","Skeleton 3"], # Spriteset already restricted for this room
13: [1, "9c8f7", "Skeleton 4"],
14: [1, "a8896", "Broken statue (chest)"],
15: [1, "a88de", "Broken statue (blockade)"],
# Diamond Mine
20: [3, "5d6a8", "Elevator sign"],
21: [3, "aa4f5", "Elevator platform 1"],
22: [3, "aa50c", "Elevator platform 2"],
23: [3, "aa4e2", "Elevator platform 3"],
# Sky Garden
30: [4, "5f8c0", "Broken statue"],
31: [4, "ac0fe", "Sword statue 1"],
# 32: [4,"ac150","Sword statue 2"],
33: [4, "ac3b3", "Sword statue 3"],
# 34: [4,"ac409","Sword statue 4"],
35: [4, "accd4", "Fire snake (top)"],
36: [5, "accf1", "Fire snake (bottom)"],
# Mu
40: [6, "69ce9", "Floor spikes 1"],
41: [6, "69d1f", "Floor spikes 2"],
42: [6, "ae943", "Fire snake"],
# 43: [6,"69d4d","Donut"],
# Angel
50: [7, "6d56f", "Flame 1"],
51: [7, "6d57e", "Flame 2"],
52: [7, "6d58f", "Flame 3"],
# Great Wall
60: [8, "b8c30", "Wall spike 1"],
61: [8, "b8bf8", "Wall spike 2"],
62: [8, "7bd17", "Wall spike 3"],
63: [8, "7bd46", "Wall spike 4"],
64: [8, "7bd75", "Wall spike 5"],
65: [8, "7bce8", "Wall spike 5"],
# Mt. Kress (nothing)
# Ankor Wat
80: [11, "89f2c", "Floating crystal"],
81: [11, "89ffc", "Skeleton 1"],
82: [11, "8a25e", "Skeleton 2"]
# Pyramid
# 90: [12,"8b6a2","Warp point"],
# 91: [12,"8cd6c","Warp point"],
# Jeweler's Mansion (nothing)
}
# Database of overworld menus
# FORMAT: { ID: [ShuffleID (0=no shuffle), Menu_ID, FromRegion, ToRegion, ROM_EntranceData, ROM_TextLoc, MenuText, ContinentName, AreaName]}
self.overworld_menus = {
# SW Continent "\x01"
1: [0, b"\x01", 10, 20, "3b95b", "0cafd", "3b590", "SW Continent", "South Cape"],
2: [0, b"\x01", 10, 30, "3b96b", "0cb26", "3b5a9", "SW Continent", "Edward's"],
3: [0, b"\x01", 10, 50, "3b97b", "0cb5b", "3b5b5", "SW Continent", "Itory"],
4: [0, b"\x01", 10, 60, "3b98b", "4f453", "3b5c2", "SW Continent", "Moon Tribe"],
5: [0, b"\x01", 10, 63, "3b99b", "0cb74", "3b59c", "SW Continent", "Inca"],
# SE Continent "\x07"
6: [0, b"\x07", 11, 102, "3b9ab", "5aab7", "3b5ef", "SE Continent", "Diamond Coast"],
7: [0, b"\x07", 11, 110, "3b9bb", "0cba3", "3b5e3", "SE Continent", "Freejia"],
8: [0, b"\x07", 11, 133, "3b9cb", "0cbbc", "3b608", "SE Continent", "Diamond Mine"],
9: [0, b"\x07", 11, 160, "3b9db", "5e31e", "3b615", "SE Continent", "Neil's"],
10: [0, b"\x07", 11, 162, "3b9eb", "5e812", "3b5fc", "SE Continent", "Nazca"],
# NE Continent "\x0a"
11: [0, b"\x0a", 12, 250, "3ba1b", "0cbeb", "3b642", "NE Continent", "Angel Village"],
12: [0, b"\x0a", 12, 280, "3ba2b", "0cc30", "3b636", "NE Continent", "Watermia"],
13: [0, b"\x0a", 12, 290, "3ba3b", "0cc49", "3b64f", "NE Continent", "Great Wall"],
# N Continent "\x0f"
14: [0, b"\x0f", 13, 310, "3ba4b", "0cc8e", "3b660", "N Continent", "Euro"],
15: [0, b"\x0f", 13, 330, "3ba5b", "0cca7", "3b66c", "N Continent", "Mt. Temple"],
16: [0, b"\x0f", 13, 350, "3ba6b", "0ccec", "3b679", "N Continent", "Native's Village"],
17: [0, b"\x0f", 13, 360, "3ba7b", "0cd05", "3b685", "N Continent", "Ankor Wat"],
# NW Continent Overworld "\x16"
18: [0, b"\x16", 14, 400, "3ba8b", "0cd24", "3b696", "NW Continent", "Dao"],
19: [0, b"\x16", 14, 410, "3ba9b", "0cd55", "3b6a3", "NW Continent", "Pyramid"]
}
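# Illustrative unpack of a menu entry per the FORMAT above (hypothetical names):
#   shuffle_id, menu_id, from_region, to_region, entrance_addr, text_loc, \
#       menu_text, continent, area = self.overworld_menus[19]
#   # -> 0, b"\x16", 14, 410, "3ba9b", "0cd55", "3b6a3", "NW Continent", "Pyramid"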
# Database of special map exits that don't conform to the typical "02 26" format, IDs correspond to self.exits
# FORMAT: { ID: [MapAddr, Xaddr, Yaddr, FaceDirAddr, CameraAddr]}
self.exits_detailed = {
15: ["8ce31", "8ce37", "8ce40", "", "8ce49"] # Mummy Queen exit
}
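# Note: key 15 lines up with self.exits[15] below ("Mummy Queen exit"); since
# this exit doesn't use the packed "02 26" exit format, a patcher is expected
# to write each of these address fields individually.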
# Database of map exits
# FORMAT: { ID: [CoupleID (0 if one-way), ShuffleTo (0 if no shuffle), ShuffleFrom (0 if no shuffle), FromRegion, ToRegion,
#                 ROM_Location, DestString, BossFlag, DungeonFlag, DungeonEntranceFlag, Name]}
self.exits = {
# Bosses
1: [ 2, 0, 0, 78, 97, "18872", b"\x29\x78\x00\xC0\x00\x00\x00\x11", True, True, False, "Castoth entrance (in)"],
2: [ 1, 0, 0, 0, 0, "189e4", b"\x1E\x68\x00\x00\x01\x03\x00\x24", True, True, False, "Castoth entrance (out)"],
3: [ 0, 0, 0, 104, 102, "584cc", b"\x30\x48\x00\x10\x01\x83\x00\x21", True, True, False, "Diamond Coast passage (Gold Ship)"],
4: [ 5, 0, 0, 171, 198, "18e20", b"\x55\x70\x00\xE0\x01\x00\x00\x22", True, True, False, "Viper entrance (in)"],
5: [ 4, 0, 0, 0, 0, "19006", b"\x4C\xF8\x00\x30\x00\x03\x00\x22", True, True, False, "Viper entrance (out)"],
6: [ 0, 0, 0, 198, 200, "acece", b"\x5A\x90\x00\x70\x00\x83\x00\x14", True, True, False, "Seaside Palace passage (Viper)"],
7: [ 8, 0, 0, 241, 243, "69c62", b"\x67\x78\x01\xd0\x01\x80\x01\x22", True, True, False, "Vampires entrance (in)"],
8: [ 7, 0, 0, 0, 0, "193f8", b"\x65\xb8\x00\x80\x02\x03\x00\x44", True, True, False, "Vampires entrance (out)"],
9: [ 0, 0, 0, 242, 212, "193ea", b"\x5f\x80\x00\x50\x00\x83\x00\x44", True, True, False, "Vampires exit"],
10: [11, 0, 0, 301, 302, "19c2a", b"\x8A\x50\x00\x90\x00\x87\x00\x33", True, True, False, "Sand Fanger entrance (in)"],
11: [10, 0, 0, 0, 0, "19c78", b"\x88\xE0\x03\x90\x00\x06\x00\x14", True, True, False, "Sand Fanger entrance (out)"],
12: [ 0, 0, 0, 303, 290, "19c84", b"\x82\x10\x00\x90\x00\x87\x00\x18", True, True, False, "Sand Fanger exit"],
13: [14, 0, 0, 414, 448, "8cdcf", b"\xDD\xF8\x00\xB0\x01\x00\x00\x22", True, True, False, "Mummy Queen entrance (in)"],
14: [13, 0, 0, 0, 0, "", b"\xCC\xF8\x01\x20\x01\x03\x00\x44", True, True, False, "Mummy Queen entrance (out)"], # fake
15: [ 0, 0, 0, 448, 415, "", b"\xCD\x70\x00\x90\x00\x83\x00\x11", True, True, False, "Mummy Queen exit"], # This one's dumb, see exits_detailed
16: [17, 0, 0, 470, 471, "1a8c2", b"\xE3\xD8\x00\x90\x03\x83\x30\x44", True, True, False, "Babel entrance (in)"],
17: [16, 0, 0, 0, 0, "1a8d0", b"\xE2\xD0\x00\xE0\x00\x03\x00\x84", True, True, False, "Babel entrance (out)"],
18: [ 0, 0, 0, 472, 400, "9804a", b"\xC3\x10\x02\x90\x00\x83\x00\x23", True, True, False, "Dao passage (Babel)"],
19: [20, 0, 0, 481, 482, "1a94e", b"\xEA\x78\x00\xC0\x00\x00\x00\x11", True, True, False, "Solid Arm entrance (in)"],
20: [19, 0, 0, 0, 0, "", b"\xE9\x78\x03\x90\x00\x03\x00\x44", True, True, False, "Solid Arm entrance (out)"], # fake
21: [ 0, 0, 0, 472, 400, "", b"\xC3\x10\x02\x90\x00\x83\x00\x23", True, True, False, "Dao passage (Solid Arm)"], # fake
# 21: [ 0, 0, 0, 482, 472, "", b"\xE3\x80\x02\xB0\x01\x80\x10\x23", True, True, False, "Babel passage (Solid Arm)"], # This one stays, @98115
# Passage Menus
22: [0, 0, 0, 15, 28, "", b"", False, False, False, "Seth: Passage 1 (South Cape)"],
23: [0, 0, 0, 15, 102, "", b"", False, False, False, "Seth: Passage 2 (Diamond Coast)"],
24: [0, 0, 0, 15, 280, "", b"", False, False, False, "Seth: Passage 3 (Watermia)"],
25: [0, 0, 0, 16, 60, "", b"", False, False, False, "Moon Tribe: Passage 1 (Moon Tribe)"],
26: [0, 0, 0, 16, 200, "", b"", False, False, False, "Moon Tribe: Passage 2 (Seaside Palace)"],
27: [0, 0, 0, 17, 161, "", b"", False, False, False, "Neil: Passage 1 (Neil's)"],
28: [0, 0, 0, 17, 314, "", b"", False, False, False, "Neil: Passage 2 (Euro)"],
29: [0, 0, 0, 17, 402, "", b"", False, False, False, "Neil: Passage 3 (Dao)"],
30: [0, 0, 0, 17, 460, "", b"", False, False, False, "Neil: Passage 4 (Babel)"],
# South Cape
31: [32, 0, 0, 20, 22, "18444", b"", False, False, False, "South Cape: School main (in)"], # Duplicate exit at 18438?
32: [31, 0, 0, 0, 0, "1856c", b"", False, False, False, "South Cape: School main (out)"],
33: [34, 0, 0, 21, 22, "18498", b"", False, False, False, "South Cape: School roof (in)"],
34: [33, 0, 0, 0, 0, "18560", b"", False, False, False, "South Cape: School roof (out)"],
35: [36, 0, 0, 20, 23, "18474", b"", False, False, False, "South Cape: Will's House (in)"],
36: [35, 0, 0, 0, 0, "1852a", b"", False, False, False, "South Cape: Will's House (out)"],
37: [38, 0, 0, 20, 24, "18480", b"", False, False, False, "South Cape: East House (in)"],
38: [37, 0, 0, 0, 0, "18552", b"", False, False, False, "South Cape: East House (out)"],
39: [40, 0, 0, 20, 27, "1845c", b"", False, False, False, "South Cape: Erik's House main (in)"],
40: [39, 0, 0, 0, 0, "184e8", b"", False, False, False, "South Cape: Erik's House main (out)"],
41: [42, 0, 0, 20, 27, "184a4", b"", False, False, False, "South Cape: Erik's House roof (in)"],
42: [41, 0, 0, 0, 0, "184f4", b"", False, False, False, "South Cape: Erik's House roof (out)"],
43: [44, 0, 0, 20, 26, "18450", b"", False, False, False, "South Cape: Lance's House (in)"],
44: [43, 0, 0, 0, 0, "184c0", b"", False, False, False, "South Cape: Lance's House (out)"],
45: [46, 0, 0, 20, 25, "18468", b"", False, False, False, "South Cape: Seth's House (in)"],
46: [45, 0, 0, 0, 0, "1851c", b"", False, False, False, "South Cape: Seth's House (out)"],
47: [48, 0, 0, 20, 28, "1848c", b"", False, False, False, "South Cape: Seaside Cave (in)"],
48: [47, 0, 0, 0, 0, "4be6a", b"", False, False, False, "South Cape: Seaside Cave (out)"],
# Edward's / Prison
50: [51, 0, 0, 31, 49, "1857c", b"", False, True, True, "Tunnel back entrance (in)"],
51: [50, 0, 0, 0, 0, "186f4", b"", False, True, True, "Tunnel back entrance (out)"],
52: [53, 0, 0, 33, 40, "1860c", b"\x0C\x58\x00\x50\x00\x83\x00\x12", False, True, True, "Tunnel entrance (in)"], # set checkpoint
53: [52, 0, 0, 0, 0, "18626", b"", False, True, True, "Tunnel entrance (out)"],
54: [ 0, 0, 0, 30, 32, "4c755", b"", False, False, False, "Prison entrance (king)"],
#55: [54, 0, 0, 0, 2, "", b"\x0a\xe0\x01\x60\x01\x03\x20\x34", False, False, False, "Prison exit (king), fake"],
# Tunnel
60: [61, 0, 0, 40, 41, "18632", b"", False, True, False, "Tunnel: Map 12 to Map 13"],
61: [60, 0, 0, 0, 0, "18640", b"", False, True, False, "Tunnel: Map 13 to Map 12"],
62: [63, 0, 0, 41, 42, "1864c", b"", False, True, False, "Tunnel: Map 13 to Map 14"],
63: [62, 0, 0, 0, 0, "1865a", b"", False, True, False, "Tunnel: Map 14 to Map 13"],
64: [65, 0, 0, 42, 43, "18666", b"", False, True, False, "Tunnel: Map 14 to Map 15"],
65: [64, 0, 0, 0, 0, "18680", b"", False, True, False, "Tunnel: Map 15 to Map 14"],
66: [67, 0, 0, 43, 44, "1868c", b"", False, True, False, "Tunnel: Map 15 to Map 16"],
67: [66, 0, 0, 0, 0, "1869a", b"", False, True, False, "Tunnel: Map 16 to Map 15"],
68: [69, 0, 0, 43, 45, "18674", b"", False, True, False, "Tunnel: Map 15 to Map 17"],
69: [68, 0, 0, 0, 0, "186a8", b"", False, True, False, "Tunnel: Map 17 to Map 15"],
70: [71, 0, 0, 46, 47, "186b4", b"", False, True, False, "Tunnel: Map 17 to Map 18"],
71: [70, 0, 0, 0, 0, "186c2", b"", False, True, False, "Tunnel: Map 18 to Map 17"],
72: [73, 0, 0, 48, 49, "186ce", b"", False, True, False, "Tunnel: Map 18 to Map 19"],
73: [72, 0, 0, 0, 0, "186e8", b"", False, True, False, "Tunnel: Map 19 to Map 18"],
# Itory
80: [81, 0, 0, 51, 53, "18704", b"", False, False, False, "Itory: West House (in)"],
81: [80, 0, 0, 0, 0, "1874e", b"", False, False, False, "Itory: West House (out)"],
82: [83, 0, 0, 51, 54, "18728", b"", False, False, False, "Itory: North House (in)"],
83: [82, 0, 0, 0, 0, "18776", b"", False, False, False, "Itory: North House (out)"],
84: [85, 0, 0, 51, 55, "18710", b"", False, False, False, "Itory: Lilly Front Door (in)"],
85: [84, 0, 0, 0, 0, "1875c", b"", False, False, False, "Itory: Lilly Front Door (out)"],
86: [87, 0, 0, 52, 55, "1871c", b"", False, False, False, "Itory: Lilly Back Door (in)"],
87: [86, 0, 0, 0, 0, "18768", b"", False, False, False, "Itory: Lilly Back Door (out)"],
88: [89, 0, 0, 51, 56, "18734", b"", False, False, False, "Itory Cave (in)"],
89: [88, 0, 0, 0, 0, "18784", b"", False, False, False, "Itory Cave (out)"],
90: [91, 0, 0, 57, 58, "18790", b"", False, False, False, "Itory Cave Hidden Room (in)"], # always linked?
91: [90, 0, 0, 0, 0, "1879c", b"", False, False, False, "Itory Cave Hidden Room (out)"],
# Moon Tribe
100: [101, 0, 0, 60, 61, "187b6", b"", False, False, False, "Moon Tribe Cave (in)"],
101: [100, 0, 0, 0, 0, "187c4", b"", False, False, False, "Moon Tribe Cave (out)"],
102: [ 0, 0, 0, 64, 170, "9d1ea", b"", False, True, True, "Moon Tribe: Sky Garden passage"],
# Inca
110: [111, 0, 0, 63, 70, "187d2", b"", False, True, True, "Inca Ruins entrance (in)"],
111: [110, 0, 0, 0, 0, "187e0", b"", False, True, True, "Inca Ruins entrance (out)"],
#114: [ 0, 0, 0, 65, 102, "", b"", False, False, True, "Inca: Diamond Coast passage"],
# Inca Ruins
120: [121, 0, 0, 70, 89, "", b"", False, True, False, "Inca: Map 29 to Map 37 (E)"],
121: [120, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 37 to Map 29 (E)"],
122: [123, 0, 0, 89, 94, "", b"", False, True, False, "Inca: Map 37 to Map 39"],
123: [122, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 39 to Map 37"],
124: [125, 0, 0, 94, 71, "", b"", False, True, False, "Inca: Map 39 to Map 29"],
125: [124, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 39"],
126: [127, 0, 0, 90, 72, "", b"", False, True, False, "Inca: Map 37 to Map 29 (W)"],
127: [126, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 37 (W)"],
128: [129, 0, 0, 72, 91, "", b"", False, True, False, "Inca: Map 29 to Map 38"],
129: [128, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 38 to Map 29"],
130: [131, 0, 0, 73, 80, "", b"", False, True, False, "Inca: Map 29 to Map 32"],
131: [130, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 32 to Map 29"],
132: [133, 0, 0, 81, 85, "", b"", False, True, False, "Inca: Map 32 to Map 35"],
133: [132, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 35 to Map 32"],
134: [135, 0, 0, 85, 74, "", b"", False, True, False, "Inca: Map 35 to Map 29"],
135: [134, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 35"],
136: [137, 0, 0, 74, 79, "", b"", False, True, False, "Inca: Map 29 to Map 31"],
137: [136, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 31 to Map 29"],
138: [139, 0, 0, 79, 95, "", b"", False, True, False, "Inca: Map 31 to Map 40"],
139: [138, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 40 to Map 31"],
140: [141, 0, 0, 96, 76, "", b"", False, True, False, "Inca: Map 40 to Map 29"],
141: [140, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 40"],
142: [143, 0, 0, 86, 82, "", b"", False, True, False, "Inca: Map 35 to Map 33"],
143: [142, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 33 to Map 35"],
144: [145, 0, 0, 83, 75, "", b"", False, True, False, "Inca: Map 33 to Map 29"],
145: [144, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 33"],
146: [147, 0, 0, 99, 84, "", b"", False, True, False, "Inca: Map 29 to Map 34"], # Special case to allow for Z-ladder glitch
147: [146, 0, 0, 84, 75, "", b"", False, True, False, "Inca: Map 34 to Map 29"],
148: [149, 0, 0, 84, 93, "", b"", False, True, False, "Inca: Map 34 to Map 38"],
149: [148, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 38 to Map 34"],
150: [151, 0, 0, 84, 87, "", b"", False, True, False, "Inca: Map 34 to Map 36"],
151: [150, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 36 to Map 34"],
152: [153, 0, 0, 88, 77, "", b"", False, True, False, "Inca: Map 36 to Map 30"],
153: [152, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 30 to Map 36"],
154: [ 0, 0, 0, 98, 100, "", b"", False, True, False, "Gold Ship entrance"],
# Gold Ship
160: [161, 0, 0, 100, 101, "", b"", False, False, False, "Gold Ship Interior (in)"],
161: [160, 0, 0, 0, 0, "", b"", False, False, False, "Gold Ship Interior (out)"],
# Diamond Coast
172: [173, 0, 0, 102, 103, "18aa0", b"", False, False, False, "Coast House (in)"],
173: [172, 0, 0, 0, 0, "18aae", b"", False, False, False, "Coast House (out)"],
# Freejia
182: [183, 0, 0, 110, 116, "18aec", b"", False, False, False, "Freejia: West House (in)"],
183: [182, 0, 0, 0, 0, "18b9c", b"", False, False, False, "Freejia: West House (out)"],
184: [185, 0, 0, 110, 117, "18af8", b"", False, False, False, "Freejia: 2-story House (in)"],
185: [184, 0, 0, 0, 0, "18bc4", b"", False, False, False, "Freejia: 2-story House (out)"],
186: [187, 0, 0, 111, 117, "18b04", b"", False, False, False, "Freejia: 2-story Roof (in)"],
187: [186, 0, 0, 0, 0, "18bd0", b"", False, False, False, "Freejia: 2-story Roof (out)"],
188: [189, 0, 0, 110, 118, "18b10", b"", False, False, False, "Freejia: Lovers' House (in)"],
189: [188, 0, 0, 0, 0, "18bf8", b"", False, False, False, "Freejia: Lovers' House (out)"],
190: [191, 0, 0, 110, 119, "18b1c", b"", False, False, False, "Freejia: Hotel (in)"],
191: [190, 0, 0, 0, 0, "18c20", b"", False, False, False, "Freejia: Hotel (out)"],
192: [193, 0, 0, 119, 120, "18c2c", b"", False, False, False, "Freejia: Hotel West Room (in)"],
193: [192, 0, 0, 0, 0, "18c44", b"", False, False, False, "Freejia: Hotel West Room (out)"],
194: [195, 0, 0, 119, 121, "18c38", b"", False, False, False, "Freejia: Hotel East Room (in)"],
195: [194, 0, 0, 0, 0, "18c50", b"", False, False, False, "Freejia: Hotel East Room (out)"],
196: [197, 0, 0, 110, 122, "18b28", b"", False, False, False, "Freejia: Laborer House (in)"], # might take this out?
197: [196, 0, 0, 0, 0, "18c84", b"", False, False, False, "Freejia: Laborer House (out)"],
198: [199, 0, 0, 112, 122, "18b34", b"", False, False, False, "Freejia: Laborer Roof (in)"],
199: [198, 0, 0, 0, 0, "18c78", b"", False, False, False, "Freejia: Laborer Roof (out)"],
200: [201, 0, 0, 110, 123, "18b40", b"", False, False, False, "Freejia: Messy House (in)"],
201: [200, 0, 0, 0, 0, "18c92", b"", False, False, False, "Freejia: Messy House (out)"],
202: [203, 0, 0, 110, 124, "18abc", b"", False, False, False, "Freejia: Erik House (in)"],
203: [202, 0, 0, 0, 0, "18b5a", b"", False, False, False, "Freejia: Erik House (out)"],
204: [205, 0, 0, 110, 125, "18ac8", b"", False, False, False, "Freejia: Dark Space House (in)"],
205: [204, 0, 0, 0, 0, "18b68", b"", False, False, False, "Freejia: Dark Space House (out)"],
206: [207, 0, 0, 110, 126, "18ad4", b"", False, False, False, "Freejia: Labor Trade House (in)"],
207: [206, 0, 0, 0, 0, "18b82", b"", False, False, False, "Freejia: Labor Trade House (out)"],
208: [209, 0, 0, 113, 126, "18ae0", b"", False, False, False, "Freejia: Labor Trade Roof (in)"],
209: [208, 0, 0, 0, 0, "18b8e", b"", False, False, False, "Freejia: Labor Trade Roof (out)"],
210: [211, 0, 0, 114, 127, "18b4c", b"", False, False, False, "Freejia: Labor Market (in)"],
211: [210, 0, 0, 0, 0, "18ca0", b"", False, False, False, "Freejia: Labor Market (out)"],
# Diamond Mine
222: [223, 0, 0, 133, 134, "", b"", False, True, False, "Diamond Mine: Map 62 to Map 63"],
223: [222, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 63 to Map 62"],
224: [225, 0, 0, 135, 140, "", b"", False, True, False, "Diamond Mine: Map 63 to Map 66"],
225: [224, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 63"],
226: [227, 0, 0, 134, 136, "", b"", False, True, False, "Diamond Mine: Map 63 to Map 64"],
227: [226, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 64 to Map 63"],
228: [229, 0, 0, 136, 138, "", b"", False, True, False, "Diamond Mine: Map 64 to Map 65"],
229: [228, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 65 to Map 64"],
230: [231, 0, 0, 139, 143, "", b"", False, True, False, "Diamond Mine: Map 65 to Map 66"],
231: [230, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 65"],
232: [233, 0, 0, 138, 130, "", b"", False, True, False, "Diamond Mine: Map 65 to Map 61"],
233: [232, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 61 to Map 65"],
234: [235, 0, 0, 132, 142, "", b"", False, True, False, "Diamond Mine: Map 61 to Map 66"],
235: [234, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 61"],
236: [237, 0, 0, 140, 144, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 67 (1)"],
237: [236, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 67 to Map 66 (1)"],
238: [239, 0, 0, 145, 141, "", b"", False, True, False, "Diamond Mine: Map 67 to Map 66 (2)"],
239: [238, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 67 (2)"],
240: [241, 0, 0, 141, 146, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 68"],
241: [240, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 66"],
242: [243, 0, 0, 146, 148, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 69"],
243: [242, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 69 to Map 68"],
244: [245, 0, 0, 146, 149, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 70"],
245: [244, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 70 to Map 68"],
246: [247, 0, 0, 147, 150, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 71"],
247: [246, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 71 to Map 68"],
# Nazca
260: [261, 0, 0, 162, 170, "5e6a2", b"\x4C\x68\x01\x40\x00\x83\x00\x22", False, True, True, "Nazca: Sky Garden entrance"],
261: [260, 0, 0, 0, 0, "5f429", b"\x4B\xe0\x01\xc0\x02\x03\x00\x44", False, True, True, "Nazca: Sky Garden exit"],
# Sky Garden
#270: [ 0, 0, 0, 171, 16, "", b"", False, True, True, "Moon Tribe: Sky Garden passage"],
273: [274, 0, 0, 170, 172, "", b"", False, True, False, "Sky Garden: Map 76 to Map 77"],
274: [273, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 77 to Map 76"],
275: [276, 0, 0, 170, 176, "", b"", False, True, False, "Sky Garden: Map 76 to Map 79"],
276: [275, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 79 to Map 76"],
277: [278, 0, 0, 170, 181, "", b"", False, True, False, "Sky Garden: Map 76 to Map 81"],
278: [277, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 81 to Map 76"],
279: [280, 0, 0, 170, 190, "", b"", False, True, False, "Sky Garden: Map 76 to Map 83"],
280: [279, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 83 to Map 76"],
281: [282, 0, 0, 172, 175, "", b"", False, True, False, "Sky Garden: Map 77 to Map 78 (E)"], # Room 1
282: [281, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 78 to Map 77 (W)"],
283: [284, 0, 0, 175, 173, "", b"", False, True, False, "Sky Garden: Map 78 to Map 77 (SE)"],
284: [283, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 77 to Map 78 (SW)"],
285: [286, 0, 0, 175, 174, "", b"", False, True, False, "Sky Garden: Map 78 to Map 77 (SW)"],
286: [285, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 77 to Map 78 (SE)"],
287: [288, 0, 0, 176, 169, "", b"", False, True, False, "Sky Garden: Map 79 to Map 86"], # Room 2
288: [287, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 86 to Map 79"],
289: [290, 0, 0, 176, 179, "", b"", False, True, False, "Sky Garden: Map 79 to Map 80 (NE)"],
290: [289, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 80 to Map 79 (NW)"],
291: [292, 0, 0, 179, 177, "", b"", False, True, False, "Sky Garden: Map 80 to Map 79 (N)"],
292: [291, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 79 to Map 80 (N)"],
293: [294, 0, 0, 178, 180, "", b"", False, True, False, "Sky Garden: Map 79 to Map 80 (S)"],
294: [293, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 80 to Map 79 (S)"],
295: [296, 0, 0, 168, 186, "", b"", False, True, False, "Sky Garden: Map 81 to Map 82 (NE)"], # Room 3
296: [295, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 82 to Map 81 (NW)"],
297: [298, 0, 0, 182, 188, "", b"", False, True, False, "Sky Garden: Map 81 to Map 82 (NW)"],
298: [297, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 82 to Map 81 (NE)"],
299: [300, 0, 0, 184, 187, "", b"", False, True, False, "Sky Garden: Map 81 to Map 82 (SE)"],
300: [299, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 82 to Map 81 (SW)"],
301: [302, 0, 0, 191, 196, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (NW)"], # Room 4
302: [301, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (NE)"],
303: [304, 0, 0, 192, 195, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (C)"],
304: [303, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (C)"],
305: [306, 0, 0, 197, 193, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (SE)"],
306: [305, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (SW)"],
307: [308, 0, 0, 167, 195, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (E)"],
308: [307, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (W)"],
# Seaside Palace
310: [311, 0, 0, 211, 201, "69759", b"", False, False, False, "Seaside entrance"], # ALWAYS LINKED
311: [310, 0, 0, 0, 0, "1906a", b"", False, False, False, "Seaside exit"],
312: [313, 0, 0, 200, 202, "19046", b"", False, False, False, "Seaside: Area 1 NE Room (in)"],
313: [312, 0, 0, 0, 0, "19114", b"", False, False, False, "Seaside: Area 1 NE Room (out)"],
314: [315, 0, 0, 200, 203, "19052", b"", False, False, False, "Seaside: Area 1 NW Room (in)"],
315: [314, 0, 0, 0, 0, "19120", b"", False, False, False, "Seaside: Area 1 NW Room (out)"],
316: [317, 0, 0, 200, 204, "1905e", b"", False, False, False, "Seaside: Area 1 SE Room (in)"],
317: [316, 0, 0, 0, 0, "1912c", b"", False, False, False, "Seaside: Area 1 SE Room (out)"],
318: [319, 0, 0, 200, 205, "1903a", b"", False, False, False, "Seaside: Area 2 entrance"],
319: [318, 0, 0, 0, 0, "19146", b"", False, False, False, "Seaside: Area 2 exit"],
320: [321, 0, 0, 205, 207, "1915e", b"", False, False, False, "Seaside: Area 2 SW Room (in)"],
321: [320, 0, 0, 0, 0, "19138", b"", False, False, False, "Seaside: Area 2 SW Room (out)"],
322: [323, 0, 0, 205, 209, "19152", b"", False, False, False, "Seaside: Fountain (in)"],
323: [322, 0, 0, 0, 0, "191d4", b"", False, False, False, "Seaside: Fountain (out)"],
# Mu
330: [331, 0, 0, 210, 212, "191ee", b"", False, True, True, "Mu entrance"],
331: [330, 0, 0, 0, 0, "191fc", b"", False, True, True, "Mu exit"],
332: [333, 0, 0, 212, 217, "", b"", False, True, False, "Mu: Map 95 to Map 96"],
333: [332, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 96 to Map 95"],
334: [335, 0, 0, 217, 220, "", b"", False, True, False, "Mu: Map 96 to Map 97 (top)"],
335: [334, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 97 to Map 96 (top)"],
336: [337, 0, 0, 220, 231, "", b"", False, True, False, "Mu: Map 97 to Map 99"],
337: [336, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 99 to Map 97"],
338: [339, 0, 0, 220, 225, "", b"", False, True, False, "Mu: Map 97 to Map 98 (top)"],
339: [338, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 97 (top)"],
340: [341, 0, 0, 218, 222, "", b"", False, True, False, "Mu: Map 96 to Map 97 (middle)"],
341: [340, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 97 to Map 96 (middle)"],
342: [343, 0, 0, 223, 227, "", b"", False, True, False, "Mu: Map 97 to Map 98 (middle)"],
343: [342, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 97 (middle)"],
# 344: [345, 0, 0, 000, 000, "", b"", False, True, False, "Mu: Map 95 to Map 98 (middle)"],
# 345: [344, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 95 (middle)"],
346: [347, 0, 0, 227, 233, "", b"", False, True, False, "Mu: Map 98 to Map 100 (middle E)"],
347: [346, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 100 to Map 98 (middle E)"],
348: [349, 0, 0, 233, 237, "", b"", False, True, False, "Mu: Map 100 to Map 101 (middle N)"],
349: [348, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 101 to Map 100 (middle N)"],
350: [351, 0, 0, 237, 234, "", b"", False, True, False, "Mu: Map 101 to Map 100 (middle S)"],
351: [350, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 100 to Map 101 (middle S)"],
352: [353, 0, 0, 234, 228, "", b"", False, True, False, "Mu: Map 100 to Map 98 (middle W)"],
353: [352, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 100 (middle W)"],
354: [355, 0, 0, 213, 232, "", b"", False, True, False, "Mu: Map 95 to Map 99"],
355: [354, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 99 to Map 95"],
356: [357, 0, 0, 245, 246, "", b"", False, True, False, "Mu: Map 95 to Map 98 (top)"],
357: [356, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 95 (top)"],
358: [359, 0, 0, 229, 224, "", b"", False, True, False, "Mu: Map 98 to Map 97 (bottom)"],
359: [358, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 97 to Map 98 (bottom)"],
360: [361, 0, 0, 224, 219, "", b"", False, True, False, "Mu: Map 97 to Map 96 (bottom)"],
361: [360, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 96 to Map 97 (bottom)"],
362: [363, 0, 0, 230, 216, "", b"", False, True, False, "Mu: Map 98 to Map 95 (bottom)"],
363: [362, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 95 to Map 98 (bottom)"],
364: [365, 0, 0, 230, 235, "", b"", False, True, False, "Mu: Map 98 to Map 100 (bottom)"],
365: [364, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 100 to Map 98 (bottom)"],
366: [367, 0, 0, 235, 239, "", b"", False, True, False, "Mu: Map 100 to Map 101 (bottom)"],
367: [366, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 101 to Map 100 (bottom)"],
368: [369, 0, 0, 239, 240, "", b"", False, True, False, "Mu: Map 101 to Map 102"],
369: [368, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 102 to Map 101"],
# Angel Village
382: [383, 0, 0, 250, 210, "1941e", b"", False, False, False, "Angel: Mu Passage (in)"],
383: [382, 0, 0, 0, 0, "191e2", b"", False, False, False, "Angel: Mu Passage (out)"], #custom
384: [385, 0, 0, 250, 251, "1942a", b"", False, False, False, "Angel: Underground entrance (in)"],
385: [384, 0, 0, 0, 0, "19446", b"", False, False, False, "Angel: Underground entrance (out)"],
386: [387, 0, 0, 251, 252, "19452", b"", False, False, False, "Angel: Room 1 (in)"],
387: [386, 0, 0, 0, 0, "194de", b"", False, False, False, "Angel: Room 1 (out)"],
388: [389, 0, 0, 251, 253, "19476", b"", False, False, False, "Angel: Room 2 (in)"],
389: [388, 0, 0, 0, 0, "19502", b"", False, False, False, "Angel: Room 2 (out)"],
390: [391, 0, 0, 251, 254, "1945e", b"", False, False, False, "Angel: Dance Hall (in)"],
391: [390, 0, 0, 0, 0, "1950e", b"", False, False, False, "Angel: Dance Hall (out)"],
392: [393, 0, 0, 251, 255, "1946a", b"", False, False, False, "Angel: DS Room (in)"],
393: [392, 0, 0, 0, 0, "194f6", b"", False, False, False, "Angel: DS Room (out)"],
# Angel Dungeon
400: [401, 0, 0, 251, 260, "19482", b"", False, True, True, "Angel Dungeon entrance"],
401: [400, 0, 0, 0, 0, "19534", b"", False, True, True, "Angel Dungeon exit"],
402: [403, 0, 0, 260, 261, "19528", b"", False, True, False, "Angel Dungeon: Map 109 to Map 110"],
403: [402, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 110 to Map 109"],
404: [405, 0, 0, 278, 262, "", b"", False, True, False, "Angel Dungeon: Map 110 to Map 111"],
405: [404, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 111 to Map 110"],
406: [407, 0, 0, 262, 263, "", b"", False, True, False, "Angel Dungeon: Map 111 to Map 112"],
407: [406, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 112 to Map 111"],
408: [409, 0, 0, 264, 265, "", b"", False, True, False, "Angel Dungeon: Map 112 to Chest"],
409: [408, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Chest to Map 112"],
410: [411, 0, 0, 279, 266, "", b"", False, True, False, "Angel Dungeon: Map 112 to Map 113"],
411: [410, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 113 to Map 112"],
412: [413, 0, 0, 266, 267, "", b"", False, True, False, "Angel Dungeon: Map 113 to Map 114"],
413: [412, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 114 to Map 113"],
414: [415, 0, 0, 268, 276, "", b"", False, True, False, "Angel Dungeon: Map 114 to Ishtar Foyer"],
415: [414, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Ishtar Foyer to Map 114"],
# Ishtar's Studio
420: [421, 0, 0, 277, 269, "196b6", b"", False, False, False, "Ishtar entrance"],
421: [420, 0, 0, 0, 0, "196c2", b"", False, False, False, "Ishtar exit"],
422: [423, 0, 0, 269, 270, "196ce", b"", False, False, False, "Ishtar: Portrait room (in)"],
423: [422, 0, 0, 0, 0, "196f4", b"", False, False, False, "Ishtar: Portrait room (out)"],
424: [425, 0, 0, 269, 271, "196da", b"", False, False, False, "Ishtar: Side room (in)"],
425: [424, 0, 0, 0, 0, "19700", b"", False, False, False, "Ishtar: Side room (out)"],
426: [427, 0, 0, 269, 272, "196e6", b"", False, False, False, "Ishtar: Ishtar's room (in)"],
427: [426, 0, 0, 0, 0, "1970c", b"", False, False, False, "Ishtar: Ishtar's room (out)"],
428: [429, 0, 0, 272, 274, "19718", b"", False, False, False, "Ishtar: Puzzle room (in)"],
429: [428, 0, 0, 0, 0, "197e6", b"", False, False, False, "Ishtar: Puzzle room (out)"],
# Watermia
440: [441, 0, 0, 280, 286, "197f4", b"", False, False, False, "Watermia: Lance House (in)"],
441: [440, 0, 0, 0, 0, "1983e", b"", False, False, False, "Watermia: Lance House (out)"],
442: [443, 0, 0, 280, 282, "19818", b"", False, False, False, "Watermia: DS House (in)"],
443: [442, 0, 0, 0, 0, "19868", b"", False, False, False, "Watermia: DS House (out)"],
444: [445, 0, 0, 280, 283, "1980c", b"", False, False, False, "Watermia: Gambling House (in)"],
445: [444, 0, 0, 0, 0, "1985a", b"", False, False, False, "Watermia: Gambling House (out)"],
446: [447, 0, 0, 280, 284, "19824", b"", False, False, False, "Watermia: West House (in)"],
447: [446, 0, 0, 0, 0, "19882", b"", False, False, False, "Watermia: West House (out)"],
448: [449, 0, 0, 280, 285, "19830", b"", False, False, False, "Watermia: East House (in)"],
449: [448, 0, 0, 0, 0, "19890", b"", False, False, False, "Watermia: East House (out)"],
450: [451, 0, 0, 280, 287, "19800", b"", False, False, False, "Watermia: NW House (in)"],
451: [450, 0, 0, 0, 0, "1984c", b"", False, False, False, "Watermia: NW House (out)"],
452: [453, 0, 0, 288, 311, "", b"", False, False, True, "Watermia: Euro passage"],
453: [452, 0, 0, 0, 0, "", b"", False, False, True, "Euro: Watermia passage"],
# Great Wall
462: [463, 0, 0, 290, 291, "", b"", False, True, False, "Great Wall: Map 130 to Map 131"],
463: [462, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 131 to Map 130"],
464: [465, 0, 0, 293, 294, "", b"", False, True, False, "Great Wall: Map 131 to Map 133"],
465: [464, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 133 to Map 131"],
466: [467, 0, 0, 296, 297, "", b"", False, True, False, "Great Wall: Map 133 to Map 134"],
467: [466, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 134 to Map 133"],
468: [469, 0, 0, 297, 298, "", b"", False, True, False, "Great Wall: Map 134 to Map 135"],
469: [468, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 135 to Map 134"],
470: [471, 0, 0, 299, 300, "", b"", False, True, False, "Great Wall: Map 135 to Map 136"],
471: [470, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 136 to Map 135"],
# Euro
482: [483, 0, 0, 310, 312, "19cd2", b"", False, False, False, "Euro: Rolek Company (in)"],
483: [482, 0, 0, 0, 0, "19d74", b"", False, False, False, "Euro: Rolek Company (out)"],
484: [485, 0, 0, 310, 313, "19d0e", b"", False, False, False, "Euro: West House (in)"],
485: [484, 0, 0, 0, 0, "19e12", b"", False, False, False, "Euro: West House (out)"],
486: [487, 0, 0, 310, 314, "19cde", b"", False, False, False, "Euro: Rolek Mansion West (in)"],
487: [486, 0, 0, 0, 0, "19d9c", b"", False, False, False, "Euro: Rolek Mansion West (out)"],
488: [489, 0, 0, 310, 314, "19cea", b"", False, False, False, "Euro: Rolek Mansion East (in)"],
489: [488, 0, 0, 0, 0, "19da8", b"", False, False, False, "Euro: Rolek Mansion East (out)"],
490: [491, 0, 0, 310, 317, "19d26", b"", False, False, False, "Euro: Central House (in)"],
491: [490, 0, 0, 0, 0, "19e54", b"", False, False, False, "Euro: Central House (out)"],
492: [493, 0, 0, 310, 318, "19d32", b"", False, False, False, "Euro: Jeweler House (in)"],
493: [492, 0, 0, 0, 0, "19e62", b"", False, False, False, "Euro: Jeweler House (out)"],
494: [495, 0, 0, 310, 319, "19d3e", b"", False, False, False, "Euro: Twins House (in)"],
495: [494, 0, 0, 0, 0, "19e70", b"", False, False, False, "Euro: Twins House (out)"],
496: [497, 0, 0, 310, 320, "19cc6", b"", False, False, False, "Euro: Hidden House (in)"],
497: [496, 0, 0, 0, 0, "19d66", b"", False, False, False, "Euro: Hidden House (out)"],
498: [499, 0, 0, 310, 321, "19d4a", b"", False, False, False, "Euro: Shrine (in)"],
499: [498, 0, 0, 0, 0, "19e7e", b"", False, False, False, "Euro: Shrine (out)"],
500: [501, 0, 0, 310, 322, "19cba", b"", False, False, False, "Euro: Explorer's House (in)"],
501: [500, 0, 0, 0, 0, "19d58", b"", False, False, False, "Euro: Explorer's House (out)"],
502: [ 0, 0, 0, 310, 323, "19cf6", b"", False, False, False, "Euro: Store Entrance (in)"],
#503: [502, 0, 0, 0, 0, "", b"", False, False, False, "Euro: Store Entrance (out)"], #this doesn't exist!
504: [505, 0, 0, 310, 324, "19d02", b"", False, False, False, "Euro: Store Exit (in)"],
505: [504, 0, 0, 0, 0, "19e04", b"", False, False, False, "Euro: Store Exit (out)"],
506: [507, 0, 0, 314, 316, "19db4", b"", False, False, False, "Euro: Guest Room (in)"],
507: [506, 0, 0, 0, 0, "19df6", b"", False, False, False, "Euro: Guest Room (out)"],
508: [509, 0, 0, 310, 325, "19d1a", b"", False, False, False, "Euro: Dark Space House (in)"],
509: [508, 0, 0, 0, 0, "19e20", b"", False, False, False, "Euro: Dark Space House (out)"],
# Mt. Kress
522: [523, 0, 0, 330, 331, "", b"", False, True, False, "Mt. Kress: Map 160 to Map 161"],
523: [522, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 161 to Map 160"],
524: [525, 0, 0, 332, 333, "", b"", False, True, False, "Mt. Kress: Map 161 to Map 162 (W)"],
525: [524, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 161 (W)"],
526: [527, 0, 0, 332, 334, "", b"", False, True, False, "Mt. Kress: Map 161 to Map 162 (E)"],
527: [526, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 161 (E)"],
528: [529, 0, 0, 333, 337, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 163 (N)"],
529: [528, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 163 to Map 162 (N)"],
530: [531, 0, 0, 337, 336, "", b"", False, True, False, "Mt. Kress: Map 163 to Map 162 (S)"],
531: [530, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 163 (S)"],
532: [533, 0, 0, 333, 338, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 164"],
533: [532, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 164 to Map 162"],
534: [535, 0, 0, 335, 339, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 165"],
535: [534, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 162"],
536: [537, 0, 0, 339, 342, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 166"],
537: [536, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 166 to Map 165"],
538: [539, 0, 0, 340, 343, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 167"],
539: [538, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 167 to Map 165"],
540: [541, 0, 0, 341, 344, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 168"],
541: [540, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 168 to Map 165"],
542: [543, 0, 0, 344, 345, "", b"", False, True, False, "Mt. Kress: Map 168 to Map 169"],
543: [542, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 169 to Map 168"],
# Native's Village
552: [553, 0, 0, 350, 352, "19fe6", b"", False, False, False, "Native's Village: West House (in)"],
553: [552, 0, 0, 0, 0, "1a00c", b"", False, False, False, "Native's Village: West House (out)"],
554: [555, 0, 0, 350, 353, "19ff2", b"", False, False, False, "Native's Village: House w/Statues (in)"],
555: [554, 0, 0, 0, 0, "1a01a", b"", False, False, False, "Native's Village: House w/Statues (out)"],
556: [557, 0, 0, 351, 400, "", b"", False, False, True, "Native's Village: Dao Passage"],
557: [556, 0, 0, 0, 0, "", b"", False, False, True, "Dao: Native's Passage"],
# Ankor Wat
562: [563, 0, 0, 360, 361, "1a028", b"", False, True, False, "Ankor Wat: Map 176 to Map 177"],
563: [562, 0, 0, 0, 0, "1a036", b"", False, True, False, "Ankor Wat: Map 177 to Map 176"],
564: [565, 0, 0, 361, 363, "", b"", False, True, False, "Ankor Wat: Map 177 to Map 178"],
565: [564, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 178 to Map 177"],
566: [567, 0, 0, 365, 366, "", b"", False, True, False, "Ankor Wat: Map 178 to Map 179"],
567: [566, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 178"],
568: [569, 0, 0, 368, 367, "", b"", False, True, False, "Ankor Wat: Map 180 to Map 179"],
569: [568, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 180"],
570: [571, 0, 0, 367, 369, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 181"],
571: [570, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 181 to Map 179"],
572: [573, 0, 0, 371, 362, "", b"", False, True, False, "Ankor Wat: Map 181 to Map 177"],
573: [572, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 177 to Map 181"],
574: [575, 0, 0, 362, 372, "", b"", False, True, False, "Ankor Wat: Map 177 to Map 182"], # Garden
575: [574, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 182 to Map 177"],
576: [577, 0, 0, 372, 373, "", b"", False, True, False, "Ankor Wat: Map 182 to Map 183"],
577: [576, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 182"],
578: [579, 0, 0, 373, 376, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 184"],
579: [578, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 184 to Map 183"],
580: [581, 0, 0, 374, 378, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 185 (W)"],
581: [580, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 185 to Map 183 (W)"],
582: [583, 0, 0, 378, 375, "", b"", False, True, False, "Ankor Wat: Map 185 to Map 183 (E)"],
583: [582, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 185 (E)"],
584: [585, 0, 0, 375, 379, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 186"],
585: [584, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 183"],
586: [587, 0, 0, 379, 381, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 187 (W)"],
587: [586, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 186 (W)"],
588: [589, 0, 0, 381, 380, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 186 (E)"],
589: [588, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 187 (E)"],
590: [591, 0, 0, 381, 384, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 188"],
591: [590, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187"],
592: [593, 0, 0, 393, 386, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 189"],
593: [592, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 188"],
594: [595, 0, 0, 387, 389, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 190 (E)"],
595: [594, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (E)"],
596: [597, 0, 0, 388, 390, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 190 (W)"],
597: [596, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (W)"],
598: [599, 0, 0, 390, 391, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 191"],
599: [598, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 191 to Map 190"],
600: [ 0, 0, 0, 366, 368, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 180 (drop)"],
601: [ 0, 0, 0, 392, 381, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NW-L (drop)"],
602: [ 0, 0, 0, 392, 381, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NW-R (drop)"],
603: [ 0, 0, 0, 392, 383, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NE (drop)"],
604: [ 0, 0, 0, 393, 382, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 SW (drop)"],
605: [ 0, 0, 0, 389, 388, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (drop)"],
# Dao
612: [613, 0, 0, 400, 401, "1a27c", b"", False, False, False, "Dao: NW House (in)"],
613: [612, 0, 0, 0, 0, "1a2d2", b"", False, False, False, "Dao: NW House (out)"],
614: [615, 0, 0, 400, 402, "1a288", b"", False, False, False, "Dao: Neil's House (in)"],
615: [614, 0, 0, 0, 0, "1a30a", b"", False, False, False, "Dao: Neil's House (out)"],
616: [617, 0, 0, 400, 403, "1a294", b"", False, False, False, "Dao: Snake Game House (in)"],
617: [616, 0, 0, 0, 0, "1a2ee", b"", False, False, False, "Dao: Snake Game House (out)"],
618: [619, 0, 0, 400, 404, "1a2a0", b"", False, False, False, "Dao: SW House (in)"],
619: [618, 0, 0, 0, 0, "1a2fc", b"", False, False, False, "Dao: SW House (out)"],
620: [621, 0, 0, 400, 405, "1a2ac", b"", False, False, False, "Dao: S House (in)"],
621: [620, 0, 0, 0, 0, "1a2e0", b"", False, False, False, "Dao: S House (out)"],
622: [623, 0, 0, 400, 406, "1a2b8", b"", False, False, False, "Dao: SE House (in)"],
623: [622, 0, 0, 0, 0, "1a318", b"", False, False, False, "Dao: SE House (out)"],
# Pyramid
634: [635, 0, 0, 411, 415, "", b"", False, True, False, "Pyramid: Map 204 to Map 205"],
635: [634, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 205 to Map 204"],
636: [637, 0, 0, 413, 416, "", b"", False, True, False, "Pyramid: Map 204 to Map 206"], # Room 1
637: [636, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 206 to Map 204"],
638: [639, 0, 0, 417, 418, "", b"", False, True, False, "Pyramid: Map 206 to Map 207"],
639: [638, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 207 to Map 206"],
640: [641, 0, 0, 419, 442, "", b"", False, True, False, "Pyramid: Map 207 to Map 218"],
641: [640, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 207"],
642: [643, 0, 0, 413, 420, "", b"", False, True, False, "Pyramid: Map 204 to Map 208"], # Room 2
643: [642, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 208 to Map 204"],
644: [645, 0, 0, 421, 422, "", b"", False, True, False, "Pyramid: Map 208 to Map 209"],
645: [644, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 209 to Map 208"],
646: [647, 0, 0, 423, 443, "", b"", False, True, False, "Pyramid: Map 209 to Map 218"],
647: [646, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 209"],
648: [649, 0, 0, 413, 431, "", b"", False, True, False, "Pyramid: Map 204 to Map 214"], # Room 3
649: [648, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 214 to Map 204"],
650: [651, 0, 0, 434, 435, "", b"", False, True, False, "Pyramid: Map 214 to Map 215"],
651: [650, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 215 to Map 214"],
652: [653, 0, 0, 435, 444, "", b"", False, True, False, "Pyramid: Map 215 to Map 218"],
653: [652, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 215"],
654: [655, 0, 0, 413, 436, "", b"", False, True, False, "Pyramid: Map 204 to Map 216"], # Room 4
655: [654, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 216 to Map 204"],
656: [657, 0, 0, 437, 438, "", b"", False, True, False, "Pyramid: Map 216 to Map 217"],
657: [656, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 217 to Map 216"],
658: [659, 0, 0, 439, 440, "", b"", False, True, False, "Pyramid: Map 217 to Map 219"],
659: [658, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 219 to Map 217"],
660: [661, 0, 0, 441, 445, "", b"", False, True, False, "Pyramid: Map 219 to Map 218"],
661: [660, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 219"],
662: [663, 0, 0, 413, 426, "", b"", False, True, False, "Pyramid: Map 204 to Map 212"], # Room 5
663: [662, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 212 to Map 204"],
664: [665, 0, 0, 429, 430, "", b"", False, True, False, "Pyramid: Map 212 to Map 213"],
665: [664, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 213 to Map 212"],
666: [667, 0, 0, 430, 446, "", b"", False, True, False, "Pyramid: Map 213 to Map 218"],
667: [666, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 213"],
668: [669, 0, 0, 413, 424, "", b"", False, True, False, "Pyramid: Map 204 to Map 210"], # Room 6
669: [668, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 210 to Map 204"],
670: [671, 0, 0, 424, 425, "", b"", False, True, False, "Pyramid: Map 210 to Map 211"],
671: [670, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 211 to Map 210"],
672: [673, 0, 0, 425, 447, "", b"", False, True, False, "Pyramid: Map 211 to Map 218"],
673: [672, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 211"],
# Babel
682: [683, 0, 0, 460, 461, "", b"", False, True, False, "Babel: Map 222 to Map 223"],
683: [682, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 223 to Map 222"],
684: [685, 0, 0, 462, 463, "", b"", False, True, False, "Babel: Map 223 to Map 224"],
685: [684, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 224 to Map 223"],
686: [687, 0, 0, 463, 474, "", b"", False, True, False, "Babel: Map 224 to Map 242"], # Castoth
687: [686, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 242 to Map 224"],
688: [689, 0, 0, 463, 475, "", b"", False, True, False, "Babel: Map 224 to Map 243"], # Viper
689: [688, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 243 to Map 224"],
690: [691, 0, 0, 463, 465, "", b"", False, True, False, "Babel: Map 224 to Map 225 (bottom)"],
691: [690, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 225 to Map 224 (bottom)"],
692: [693, 0, 0, 466, 464, "", b"", False, True, False, "Babel: Map 225 to Map 224 (top)"],
693: [692, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 224 to Map 225 (top)"],
694: [695, 0, 0, 464, 476, "", b"", False, True, False, "Babel: Map 224 to Map 244"], # Vampires
695: [694, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 244 to Map 224"],
696: [697, 0, 0, 464, 477, "", b"", False, True, False, "Babel: Map 224 to Map 245"], # Sand Fanger
697: [696, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 245 to Map 224"],
698: [699, 0, 0, 464, 469, "", b"", False, True, False, "Babel: Map 224 to Map 226"],
699: [698, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 226 to Map 224"],
#700: [701, 0, 0, 470, 471, "", b"", False, True, False, "Babel: Map 226 to Map 227"], #DUPLICATE W/BOSS EXITS
#701: [700, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 227 to Map 226"],
702: [703, 0, 0, 471, 478, "", b"", False, True, False, "Babel: Map 227 to Map 246"], # Mummy Queen
703: [702, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 246 to Map 227"],
704: [705, 0, 0, 471, 467, "", b"", False, True, False, "Babel: Map 227 to Map 225 (bottom)"],
705: [704, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 225 to Map 227 (bottom)"],
706: [707, 0, 0, 468, 472, "", b"", False, True, False, "Babel: Map 225 to Map 227 (top)"],
707: [706, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 227 to Map 225 (top)"],
708: [709, 0, 0, 472, 473, "", b"", False, True, False, "Babel: Map 227 to Map 222"],
709: [708, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 222 to Map 227"],
# Jeweler's Mansion
720: [721, 0, 0, 8, 480, "8d32a", b"", False, True, True, "Mansion entrance"],
721: [720, 0, 0, 480, 400, "8fcb4", b"", False, True, True, "Mansion exit"]
}
|
36289
|
from logging import warning
from api import gitlab
from utilities import validate, types
gitlab = gitlab.GitLab(types.Arguments().url)
def get_all(projects):
snippets = {}
for project in projects:
for key, value in project.items():
details = gitlab.get_project_snippets(key)
if validate.api_result(details):
warning("[*] Found %s snippets for project %s", len(details), value)
for item in details:
snippets.update({item['id']: item['web_url']})
return snippets
def sniff_secrets(snippets):
if len(snippets) == 0:
return []
secrets = []
raw_data = {}
for snippet_id, snippet_url in snippets.items():
raw_content = gitlab.get_snippet_raw(snippet_id)
raw_data.update({snippet_url: raw_content})
if len(raw_data) > 0:
monitor = types.SecretsMonitor()
found_secrets = monitor.sniff_secrets(raw_data)
for secret in found_secrets:
secrets.append(secret)
return secrets
|
36295
|
import os
import aiohttp
import asyncio
import json
import time
import datetime
import logging
import gidgethub
import requests
from gidgethub import aiohttp as gh_aiohttp
import sys
import pandas as pd
sys.path.append("..")
from utils.auth import get_jwt, get_installation, get_installation_access_token
from utils.test_auth_ipipe import xlyOpenApiRequest
from utils.readConfig import ReadConfig
logging.basicConfig(
level=logging.INFO,
filename='../logs/regularMark.log',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
localConfig = ReadConfig(path='../conf/config.ini')
class MarkTimeoutCI(object):
def __init__(self, user, repo, gh):
self.pr_url = 'https://api.github.com/repos/%s/%s/pulls?per_page=100&page=1&q=addClass' % (
user, repo)
self.gh = gh
self.user = user
self.repo = repo
self.mark_url = 'https://xly.bce.baidu.com/open-api/ipipe/rest/v1/job-builds/{}/mark'
self.rerun_url = 'http://www.cipaddlepaddle.cn:8081/%s/%s/{}/{}' % (
user, repo)
self.comment_url = 'https://api.github.com/repos/%s/%s/issues/{}/comments' % (
user, repo)
def getNextUrl(self, link):
"""遍历所有的PR"""
next_str = None
for i in link.split(','):
if 'rel="next"' in i:
next_str = i
break
if next_str != None:
start_index = next_str.index('<')
end_index = next_str.index('>')
url = next_str[start_index + 1:end_index]
else:
url = None
return url
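    # Illustrative example (hedged, not from the original code): given
    #   link = '<https://api.github.com/.../pulls?page=2>; rel="next", '
    #          '<https://api.github.com/.../pulls?page=9>; rel="last"'
    # getNextUrl returns the page-2 url; it returns None once no
    # rel="next" entry remains.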
async def getBeforeSevenDaysPRList(self):
"""
        1. Fetch PRs created between 7 and 30 days ago: fetch only, no processing.
        2. PRs older than 30 days are skipped for now: GitHub is assumed to have
           already flagged them as code conflicts. Handle them later if needed.
        return : [{PR, commit, status_url}]
"""
today = datetime.date.today()
seven_Days_ago = str(today - datetime.timedelta(days=7))
month_Days_ago = str(today - datetime.timedelta(days=30))
overduelist = []
while (self.pr_url != None):
(code, header, body) = await self.gh._request(
"GET", self.pr_url,
{'accept': 'application/vnd.github.antiope-preview+json'})
res = json.loads(body.decode('utf8'))
for item in res:
if item['created_at'] < seven_Days_ago and item[
'created_at'] > month_Days_ago:
item_dic = {}
item_dic['PR'] = item['number']
item_dic['commit'] = item['head']['sha']
item_dic['status_url'] = item['statuses_url']
overduelist.append(item_dic)
self.pr_url = self.getNextUrl(header['link'])
print("before %s's PRs: %s" % (seven_Days_ago, overduelist))
logger.info("before %s's PRs: %s" % (seven_Days_ago, overduelist))
return overduelist
async def getCIstatus(self):
"""
        Collect the CI status list for each qualifying PR:
        1. Get the PR's latest commit url.
        2. Get the most recent CI runs for that commit (dropping stale GitHub
           data, e.g. superseded pending entries).
        3. Check whether the latest CI runs are older than 7 days; a single run
           older than 7 days is enough to require marking.
        4. Only successful CI runs are marked as failed.
"""
PRList = await self.getBeforeSevenDaysPRList()
today = datetime.date.today()
seven_Days_ago = str(today - datetime.timedelta(days=7))
CI_STATUS_LIST = []
for item in PRList:
commit_ci_status = {}
commit_ci_status['PR'] = item['PR']
commit_ci_status['commit'] = item['commit']
status_url = item['status_url']
res = requests.get(status_url,
headers={'authorization': "token xxx"},
timeout=15).json()
commit_ci_status['CI'] = []
            if_before_seven_day = []  # flags whether each CI run is older than 7 days
for ci in res:
already_exit = False
if ci['context'] != 'license/cla':
for i in commit_ci_status['CI']:
                        if ci['context'] == i['ciName'] and i['time'] > ci[
                                'created_at']:  # drop stale entries from the GitHub api
already_exit = True
break
if already_exit == False:
item_dic = {}
item_dic['time'] = ci['created_at']
item_dic['ciName'] = ci['context']
item_dic['status'] = ci['state']
item_dic['markId'] = ci['target_url'].split('/')[-1]
commit_ci_status['CI'].append(item_dic)
                        if item_dic['time'] > seven_Days_ago:  # the latest CI run is less than 7 days old
if_before_seven_day.append(False)
else:
                            if_before_seven_day.append(True)  # True: older than 7 days
            if True in if_before_seven_day:  # a single CI run older than 7 days forces marking
print('%s is 7 ago..........' % item['PR'])
CI_STATUS_LIST.append(commit_ci_status)
else:
print('%s not 7 ago' % item['PR'])
logger.info("need to mark ci list: %s" % CI_STATUS_LIST)
return CI_STATUS_LIST
async def markCIFailed(self):
"""
mark success/pending ci to failed
"""
CIStatusList = await self.getCIstatus()
REQUIRED_CI = localConfig.cf.get('%s/%s' % (self.user, self.repo),
'REQUIRED_CI')
DATA = {"data": "FAIL", "message": "Paddle-bot", "type": "MARK"}
json_str = json.dumps(DATA)
headers = {
"Content-Type": "application/json",
"IPIPE-UID": "Paddle-bot"
}
for item in CIStatusList:
PR = item['PR']
commit = item['commit']
ci_list = item['CI']
mark_ci_list = []
for ci in ci_list:
if ci['ciName'] in REQUIRED_CI and ci[
'status'] in ['success', 'pending']:
markId = ci['markId']
mark_url = self.mark_url.format(markId)
res = xlyOpenApiRequest().post_method(
mark_url, json_str, headers=headers)
if res.status_code == 200 or res.status_code == 201:
mark_ci_list.append(ci['ciName'])
print('%s_%s_%s mark success!' %
(PR, commit, ci['ciName']))
logger.info('%s_%s_%s mark success!' %
(PR, commit, ci['ciName']))
else:
print('%s_%s_%s mark failed!' %
(PR, commit, ci['ciName']))
logger.error('%s_%s_%s mark failed!' %
(PR, commit, ci['ciName']))
if len(mark_ci_list) > 0:
marked = self.queryIfHasMark(PR, commit)
if marked == False:
                    await self.inform(item)
else:
print('%s_%s has marked!!!!' % (PR, commit))
logger.info('%s_%s has marked!!!!' % (PR, commit))
data = {
'TIME': time.strftime("%Y%m%d %H:%M:%S", time.localtime()),
'PR': PR,
'COMMITID': commit,
'CINAME': mark_ci_list
}
self.save_markci_job(data)
def queryIfHasMark(self, PR, commitid):
"""marked 是否已经标记过"""
marked = True
df = pd.read_csv('../buildLog/mark_timeout_ci.csv')
queryKey = df[(df['PR'] == PR) & (df['COMMITID'] == commitid)]
if queryKey.empty:
marked = False
return marked
def create_markci_csv(self, filename):
"""创建存储文件"""
df = pd.DataFrame(columns=['TIME', 'PR', 'COMMITID', 'CINAME'])
df.to_csv(filename)
def save_markci_job(self, data):
"""将kill的任务存到"""
filename = '../buildLog/mark_timeout_ci.csv'
if os.path.exists(filename) == False:
self.create_markci_csv(filename)
write_data = pd.DataFrame(data)
write_data.to_csv(filename, mode='a', header=False)
async def inform(self, item):
"""Paddle-bot发出评论"""
#POST /repos/:owner/:repo/issues/:issue_number/comments
rerun_ci_link = self.rerun_url.format(item['PR'], item['commit'])
comment_url = self.comment_url.format(item['PR'])
shortId = item['commit'][0:7]
message = "Sorry to inform you that %s's CIs have passed for more than 7 days. To prevent PR conflicts, you need to re-run all CIs manually. " % shortId
await self.gh.post(comment_url, data={"body": message})
async def main(user, repo):
async with aiohttp.ClientSession() as session:
app_id = os.getenv("GH_APP_ID")
jwt = get_jwt(app_id)
gh = gh_aiohttp.GitHubAPI(session, user)
try:
installation = await get_installation(gh, jwt, user)
except ValueError as ve:
print(ve)
else:
access_token = await get_installation_access_token(
gh, jwt=jwt, installation_id=installation["id"])
# treat access_token as if a personal access token
gh = gh_aiohttp.GitHubAPI(
session, user, oauth_token=access_token["token"])
markCIObject = MarkTimeoutCI(user, repo, gh)
await markCIObject.markCIFailed()
loop = asyncio.get_event_loop()
loop.run_until_complete(main('PaddlePaddle', 'Paddle'))
|
36298
|
from .abstract import AbstractAgentBasedModel
import keras.backend as K
import numpy as np
from tensorflow import TensorShape
from keras.layers import Dense, Reshape
class TrajectorySamplerNetwork(AbstractAgentBasedModel):
'''
Supervised model. Takes in a set of trajectories from the current state;
learns a distribution that will regenerate these given some source of
noise.
Essentially, our goal is to minimize the average error between the whole
set of trajectories and our samples.
'''
def __init__(self):
pass
def AddSamplerLayer(x, num_samples, traj_length, feature_size, activation=None):
'''
    Turn a flat (dense) input tensor into a set of sampled trajectories of
    shape [batch, num_samples, traj_length, feature_size].
Parameters:
x: input tensor
num_samples: number of trajectories to generate
traj_length: how many points we want to sample in each trajectory
feature_size: dimensionality of each trajectory point
activation: optional activation function to add
'''
x = Dense(num_samples * traj_length * feature_size)(x)
if activation is not None:
x = activation(x)
x = Reshape((num_samples, traj_length, feature_size))(x)
return x
class TrajectorySamplerLoss(object):
def __init__(self, num_samples, traj_length, feature_size, acc_cost=None):
self.num_samples = num_samples
self.traj_length = traj_length
self.feature_size = feature_size
self.acc_cost = acc_cost
self.__name__ = "trajectory_sampler_loss"
def __call__(self, target, pred):
'''
Pred must be of size:
[batch_size=None, num_samples, traj_length, feature_size]
Targets must be of size:
[batch_size=None, traj_length, feature_size]
You can use the tools in "split" to generate this sort of data (for
targets). The actual loss function is just the L2 norm between each
point.
'''
# NOTE: cannot tile here, because target and pred have to be the same
        # size. THANKS A LOT, KERAS.
# Tile each example point by the total number of samples
# target = K.tile(target, TensorShape([1,self.num_samples,1,1]))
# Compute L2 norm...
x = K.square(target - pred)
# sum along each output dimension for each point
x = K.sum(x,axis=-1,keepdims=False)
# square root and sum along each trajectory
x = K.sum(K.sqrt(x),axis=2,keepdims=False)
# mean across each sample
#x = K.min(x,axis=1,keepdims=False)
x = K.mean(x,axis=1,keepdims=False) #+ K.min(x,axis=1,keepdims=False)
        if self.acc_cost is not None:
            # Take the L2 norm of the acceleration output and add it to the
            # loss, weighted by acc_cost.
            # NOTE: we may end up computing this elsewhere to avoid extra
            # penalties and stuff like that.
            # Assumption: the original left `cost` undefined; acceleration is
            # approximated here by second differences along the trajectory axis.
            vel = pred[:, :, 1:, :] - pred[:, :, :-1, :]
            acc = vel[:, :, 1:, :] - vel[:, :, :-1, :]
            cost = self.acc_cost * K.sum(K.square(acc))
            return x + cost
else:
return x
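# --- Usage sketch (hedged, not from the original file): wire AddSamplerLayer
# into a tiny functional model and compile it against TrajectorySamplerLoss.
# The shapes and the optimizer choice below are illustrative assumptions.
if __name__ == "__main__":
    from keras.layers import Input
    from keras.models import Model
    num_samples, traj_length, feature_size = 4, 8, 2
    state = Input(shape=(16,))
    samples = AddSamplerLayer(state, num_samples, traj_length, feature_size)
    model = Model(inputs=state, outputs=samples)
    model.compile(
        optimizer="adam",
        loss=TrajectorySamplerLoss(num_samples, traj_length, feature_size))
    model.summary()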
|
36305
|
import numpy as np
from rampwf.utils import BaseGenerativeRegressor
class GenerativeRegressor(BaseGenerativeRegressor):
def __init__(self, max_dists, target_dim):
self.decomposition = 'autoregressive'
def fit(self, X_array, y_array):
pass
def predict(self, X_array):
# constant prediction with value equal to 10
n_samples = X_array.shape[0]
types = ['norm']
means = np.full(shape=(n_samples, 1), fill_value=10)
sigmas = np.zeros((n_samples, 1))
params = np.concatenate((means, sigmas), axis=1)
weights = np.ones((n_samples, 1))
return weights, types, params
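# --- Usage sketch (hedged; the input array is illustrative only): the
# regressor emits one Gaussian component N(10, 0) per sample with unit weight.
if __name__ == "__main__":
    X = np.zeros((3, 5))
    weights, types, params = GenerativeRegressor(None, 0).predict(X)
    print(weights.shape, types, params[0])  # (3, 1) ['norm'] [10.  0.]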
|
36342
|
import cv2 as cv # opencv
import copy # for deepcopy on images
import numpy as np # numpy
from random import randint # for random values
import threading  # for daemon processing
from pathlib import Path # for directory information
import os # for directory information
from constants import constants # constants
class PlantDetector:
"""Dynamically apply detection algorithms to source images
All images are sourced from and follow naming standard from
the KOMATSUNA dataset
http://limu.ait.kyushu-u.ac.jp/~agri/komatsuna/
METHODS
__init__(self, src='multi_plant', labels='multi_label') [void]
prepares the images and labels for display
initializes windows and trackbars
runs background subtraction on plant group images
on_low_H_thresh_trackbar(self, val)
on_high_H_thresh_trackbar(self, val)
on_low_S_thresh_trackbar(self, val)
on_high_S_thresh_trackbar(self, val)
on_low_V_thresh_trackbar(self, val)
on_high_V_thresh_trackbar(self, val)
HSV trackbar triggers
prepare_plant_collection(self, src, labelsrc)
returns [plants, plant_groups, labels]
constructor helper function for loading plant images
parse(self, auto_inc=False, mode=0) [void]
main function
dynamically applies
HSV inRange filters
watershed algorithm
to the currently displayed image
based on selected HSV trackbar values
six modes are displayable:
mode: window1 + window2
0 : original (fallback) + original
1 : HSV filter range + original
2 : bare watershed masks + labels
3 : watershed masks w/ bg + original
4 : sequential bg sub + original
5 : seq bg sub w/ watersh + original
additionally, the user is allowed control
key | function
m | next image
n | prev image
s | save selected image in the selected mode
z | save all images in selected mode
esc | exit the program
d | dynamically calculate dice
f | show dice data based on saved images
1-5 | select the respective mode
parse is also used for saving all images
parse is run for all images in the given mode
        either in parallel or in place
save_one(self, mode, image, filename) [void]
saves the image in the appropriate mode folder with filename
HSV_filtering_and_watershed(self, input_im) [mask, input_im, im_threshold]
image is filtered through HSV inRange according to trackbar values
image is prepared (threshold) for watershed algorithm
watershed algorithm is applied
markers are applied to image
dicify_wrapper(self, image_id) [void]
runs dice summary in background
dicify_summary(self, image_id) [void]
        prints summary of dice values for image, plant, dataset
note: based on saved images
dicify_one(self, image_id) [dice]
returns the dice value for the given image_id
based on saved segmentation and label images
dicify_one_dynamic(self, mask, image_id) [dice]
returns dice value for the given image_id
based on given mask (current) and saved label image
dicify_plant(self, plant_id) [mean, min, max]
returns mean, min and max dice values for images in plant group
dicify_all(self) [mean, min, max]
returns mean, min and max dice values for images in dataset
and for each plant
"""
def __init__(self, src='multi_plant', labels='multi_label'):
self.c = constants()
self.window1 = self.c.window.window1
self.window2 = self.c.window.window2
cv.namedWindow(self.window1)
cv.namedWindow(self.window2)
cv.moveWindow(self.window2, 550, 90)
cv.createTrackbar(
self.c.HSV.low_H_name, self.window1, self.c.HSV.low_H,
self.c.HSV.max_value_H, self.on_low_H_thresh_trackbar)
cv.createTrackbar(
self.c.HSV.high_H_name, self.window1, self.c.HSV.high_H,
self.c.HSV.max_value_H, self.on_high_H_thresh_trackbar)
cv.createTrackbar(
self.c.HSV.low_S_name, self.window1, self.c.HSV.low_S,
self.c.HSV.max_value, self.on_low_S_thresh_trackbar)
cv.createTrackbar(
self.c.HSV.high_S_name, self.window1, self.c.HSV.high_S,
self.c.HSV.max_value, self.on_high_S_thresh_trackbar)
cv.createTrackbar(
self.c.HSV.low_V_name, self.window1, self.c.HSV.low_V,
self.c.HSV.max_value, self.on_low_V_thresh_trackbar)
cv.createTrackbar(
self.c.HSV.high_V_name, self.window1, self.c.HSV.high_V,
self.c.HSV.max_value, self.on_high_V_thresh_trackbar)
self.plants, self.plant_groups, self.labels = self.prepare_plant_collection(src, labels)
# source https://docs.opencv.org/3.4/d1/dc5/tutorial_background_subtraction.html
for key in self.plant_groups:
if self.c.bgsub.mod == 'MOG2':
backSub = cv.createBackgroundSubtractorMOG2(history=60, detectShadows=True)
elif self.c.bgsub.mod == 'KNN':
backSub = cv.createBackgroundSubtractorKNN()
fgMask = None
for i, image in enumerate(self.plant_groups[key]):
fgMask = backSub.apply(image)
self.plant_groups[key][i] = fgMask
def on_low_H_thresh_trackbar(self, val):
self.c.HSV.low_H = val
self.c.HSV.low_H = min(self.c.HSV.high_H-1, self.c.HSV.low_H)
cv.setTrackbarPos(
self.c.HSV.low_H_name, self.window1, self.c.HSV.low_H)
def on_high_H_thresh_trackbar(self, val):
self.c.HSV.high_H = val
self.c.HSV.high_H = max(self.c.HSV.high_H, self.c.HSV.low_H+1)
cv.setTrackbarPos(
self.c.HSV.high_H_name, self.window1, self.c.HSV.high_H)
def on_low_S_thresh_trackbar(self, val):
self.c.HSV.low_S = val
self.c.HSV.low_S = min(self.c.HSV.high_S-1, self.c.HSV.low_S)
cv.setTrackbarPos(
self.c.HSV.low_S_name, self.window1, self.c.HSV.low_S)
def on_high_S_thresh_trackbar(self, val):
self.c.HSV.high_S = val
self.c.HSV.high_S = max(self.c.HSV.high_S, self.c.HSV.low_S+1)
cv.setTrackbarPos(
self.c.HSV.high_S_name, self.window1, self.c.HSV.high_S)
def on_low_V_thresh_trackbar(self, val):
self.c.HSV.low_V = val
self.c.HSV.low_V = min(self.c.HSV.high_V-1, self.c.HSV.low_V)
cv.setTrackbarPos(
self.c.HSV.low_V_name, self.window1, self.c.HSV.low_V)
def on_high_V_thresh_trackbar(self, val):
self.c.HSV.high_V = val
self.c.HSV.high_V = max(self.c.HSV.high_V, self.c.HSV.low_V+1)
cv.setTrackbarPos(
self.c.HSV.high_V_name, self.window1, self.c.HSV.high_V)
def prepare_plant_collection(self, src, labelsrc):
plants = []
plant_groups = dict()
files = os.listdir(src)
files.sort()
for fl in files:
input_im = cv.imread(src + '/' + fl, cv.IMREAD_COLOR)
if (input_im is None):
exit()
plants.append({
'p': input_im,
'n': fl
})
group_id = f'{fl.split("_")[1]}{fl.split("_")[2]}'
if group_id not in plant_groups:
plant_groups[group_id] = []
plant_groups[group_id].append(input_im)
labels = []
files = os.listdir(labelsrc)
files.sort()
for fl in files:
input_im = cv.imread(labelsrc + '/' + fl)
if (input_im is None):
exit()
labels.append(input_im)
return plants, plant_groups, labels
def parse(self, auto_inc=False, mode=0):
key = 0
i = 0
l_tog = False
while key != self.c.cntr.exit_k:
if auto_inc and i == len(self.plants):
break
image = copy.deepcopy(self.plants[i]['p'])
group_id = f'{self.plants[i]["n"].split("_")[1]}{self.plants[i]["n"].split("_")[2]}'
mask, markers, im_threshold = self.HSV_filtering_and_watershed(image)
_, bgfgSegMarkers, _ = self.HSV_filtering_and_watershed(
cv.cvtColor(self.plant_groups[group_id][i % 60], cv.COLOR_GRAY2BGR)
)
if mode == 5:
alt = bgfgSegMarkers
text = f'Watershed new areas w/ fg/bg segm. {self.plants[i]["n"]}'
tcol = (255, 255, 255)
elif mode == 4:
alt = copy.deepcopy(self.plant_groups[group_id][i % 60])
text = f'FG/BG segmentation {self.plants[i]["n"]}'
tcol = (255, 255, 255)
elif mode == 3:
alt = markers
text = f'Watershed algorithm areas w/ bg {self.plants[i]["n"]}'
tcol = (0, 0, 0)
elif mode == 2:
alt = mask
text = f'Watershed algorithm areas bare {self.plants[i]["n"]}'
tcol = (255, 255, 255)
elif mode == 1:
alt = im_threshold
text = f'HSV inRange threshold {self.plants[i]["n"]}'
tcol = (255, 255, 255)
else:
alt = copy.deepcopy(self.plants[i]['p'])
text = f'Original {self.plants[i]["n"]}'
tcol = (0, 0, 0)
if self.c.asth.text:
cv.putText(alt, text, (0, 20), self.c.asth.font, .5, tcol, 1)
cv.imshow(self.window1, alt)
if l_tog:
cv.imshow(self.window2, self.labels[i])
else:
cv.imshow(self.window2, self.plants[i]['p'])
key = cv.waitKey(10)
if key == self.c.cntr.prev_k and i > 0:
i -= 1
if key == self.c.cntr.next_k and i < len(self.plants) - 1:
i += 1
if key == self.c.cntr.save or auto_inc:
self.save_one(mode, alt, self.plants[i]["n"])
if key == self.c.cntr.save_all:
self.parse(True, mode)
if key == self.c.cntr.dice:
print(self.dicify_one_dynamic(mask, self.plants[i]['n']))
if key == self.c.cntr.dice_more:
self.dicify_wrapper(self.plants[i]['n'])
if key == self.c.cntr.m1_k:
mode = 1
l_tog = False
elif key == self.c.cntr.m2_k:
mode = 2
l_tog = True
            elif key == self.c.cntr.m3_k:
mode = 3
l_tog = False
            elif key == self.c.cntr.m4_k:
mode = 4
l_tog = False
            elif key == self.c.cntr.m5_k:
mode = 5
l_tog = False
if auto_inc:
i += 1
def save_one(self, mode, image, filename):
Path(f'formatted/{self.c.cntr.modes[mode]}').mkdir(parents=True, exist_ok=True)
cv.imwrite(f'formatted/{self.c.cntr.modes[mode]}/{filename}', image)
def HSV_filtering_and_watershed(self, input_im):
im_threshold = cv.inRange(
cv.cvtColor(input_im, cv.COLOR_BGR2HSV),
(self.c.HSV.low_H, self.c.HSV.low_S, self.c.HSV.low_V),
(self.c.HSV.high_H, self.c.HSV.high_S, self.c.HSV.high_V)
)
# source https://docs.opencv.org/master/d3/db4/tutorial_py_watershed.html
kernel = np.ones((3, 3), np.uint8)
opening = cv.morphologyEx(im_threshold, cv.MORPH_OPEN, kernel, iterations=5)
sure_bg = cv.dilate(opening, kernel, iterations=7)
dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)
_, sure_fg = cv.threshold(dist_transform, 0.3*dist_transform.max(), 255, 0)
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg, sure_fg)
_, markers = cv.connectedComponents(sure_fg)
markers = markers + 1
markers[unknown == 255] = 0
markers = cv.watershed(input_im, markers)
input_im[markers == -1] = [255, 0, 0]
for i in range(2, markers.max() + 1):
input_im[markers == i] = [
randint(0, 255), randint(0, 255), randint(0, 255)
] if self.c.xtra.disco else [
(40 + i * 40) % 255, (i * 40) % 255, (50 + i * 40) % 255
]
mask = copy.deepcopy(input_im)
mask[markers < 2] = [0, 0, 0]
return mask, input_im, im_threshold
def dicify_wrapper(self, image_id):
thread = threading.Thread(target=self.dicify_summary, args=(image_id,), daemon=True)
thread.start()
def dicify_summary(self, image_id):
print(self.dicify_all())
def dicify_one(self, image_id):
# Source: https://github.com/Kornelos/CV_MINI_1/blob/master/process_plants.py
img = cv.imread(f'multi_label/label_{image_id.split("_", 1)[1]}')
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_, gt = cv.threshold(img, 1, 255, cv.THRESH_BINARY)
img = cv.imread(f'formatted/ws_mask/{image_id}')
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_, rt = cv.threshold(img, 1, 255, cv.THRESH_BINARY)
k = 255
dice = np.sum(rt[gt == k]) * 2.0 / (np.sum(rt[rt == k]) + np.sum(gt[gt == k]))
return dice
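    # Dice refresher (hedged, standard definition): for binary masks A and B,
    # dice = 2*|A intersect B| / (|A| + |B|). Since both masks hold 255 at
    # foreground pixels, the 255 factors cancel in the ratio above.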
def dicify_one_dynamic(self, mask, image_id):
img = cv.imread(f'multi_label/label_{image_id.split("_", 1)[1]}')
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_, gt = cv.threshold(img, 1, 255, cv.THRESH_BINARY)
img = cv.cvtColor(mask, cv.COLOR_BGR2GRAY)
_, rt = cv.threshold(img, 1, 255, cv.THRESH_BINARY)
k = 255
dice = np.sum(rt[gt == k]) * 2.0 / (np.sum(rt[rt == k]) + np.sum(gt[gt == k]))
return dice
def dicify_plant(self, plant_id):
vals = []
for im_data in [
t for t in self.plants
if t['n'].split('_')[2] == plant_id
]:
vals.append(self.dicify_one(im_data['n']))
return [np.mean(vals), min(vals), max(vals)]
def dicify_all(self):
means = []
mins = []
maxs = []
summ = "id | mean | min | max"
for i in range(0, 5):
plant = self.dicify_plant(f'0{str(i)}')
means.append(plant[0])
mins.append(plant[1])
maxs.append(plant[2])
summ += f'\n0{str(i)} | {round(plant[0], 3)} | {round(plant[1], 3)} | {round(plant[2], 3)}'
summ += f'\nsm | {round(np.mean(means), 3)} | {round(min(mins), 3)} | {round(max(maxs), 3)}'
return summ
# Main
plDt = PlantDetector()
plDt.parse()
|
36349
|
from pathlib import Path
import click
import cligj
import geojson
import mercantile
from shapely.geometry import asShape, box
from shapely.ops import split
@click.command()
@cligj.features_in_arg
@click.option(
'-z',
'--min-zoom',
type=int,
required=True,
help='Min zoom level to create tiles for',
)
@click.option(
'-Z',
'--max-zoom',
type=int,
required=True,
help='Max zoom level to create tiles for (inclusive)',
)
@click.option(
'-d',
'--tile-dir',
type=click.Path(file_okay=False, dir_okay=True, writable=True))
@click.option(
'--allowed-geom-type',
type=str,
required=False,
multiple=True,
default=[],
help='Geometry types to keep in exported GeoJSON features.')
def cut_geojson(features, min_zoom, max_zoom, tile_dir, allowed_geom_type):
"""Cut GeoJSON features into xyz tiles
"""
geometry_types = [
'Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon',
'MultiPolygon']
if not all(t in geometry_types for t in allowed_geom_type):
raise ValueError(f'allowed_geom_type must be one of: {geometry_types}')
tile_dir = Path(tile_dir)
for feature in features:
geometry = asShape(feature['geometry'])
tiles = find_tiles(geometry, min_zoom, max_zoom)
for tile in tiles:
clipped_geometries = clip_geometry_to_tile(geometry, tile)
new_features = []
for clipped_geometry in clipped_geometries:
if allowed_geom_type:
geom_type = clipped_geometry.type
if geom_type not in allowed_geom_type:
print(f'Skipping feature of type: {geom_type}')
continue
new_features.append(
geojson.Feature(
geometry=clipped_geometry,
properties=feature['properties']))
# Write feature to tile_dir
this_tile_dir = (tile_dir / str(tile.z) / str(tile.x))
this_tile_dir.mkdir(parents=True, exist_ok=True)
with open(this_tile_dir / f'{str(tile.y)}.geojson', 'a') as f:
for new_feature in new_features:
f.write(geojson.dumps(new_feature, separators=(',', ':')))
f.write('\n')
def find_tiles(geometry, min_zoom, max_zoom):
assert min_zoom <= max_zoom, 'min zoom must be <= max zoom'
selected_tiles = []
bound_tiles = mercantile.tiles(
*geometry.bounds, zooms=range(min_zoom, max_zoom + 1))
for tile in bound_tiles:
if box(*mercantile.bounds(tile)).intersects(geometry):
selected_tiles.append(tile)
return selected_tiles
def clip_geometry_to_tile(geometry, tile):
tile_geom = box(*mercantile.bounds(tile))
# Geometry collection of split objects
split_gc = split(geometry, tile_geom)
return [g for g in split_gc if tile_geom.contains(g)]
if __name__ == '__main__':
cut_geojson()
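# --- Example invocation (hedged; file paths and zoom levels are illustrative):
#   python cut_geojson.py features.geojson -z 8 -Z 12 -d tiles \
#       --allowed-geom-type LineString --allowed-geom-type MultiLineString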
|
36368
|
from .uniswap_v2_deltas import *
from .uniswap_v2_events import *
from .uniswap_v2_metadata import *
from .uniswap_v2_spec import *
from .uniswap_v2_state import *
|
36371
|
from __future__ import annotations
import ast
import pytest
from flake8_pie import Flake8PieCheck
from flake8_pie.pie804_no_unnecessary_dict_kwargs import PIE804
from flake8_pie.tests.utils import Error, ex, to_errors
EXAMPLES = [
ex(
code="""
foo(**{"bar": True})
""",
errors=[PIE804(lineno=2, col_offset=6)],
),
ex(
code="""
foo(**{"r2d2": True})
""",
errors=[PIE804(lineno=2, col_offset=6)],
),
ex(
code="""
Foo.objects.create(**{"bar": True})
""",
errors=[PIE804(lineno=2, col_offset=21)],
),
ex(
code="""
Foo.objects.create(**{"_id": some_id})
""",
errors=[PIE804(lineno=2, col_offset=21)],
),
ex(
code="""
Foo.objects.create(**{**bar})
""",
errors=[PIE804(lineno=2, col_offset=21)],
),
ex(
code="""
foo(**{**data, "foo": "buzz"})
foo(**buzz)
foo(**{"bar-foo": True})
foo(**{"bar foo": True})
foo(**{"1foo": True})
foo(**{buzz: True})
foo(**{"": True})
foo(**{f"buzz__{bar}": True})
""",
errors=[],
),
]
@pytest.mark.parametrize("code,errors", EXAMPLES)
def test_examples(code: str, errors: list[Error]) -> None:
expr = ast.parse(code)
assert to_errors(Flake8PieCheck(expr, filename="foo.py").run()) == errors
|
36376
|
import pyClarion.base as clb
import pyClarion.numdicts as nd
import unittest
import unittest.mock as mock
class TestProcess(unittest.TestCase):
@mock.patch.object(clb.Process, "_serves", clb.ConstructType.chunks)
def test_check_inputs_accepts_good_input_structure(self):
process = clb.Process(
expected=[clb.buffer("wm"), clb.terminus("selection")]
)
inputs = {
clb.buffer("wm"): nd.NumDict(default=0),
clb.terminus("selection"): nd.NumDict(default=0),
clb.terminus("selection2"): nd.NumDict(default=0)
}
process.check_inputs(inputs)
@mock.patch.object(clb.Process, "_serves", clb.ConstructType.chunks)
def test_check_inputs_rejects_incomplete_input(self):
process = clb.Process(
expected=[clb.chunks("in"), clb.terminus("selection")]
)
with self.assertRaises(RuntimeError):
inputs = {
# clb.buffer("wm"): nd.NumDict(default=0),
clb.terminus("selection"): nd.NumDict(default=0),
clb.terminus("selection2"): nd.NumDict(default=0)
}
process.check_inputs(inputs)
class TestWrappedProcess(unittest.TestCase):
pass
|
36391
|
from .pycrunchbase import (
CrunchBase,
)
from .resource import (
Acquisition,
Address,
Category,
Degree,
FundingRound,
Fund,
Image,
Investment,
IPO,
Job,
Location,
News,
Organization,
Page,
PageItem,
Person,
Product,
Relationship,
StockExchange,
Video,
Website,
)
__version__ = "0.3.9"
__all__ = [
'Acquisition',
'Address',
'Category',
'Degree',
'FundingRound',
'Fund',
'Image',
'Investment',
'IPO',
'Job',
'Location',
'News',
'Organization',
'Page',
'PageItem',
'Person',
'Product',
'Relationship',
'StockExchange',
'Video',
'Website',
'CrunchBase'
]
|
36422
|
from django.views.generic import DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from schedule.models import Calendar
from schedule.views import CreateEventView, EditEventView, EventMixin
from apps.events.forms import CustomEventForm
class CustomCreateEventView(CreateEventView):
form_class = CustomEventForm
template_name = 'event/edit.html'
def get_context_data(self, **kwargs):
context = super(CustomCreateEventView, self).get_context_data(**kwargs)
calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
extra_context = {
"calendar": calendar,
}
context.update(extra_context)
return context
def form_valid(self, form):
super(CustomCreateEventView, self).form_valid(form)
        messages.success(self.request, 'Event created successfully.')
return HttpResponseRedirect(
reverse('calendar_details', kwargs={'calendar_slug': self.kwargs.get('calendar_slug')})
)
class CustomUpdateEventView(EditEventView):
form_class = CustomEventForm
template_name = 'event/edit.html'
def get_context_data(self, **kwargs):
context = super(CustomUpdateEventView, self).get_context_data(**kwargs)
calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
extra_context = {
"calendar": calendar,
}
context.update(extra_context)
return context
def form_valid(self, form):
super(CustomUpdateEventView, self).form_valid(form)
        messages.success(self.request, 'Event edited successfully.')
return HttpResponseRedirect(
reverse('calendar_details', kwargs={'calendar_slug': self.kwargs.get('calendar_slug')})
)
class CustomDeleteEventView(LoginRequiredMixin, EventMixin, DeleteView):
"""Delete Event"""
template_name = 'event/delete.html'
def get_success_url(self):
return reverse('calendar_details', args=[self.kwargs.get('calendar_slug')])
def get_context_data(self, **kwargs):
context = super(CustomDeleteEventView, self).get_context_data(**kwargs)
calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
context.update(
{
'event': self.object,
'calendar': calendar
}
)
return context
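# --- URL wiring sketch (hedged; route patterns and names below are
# illustrative assumptions, not taken from this project's urls.py):
# from django.urls import path
# urlpatterns = [
#     path('calendar/<slug:calendar_slug>/event/create/',
#          CustomCreateEventView.as_view(), name='create_event'),
#     path('calendar/<slug:calendar_slug>/event/<int:pk>/delete/',
#          CustomDeleteEventView.as_view(), name='delete_event'),
# ]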
|
36423
|
import dill
import glob
import csv
import os
from os.path import basename, join
from joblib import Parallel, delayed
domain_path = '/datasets/amazon-data/new-julian/domains'
domain_subdirectory = 'only-overall-lemma-and-label-sampling-1-3-5'
domain_files = glob.glob(join(domain_path,
'only-overall-lemma-and-label/*.csv'))
all_stars_count = {}
output_csv = join(domain_path, domain_subdirectory)
try:
os.makedirs(output_csv)
except OSError:
if not os.path.isdir(output_csv):
raise
def stars(domain_file):
stars_count = [0, 0, 0, 0, 0]
stars_used = [1, 3, 5]
with open(domain_file, 'r') as f:
for line in f:
l = line.replace('\r\n', '').split(',')
stars_count[int(l[0]) - 1] += 1
f_name = '{}.csv'.format(basename(domain_file).split('.')[0])
min_count = min(stars_count)
    print('\nDomain: {}\nStars count: {}\nMin star count: {}\n'.format(
        f_name, stars_count, min_count))
stars_count = [0, 0, 0, 0, 0]
with open(domain_file, 'r') as f:
with open(join(output_csv, f_name), 'w') as csv_file:
sent_writer = csv.writer(csv_file, delimiter=',', quotechar=' ',
quoting=csv.QUOTE_MINIMAL)
for line in f:
l = line.replace('\r\n', '').split(',')
star_label = int(l[0])
idx = star_label - 1
stars_count[idx] += 1
if stars_count[idx] <= min_count and star_label in stars_used:
sent_writer.writerow(l)
return {f_name: {'distribution': stars_count,
'star_threshold': min_count,
'skip_stars': stars_used}
}
results = Parallel(n_jobs=-1)(delayed(stars)(i) for i in domain_files)
with open(join(domain_path, domain_subdirectory, 'results.pkl'), 'wb') as f:
    dill.dump(results, f)
|
36452
|
import torch
from torch import nn
import torch.optim as optim
import torch.multiprocessing as mp
import numpy as np
import time
class MPManager(object):
def __init__(self, num_workers):
"""
manage a single-instruction-multiple-data (SIMD) scheme
:param int num_workers: The number of processors to run.
"""
mp.set_start_method('spawn')
# Counting the current batch size
self.num_workers = num_workers
# A pool of processes
self.pool = mp.Pool(processes=num_workers)
def run(self, function, arguments):
"""
:param function : the instruction
:param arguments : list of things processors loop over
can be anything the function works on, e.g. model + data
"""
output_and_grads = self.pool.map(function, arguments)
return output_and_grads
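# --- Usage sketch (hedged, not part of the original module): `square` is an
# illustrative stand-in for any picklable top-level function.
def square(x):
    return x * x

if __name__ == "__main__":
    manager = MPManager(num_workers=2)
    print(manager.run(square, [1, 2, 3, 4]))  # -> [1, 4, 9, 16]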
|
36462
|
import csv
def save_minimal_pairs(output_filename, to_output, write_header=True):
if isinstance(output_filename, str):
outf = open(output_filename, mode='w', encoding='utf-8-sig', newline='')
needs_closed = True
else:
outf = output_filename
needs_closed = False
writer = csv.writer(outf, delimiter='\t')
if write_header:
writer.writerow(['FIRST_SEGMENT', 'SECOND_SEGMENT',
'FIRST_WORD', 'FIRST_WORD_TRANSCRIPTION',
'SECOND_WORD', 'SECOND_WORD_TRANSCRIPTION'])
for _, _, ret_dict in to_output:
for seg_pair, word_pair_set in ret_dict.items():
for word_pair in word_pair_set:
writer.writerow([seg_pair[0], seg_pair[1],
word_pair[0][0], word_pair[0][1],
word_pair[1][0], word_pair[1][1]])
if needs_closed:
outf.close()
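# --- Usage sketch (hedged): `to_output` below mirrors the structure the loop
# above expects -- an iterable of (_, _, {segment_pair: word_pair_set}) where
# each word pair is ((word, transcription), (word, transcription)).
if __name__ == '__main__':
    ret_dict = {('p', 'b'): {(('pat', 'p.a.t'), ('bat', 'b.a.t'))}}
    save_minimal_pairs('minimal_pairs.tsv', [(None, None, ret_dict)])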
|
36492
|
c = get_config()
# Export all the notebooks in the current directory to LaTeX and post-process them to PDF.
c.NbConvertApp.notebooks = ['*.ipynb']
c.NbConvertApp.export_format = 'latex'
c.NbConvertApp.postprocessor_class = 'PDF'
c.Exporter.template_file = 'custom_article.tplx'
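# Typical invocation (hedged; the generic --config flag of Jupyter apps is
# assumed here): jupyter nbconvert --config <path to this file>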
|
36510
|
from polyphony import testbench
class C:
def __init__(self, x):
self.x = x
class D:
def __init__(self, c):
self.c = c
def alias02(x):
c0 = C(x)
c1 = C(x*x)
d = D(c0)
result0 = d.c.x == x
d.c = c1
result1 = d.c.x == x*x
c1.x = 0
result2 = d.c.x == 0
d.c = c0
result3 = d.c.x == x
return result0 and result1 and result2 and result3
@testbench
def test():
assert alias02(1)
assert alias02(2)
assert alias02(3)
test()
|
36515
|
import json
import requests
from rotkehlchen.assets.asset import Asset
from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.history.deserialization import deserialize_price
from rotkehlchen.types import Price
PRICE_API_URL = 'https://bisq.markets/api/ticker?market={symbol}_BTC'
def get_bisq_market_price(asset: Asset) -> Price:
"""
Get price for pair at bisq marketplace. Price is returned against BTC.
Can raise:
    - RemoteError: If the market doesn't exist or the request fails
- DeserializationError: If the data returned is not a valid price
"""
url = PRICE_API_URL.format(symbol=asset.symbol)
try:
response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)
except requests.exceptions.RequestException as e:
raise RemoteError(f'bisq.markets request {url} failed due to {str(e)}') from e
try:
data = response.json()
except json.decoder.JSONDecodeError as e:
raise RemoteError(
f'Failed to read json response from bisq.markets. {response.text}. {str(e)}',
) from e
if 'error' in data:
raise RemoteError(f'Request data from bisq.markets {url} is not valid {data["error"]}')
try:
price = data['last']
except KeyError as e:
raise DeserializationError(
            f'Response from bisq.markets didn\'t contain expected key "last". {data}',
) from e
return deserialize_price(price)
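# --- Usage sketch (hedged): assumes 'XMR' resolves to a known rotkehlchen
# Asset and that bisq.markets is reachable; errors propagate as documented.
if __name__ == '__main__':
    print(get_bisq_market_price(Asset('XMR')))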
|
36547
|
from sklearn.metrics import mean_squared_error, log_loss
from keras.models import Model
from keras.models import load_model
from keras.layers import Input, Dense
from keras.layers.recurrent import SimpleRNN
from keras.layers.merge import multiply, concatenate, add
from keras import backend as K
from keras import initializers
from keras.callbacks import EarlyStopping
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import Callback
from keras import optimizers
import pandas as pd
import numpy as np
from keras.constraints import max_norm, non_neg, unit_norm
np.random.seed(42)
from math import sqrt
import os
import sys
from collections import defaultdict
class DeepAFM:
def __init__(self):
pass
def custom_bce(self, y_true, y_pred):
b = K.not_equal(y_true, -K.ones_like(y_true))
b = K.cast(b, dtype='float32')
ans = K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1) * K.mean(b, axis=-1)
ans = K.cast(ans, dtype='float32')
return K.sum(ans)
def custom_activation(self, x):
if self.activation.split('-')[0] == "custom":
a = float(self.activation.split('-')[1])
return 1.0 / ( 1 + K.exp(-a*x) )
        elif self.activation.split('-')[0] == "rounded":
            return K.minimum(K.maximum(K.round(K.sigmoid(x)), 0), 1)
def custom_init(self, shape, dtype=None):
return K.cast_to_floatx(self.Q_jk_initialize)
def custom_random(self, shape, dtype=None):
if self.random_init == "normal":
return K.random_normal(shape, 0.5, 0.05, dtype=dtype, seed=22)
else:
return K.random_uniform(shape, 0, 1, dtype=dtype, seed=22)
def f(self, x):
def custom_init(shape, dtype=None):
return K.cast_to_floatx(np.reshape(x, shape))
return custom_init
def build(self, dafm_type="dafm-afm", optimizer="rmsprop", learning_rate=0.01, activation="linear", Q_jk_initialize=0, section="", section_count=0, model1="", stateful=False, theta_student="False", student_count=0, binary="False"):
skills = np.shape(Q_jk_initialize)[1]
steps = np.shape(Q_jk_initialize)[0]
self.activation = activation
if '-' in self.activation:
activation = self.custom_activation
if dafm_type.split("_")[-1] == "different":
skills = int( float(dafm_type.split("_")[-2])*skills )
dafm_type = dafm_type.split('_')[0]
if dafm_type.split("_")[0] == "round-fine-tuned":
try:
self.round_threshold = float(dafm_type.split("_")[-1])
dafm_type = dafm_type.split("_")[0]
except:
pass
q_jk_size = skills
if '^' in dafm_type:
q_jk_size = skills
skills = int (float(dafm_type.split('^')[-1]) * skills)
dafm_type = dafm_type.split('^')[0]
self.dafm_type = dafm_type
if dafm_type == "random-uniform" or dafm_type == "random-normal":
qtrainable, finetuning, randomize = True, False, True
self.random_init = dafm_type.split('-')[-1]
elif dafm_type == "dafm-afm":
qtrainable, finetuning, randomize = False, False, False
elif dafm_type == "fine-tuned":
qtrainable, finetuning, randomize = True, True, False
elif dafm_type == "kcinitialize":
qtrainable, finetuning, randomize = True, False, False
elif dafm_type== "round-fine-tuned":
# if not self.round_threshold == -1:
# rounded_Qjk = np.abs(Q_jk1 - Q_jk_initialize)
# Q_jk1[rounded_Qjk <= self.round_threshold] = Q_jk_initialize[rounded_Qjk <= self.round_threshold]
# Q_jk1[rounded_Qjk > self.round_threshold] = np.ones(np.shape(Q_jk_initialize[rounded_Qjk > self.round_threshold])) - Q_jk_initialize[rounded_Qjk > self.round_threshold]
# else:
Q_jk1 = model1.get_layer("Q_jk").get_weights()[0]
Q_jk1 = np.minimum(np.ones(np.shape(Q_jk1)), np.maximum(np.round(Q_jk1), np.zeros(np.shape(Q_jk1))))
model1.get_layer("Q_jk").set_weights([Q_jk1])
return model1
elif dafm_type == "qjk-dense":
qtrainable, finetuning, randomize = False, False, False
activation_dense = activation
elif dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
qtrainable, finetuning, randomize = False, False, True
self.random_init = dafm_type.split('-')[-1]
activation_dense = activation
else:
print ("No Valid Model Found")
sys.exit()
if section == "onehot":
section_input = Input(batch_shape=(None, None, section_count), name='section_input')
if not theta_student=="False":
student_input = Input(batch_shape=(None, None, student_count), name='student_input')
virtual_input1 = Input(batch_shape=(None, None, 1), name='virtual_input1')
if finetuning:
B_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("B_k").get_weights()[0]), use_bias=False), name="B_k")(virtual_input1)
T_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("T_k").get_weights()[0]), use_bias=False), name="T_k")(virtual_input1)
bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=self.f(model1.get_layer("bias").get_weights()[0]), trainable=True), name="bias")(virtual_input1)
else:
B_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="B_k")(virtual_input1)
T_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="T_k")(virtual_input1)
bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=initializers.Zeros(), trainable=True), name="bias")(virtual_input1)
step_input = Input(batch_shape=(None, None, steps), name='step_input')
if randomize:
if binary=="False":
Q_jk = TimeDistributed(Dense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random), trainable=qtrainable ,name="Q_jk")(step_input)
else:
Q_jk = TimeDistributed(BinaryDense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random),trainable=qtrainable, name="Q_jk")(step_input)
else:
if binary=="False":
Q_jk = TimeDistributed(Dense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize), use_bias=False,trainable=qtrainable), trainable=qtrainable, name="Q_jk")(step_input)
else:
Q_jk = TimeDistributed(BinaryDense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize),trainable=qtrainable,
use_bias=False), name="Q_jk", trainable=qtrainable)(step_input)
if dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
if binary =="False":
Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
else:
Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
elif dafm_type == "qjk-dense":
if binary =='False':
Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
else:
Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
else:
pass
Qjk_mul_Bk = multiply([Q_jk, B_k])
sum_Qjk_Bk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False,name="sum_Qjk_Bk")(Qjk_mul_Bk)
P_k = SimpleRNN(skills, kernel_initializer=initializers.Identity(), recurrent_initializer=initializers.Identity() , use_bias=False, trainable=False, activation='linear', return_sequences=True, name="P_k")(Q_jk)
Qjk_mul_Pk_mul_Tk = multiply([Q_jk, P_k, T_k])
sum_Qjk_Pk_Tk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False),trainable=False, name="sum_Qjk_Pk_Tk")(Qjk_mul_Pk_mul_Tk)
Concatenate = concatenate([bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk])
if not (theta_student=="False"):
if finetuning:
theta = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("theta").get_weights()[0])), name='theta')(student_input)
else:
theta = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='theta')(student_input)
Concatenate = concatenate([Concatenate, theta])
if section == "onehot":
if finetuning:
S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("S_k").get_weights()[0])), name='S_k')(section_input)
else:
S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='S_k')(section_input)
Concatenate = concatenate([Concatenate, S_k])
output = TimeDistributed(Dense(1, activation="sigmoid", trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False, name="output")(Concatenate)
if section == "onehot" and not (theta_student=="False"):
model = Model(inputs=[virtual_input1, step_input, section_input, student_input], outputs=output)
elif section == "onehot" and theta_student=="False":
model = Model(inputs=[virtual_input1, step_input, section_input], outputs=output)
elif not (section == "onehot") and not (theta_student=="False"):
model = Model(inputs=[virtual_input1, step_input, student_input], outputs=output)
else:
model = Model(inputs=[virtual_input1, step_input], outputs=output)
d_optimizer = {"rmsprop":optimizers.RMSprop(lr=learning_rate), "adam":optimizers.Adam(lr=learning_rate), "adagrad":optimizers.Adagrad(lr=learning_rate) }
model.compile( optimizer = d_optimizer[optimizer],
loss = self.custom_bce)
return model
def fit(self, x_train, y_train, x_train_section, x_train_student, x_test, y_test, x_test_section, x_test_student, model, epochs=5, batch_size=32, loaded=False, validation=True):
loss_epoch = {"epoch":[], "loss":[], "val_loss":[], 'patience':[]}
print ("Max Epochs", epochs)
if self.dafm_type == "round-fine-tuned" or loaded:
best_model = model
patience, epoch = 0 , 1
prev_best_val_loss = np.inf
counter = 0
virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
virtual_input1_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if not validation:
earlyStopping = EarlyStopping(monitor='loss', patience=2)
if len(x_train_student) == 0:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
else:
history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
else:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=epochs , callbacks=[earlyStopping], verbose=1, shuffle=True)
else:
history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
# print ("Epoch Number:", counter, "Patience:", 0, "val loss:", current_val_loss)
loss_epoch["loss"].extend(history_callback.history["loss"])
loss_epoch["val_loss"].extend(history_callback.history["loss"])
loss_epoch["epoch"].extend(list(range(epochs)))
loss_epoch["patience"].extend(list(range(epochs)))
best_model = model
epoch = epochs
else:
while (patience <=5 and epoch <= epochs and (not self.dafm_type == "round-fine-tuned") and (loaded == False)):
permutation = np.random.permutation(x_train.shape[0])
x_train = x_train[permutation]
y_train = y_train[permutation]
counter += 1
if len(x_train_student) == 0:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test], y_test), verbose=0, shuffle=True)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section], y_test), verbose=0, shuffle=True)
else:
x_train_student = x_train_student[permutation]
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_student], y_test), verbose=0, shuffle=True)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section, x_test_student], y_test), verbose=0, shuffle=True)
current_val_loss = history_callback.history["val_loss"][0]
print ("Epoch Number:", counter, "Patience:", patience, "val loss:", current_val_loss)
loss_epoch["val_loss"].append(history_callback.history["val_loss"][0])
loss_epoch["loss"].append(history_callback.history["loss"][0])
loss_epoch["epoch"].append(counter)
loss_epoch["patience"].append(patience)
if (prev_best_val_loss - current_val_loss) > 0:
best_model = model
epoch += patience + 1
patience = 0
prev_best_val_loss = current_val_loss
else:
patience += 1
if len(x_train_student)==0:
if len(x_train_section)==0:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train]), x_train)
else:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section]), x_train)
else:
if len(x_train_section)==0:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_student]), x_train)
else:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section, x_train_student]), x_train)
L, N = -np.sum(x), len(x)
model_param = best_model.count_params()
print ("PARAM", model_param)
AIC = 2 * model_param - 2 * L
BIC = model_param * np.log(N) - 2 * L
B_k = best_model.get_layer("B_k").get_weights()[0]
T_k = best_model.get_layer("T_k").get_weights()[0]
return best_model, AIC, BIC, epoch, loss_epoch
def fit_batches(self, dafmdata_obj, model, max_epochs=30, earlyStop="val_loss", loaded=False):
print ("Max Epochs", max_epochs)
loss_epoch = {"epoch":[], "loss":[], earlyStop:[], 'patience':[]}
patience, epoch = 0, 1
prev_best_val_loss = np.inf
counter = 0
if self.dafm_type == "round-fine-tuned" or loaded:
best_model = model
while (patience <= 2 and epoch <= max_epochs and loaded==False and (not self.dafm_type == "round-fine-tuned")):
counter += 1
current_val_loss = 0
total_loss, total_train_samples = 0, 0
train = dafmdata_obj.data_generator1("train")
test = dafmdata_obj.data_generator1("test")
bc = 0
for x_train, y_train, x_train_section, x_train_student, batch_size in train:
permutation = np.random.permutation(x_train.shape[0])
x_train = x_train[permutation]
y_train = y_train[permutation]
virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
print ("Batch Number:", bc, np.shape(x_train))
if len(x_train_student)==0:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, verbose=0)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, verbose=1)
else:
x_train_student = x_train_student[permutation]
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=0)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=1)
total_loss += history_callback.history["loss"][0] * len(x_train)
total_train_samples += len(x_train)
bc += 1
if earlyStop == "rmse":
current_avg_rmse = self.predict_batches(dafmdata_obj, model)
loss_epoch["rmse"].append(current_avg_rmse)
else:
current_avg_rmse = np.mean(self.bce_loss_batches(dafmdata_obj, model, utype="test"))
loss_epoch["val_loss"].append(current_avg_rmse)
loss_epoch["loss"].append(float(total_loss)/float(total_train_samples))
loss_epoch["epoch"].append(counter)
loss_epoch["patience"].append(patience)
print ("Epoch Number:", counter, "Patience:", patience, earlyStop, current_avg_rmse, "Loss:", loss_epoch["loss"][-1])
if (prev_best_val_loss - current_avg_rmse) > 0:
best_model = model
epoch += patience + 1
patience = 0
prev_best_val_loss = current_avg_rmse
else:
patience += 1
x = self.bce_loss_batches(dafmdata_obj, best_model, utype="train")
L, N = -np.sum(x), len(x)
model_param = best_model.count_params()
AIC = 2 * model_param - 2 * L
BIC = model_param * np.log(N) - 2 * L
return best_model, AIC, BIC, epoch, loss_epoch
def L(self, y_true, y_pred, x_train):
mask_matrix = np.sum(x_train, axis=2).flatten()
num_users, max_responses = np.shape(x_train)[0], np.shape(x_train)[1]
y_pred = y_pred.flatten()
y_true = y_true.flatten()
rmse = []
SSR = 0
response = 0
L = 0
N = 0
c = 0
for user in range(num_users):
for i in range(user * max_responses, (user + 1) * max_responses):
if mask_matrix[i] == 0 or y_true[i] == -1:
break
if y_pred[i] < 1 and y_pred[i] > 0:
L += ( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) )
else:
c += 1
eps = 1e-4
if y_pred[i] == y_true[i]:
pass
else:
y_pred[i] = max(eps, min(1 - eps, y_pred[i]))
L += ( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) )
response += 1
N += 1
return L, N
def L_batches(self, dafmdata_obj, model):
L = 0
N = 0
train_generator = dafmdata_obj.data_generator1("train")
for x_train, y_train, x_train_section, x_train_student, batch_size in train_generator:
virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
if len(x_train_student)==0:
if len(x_train_section) == 0:
l, x= self.L(y_train, model.predict([virtual_input1, x_train]), x_train)
L += l
else:
l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_section]), x_train)
L += l
else:
if len(x_train_section) == 0:
l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_student]), x_train)
L += l
else:
l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_section, x_train_student]), x_train)
L += l
N += len(x_train)
return L, N
def predict(self, x_test, y_test, x_test_section, x_test_student, model, batch_size=32):
virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if len(x_test_student)==0:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section] , batch_size=batch_size)
else:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section, x_test_student] , batch_size=batch_size)
rmse = self.rmse_masking(y_test, y_pred, x_test)
return rmse
def prediction(self, x_test, x_test_section, x_test_student, model, batch_size=32):
virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if len(x_test_student)==0:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section] , batch_size=batch_size)
else:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section, x_test_student], batch_size=batch_size)
return y_pred
def predict_batches(self, dafmdata_obj, model):
test_generator = dafmdata_obj.data_generator1("test")
avg_rmse = 0
t_users = 0
for x_test, y_test, x_test_section, x_test_student, batch_size in test_generator:
avg_rmse = avg_rmse + len(x_test)*self.predict(x_test, y_test, x_test_section, x_test_student, model, batch_size)
t_users = t_users + len(x_test)
return avg_rmse/float(t_users)
def bce_loss_batches(self, dafmdata_obj, model, utype="train"):
ll = []
test_generator = dafmdata_obj.data_generator1(utype)
for x_test, y_test, x_test_section, x_test_student, batch_size in test_generator:
virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if len(x_test_student) == 0:
if len(x_test_section) == 0:
ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test], batch_size=batch_size), x_test))
else:
ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test, x_test_section], batch_size=batch_size), x_test))
else:
if len(x_test_section) == 0:
ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size), x_test))
else:
ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test, x_test_section, x_test_student], batch_size=batch_size), x_test))
return ll
def bce_loss(self, y_true, y_pred, x_test):
mask_matrix = np.sum(x_test, axis=2).flatten()
num_users, max_responses = np.shape(x_test)[0], np.shape(x_test)[1]
y_pred = y_pred.flatten()
y_true = y_true.flatten()
ll = []
response = 0
for user in range(num_users):
log_loss = []
for i in range(user * max_responses, (user + 1) * max_responses):
if mask_matrix[i] == 0 or y_true[i] == -1:
break
response += 1
eps = 1e-7
y_pred[i] = max(eps, min(1 - eps, y_pred[i]))
log_loss.append( -( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) ) )
ll.extend(log_loss)
return ll
def rmse_masking(self, y_true, y_pred, x_test):
mask_matrix = np.sum(x_test, axis=2).flatten()
num_users, max_responses = np.shape(x_test)[0], np.shape(x_test)[1]
y_pred = y_pred.flatten()
y_true = y_true.flatten()
rmse = []
for user in range(num_users):
diff_sq, response = 0, 0
for i in range(user * max_responses, (user + 1) * max_responses):
if mask_matrix[i] == 0 or y_true[i] == -1:
continue  # skip padded/ignored responses (response-level evaluation)
diff_sq += (y_true[i] - y_pred[i]) ** 2
response += 1
rmse.append(sqrt(diff_sq/float(response)))
return np.mean(rmse)
if __name__ == "__main__":
x_train = [ [ [0, 0, 1], [0, 0, 1], [1, 0, 0], [0, 0, 0] ],
[ [1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 0] ],
[ [0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1] ],
[ [1, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0] ] ]
x_test = [ [ [ 1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 1] ] ]
y_test = [ [ [-1], [-1], [-1], [-1] ] ]
y_train = [ [ [0], [0], [1], [-1] ],
[ [1], [0], [1], [-1] ],
[ [0], [0], [0], [0] ],
[ [0], [1], [0], [0] ] ]
# Q_jk_initialize = np.random.rand(3, 2)  # random init, overridden by the fixed Q-matrix below
Q_jk_initialize = np.array([[1, 0], [0, 1], [1, 1]])
obj = DAFM(np.array(x_train), np.array(y_train), np.array(x_test), np.array(y_test), Q_jk_initialize, skills=2, steps=3)
model = obj.build(qtrainable=False, finetuning=False, loaded=False, dftype="")
obj.predict(np.array(x_test), np.array(y_test), [], [], model)  # empty lists: no section/student inputs
|
36549
|
import zengl
from defaults import defaults
from grid import grid_pipeline
from window import Window
window = Window(1280, 720)
ctx = zengl.context()
image = ctx.image(window.size, 'rgba8unorm', samples=4)
depth = ctx.image(window.size, 'depth24plus', samples=4)
image.clear_value = (0.2, 0.2, 0.2, 1.0)
ctx.includes['defaults'] = defaults
grid = grid_pipeline(ctx, [image, depth])
pipeline = ctx.pipeline(
vertex_shader='''
#version 330
#include "defaults"
vec3 vertices[24] = vec3[](
vec3(0.000000, 1.000000, -0.500000),
vec3(0.000000, 1.000000, 0.500000),
vec3(0.500000, 0.866025, -0.500000),
vec3(0.500000, 0.866025, 0.500000),
vec3(0.866025, 0.500000, -0.500000),
vec3(0.866025, 0.500000, 0.500000),
vec3(1.000000, -0.000000, -0.500000),
vec3(1.000000, -0.000000, 0.500000),
vec3(0.866025, -0.500000, -0.500000),
vec3(0.866025, -0.500000, 0.500000),
vec3(0.500000, -0.866025, -0.500000),
vec3(0.500000, -0.866025, 0.500000),
vec3(-0.000000, -1.000000, -0.500000),
vec3(-0.000000, -1.000000, 0.500000),
vec3(-0.500000, -0.866025, -0.500000),
vec3(-0.500000, -0.866025, 0.500000),
vec3(-0.866025, -0.500000, -0.500000),
vec3(-0.866025, -0.500000, 0.500000),
vec3(-1.000000, 0.000000, -0.500000),
vec3(-1.000000, 0.000000, 0.500000),
vec3(-0.866025, 0.500000, -0.500000),
vec3(-0.866025, 0.500000, 0.500000),
vec3(-0.500000, 0.866025, -0.500000),
vec3(-0.500000, 0.866025, 0.500000)
);
vec3 normals[14] = vec3[](
vec3(-0.0000, 1.0000, -0.0000),
vec3(0.5000, 0.8660, -0.0000),
vec3(0.8660, 0.5000, -0.0000),
vec3(1.0000, -0.0000, -0.0000),
vec3(0.8660, -0.5000, -0.0000),
vec3(0.5000, -0.8660, -0.0000),
vec3(-0.0000, -1.0000, -0.0000),
vec3(-0.5000, -0.8660, -0.0000),
vec3(-0.8660, -0.5000, -0.0000),
vec3(-1.0000, -0.0000, -0.0000),
vec3(-0.8660, 0.5000, -0.0000),
vec3(-0.0000, -0.0000, 1.0000),
vec3(-0.5000, 0.8660, -0.0000),
vec3(-0.0000, -0.0000, -1.0000)
);
vec2 texcoords[50] = vec2[](
vec2(1.000000, 0.500000),
vec2(0.000000, 0.500000),
vec2(0.750000, 0.490000),
vec2(1.000000, 1.000000),
vec2(0.250000, 0.490000),
vec2(0.000000, 1.000000),
vec2(0.916667, 0.500000),
vec2(0.870000, 0.457846),
vec2(0.916667, 1.000000),
vec2(0.370000, 0.457846),
vec2(0.833333, 0.500000),
vec2(0.957846, 0.370000),
vec2(0.833333, 1.000000),
vec2(0.457846, 0.370000),
vec2(0.750000, 0.500000),
vec2(0.990000, 0.250000),
vec2(0.750000, 1.000000),
vec2(0.490000, 0.250000),
vec2(0.666667, 0.500000),
vec2(0.957846, 0.130000),
vec2(0.666667, 1.000000),
vec2(0.457846, 0.130000),
vec2(0.583333, 0.500000),
vec2(0.870000, 0.042154),
vec2(0.583333, 1.000000),
vec2(0.370000, 0.042154),
vec2(0.500000, 0.500000),
vec2(0.750000, 0.010000),
vec2(0.500000, 1.000000),
vec2(0.250000, 0.010000),
vec2(0.416667, 0.500000),
vec2(0.630000, 0.042154),
vec2(0.416667, 1.000000),
vec2(0.130000, 0.042154),
vec2(0.333333, 0.500000),
vec2(0.542154, 0.130000),
vec2(0.333333, 1.000000),
vec2(0.042154, 0.130000),
vec2(0.250000, 0.500000),
vec2(0.510000, 0.250000),
vec2(0.250000, 1.000000),
vec2(0.010000, 0.250000),
vec2(0.166667, 0.500000),
vec2(0.542154, 0.370000),
vec2(0.042154, 0.370000),
vec2(0.166667, 1.000000),
vec2(0.083333, 0.500000),
vec2(0.630000, 0.457846),
vec2(0.130000, 0.457846),
vec2(0.083333, 1.000000)
);
int vertex_indices[132] = int[](
1, 2, 0, 3, 4, 2, 5, 6, 4, 7, 8, 6, 9, 10, 8, 11, 12, 10, 13, 14, 12, 15, 16, 14, 17, 18, 16, 19, 20, 18,
21, 13, 5, 21, 22, 20, 23, 0, 22, 6, 14, 22, 1, 3, 2, 3, 5, 4, 5, 7, 6, 7, 9, 8, 9, 11, 10, 11, 13, 12, 13,
15, 14, 15, 17, 16, 17, 19, 18, 19, 21, 20, 5, 3, 1, 1, 23, 21, 21, 19, 17, 17, 15, 13, 13, 11, 9, 9, 7, 5,
5, 1, 21, 21, 17, 13, 13, 9, 5, 21, 23, 22, 23, 1, 0, 22, 0, 2, 2, 4, 6, 6, 8, 10, 10, 12, 14, 14, 16, 18,
18, 20, 22, 22, 2, 6, 6, 10, 14, 14, 18, 22
);
int normal_indices[132] = int[](
0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4, 5, 4, 5, 6, 5, 6, 7, 6, 7, 8, 7, 8, 9, 8, 9, 10, 9, 11, 11, 11, 10,
12, 10, 12, 0, 12, 13, 13, 13, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8,
9, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 10, 12, 12, 12, 0, 0, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
);
int texcoord_indices[132] = int[](
3, 6, 0, 8, 10, 6, 12, 14, 10, 16, 18, 14, 20, 22, 18, 24, 26, 22, 28, 30, 26, 32, 34, 30, 36, 38, 34, 40,
42, 38, 44, 29, 13, 45, 46, 42, 49, 1, 46, 15, 31, 47, 3, 8, 6, 8, 12, 10, 12, 16, 14, 16, 20, 18, 20, 24,
22, 24, 28, 26, 28, 32, 30, 32, 36, 34, 36, 40, 38, 40, 45, 42, 13, 9, 4, 4, 48, 44, 44, 41, 37, 37, 33,
29, 29, 25, 21, 21, 17, 13, 13, 4, 44, 44, 37, 29, 29, 21, 13, 45, 49, 46, 49, 5, 1, 47, 2, 7, 7, 11, 15,
15, 19, 23, 23, 27, 31, 31, 35, 39, 39, 43, 47, 47, 7, 15, 15, 23, 31, 31, 39, 47
);
out vec3 v_vertex;
out vec3 v_normal;
out vec2 v_texcoord;
void main() {
v_vertex = vertices[vertex_indices[gl_VertexID]];
v_normal = normals[normal_indices[gl_VertexID]];
v_texcoord = texcoords[texcoord_indices[gl_VertexID]];
gl_Position = mvp * vec4(v_vertex, 1.0);
}
''',
fragment_shader='''
#version 330
#include "defaults"
in vec3 v_normal;
layout (location = 0) out vec4 out_color;
void main() {
float lum = dot(normalize(light.xyz), normalize(v_normal)) * 0.7 + 0.3;
out_color = vec4(lum, lum, lum, 1.0);
}
''',
framebuffer=[image, depth],
topology='triangles',
cull_face='back',
vertex_count=132,
)
while window.update():
image.clear()
depth.clear()
grid.render()
pipeline.render()
image.blit()
|
36554
|
import behave
@behave.when(u"I list triggers")
def step_impl(context):
context.trigger_list = context.service.triggers.list()
@behave.then(u'I receive a Trigger list of "{count}" objects')
def step_impl(context, count):
assert context.trigger_list.items_count == int(count)
if int(count) > 0:
for page in context.trigger_list:
for trigger in page:
assert isinstance(trigger, context.dl.entities.Trigger) or \
isinstance(trigger, context.dl.entities.trigger.CronTrigger)
|
36568
|
import os
from flask import current_app
from flask import _app_ctx_stack as stack
class ConfigWriter(object):
def __init__(self, app=None, consul=None, vault=None):
self.app = app
self.consul = consul
self.vault = vault
if app is not None:
self.init_app(app, consul, vault)
def init_app(self, app, consul, vault):
self.consul = consul
self.vault = vault
self.consul_prefix = app.config.get('CONSUL_PREFIX', 'cabotage')
self.vault_prefix = app.config.get('VAULT_PREFIX', 'secret/cabotage')
app.teardown_appcontext(self.teardown)
def teardown(self, exception):
pass
def write_configuration(self, org_slug, project_slug, app_slug, configuration):
version = configuration.version_id + 1 if configuration.version_id else 1
if configuration.secret:
if self.vault is None:
raise RuntimeError('No Vault extension configured!')
config_key_name = (f'{self.vault_prefix}/automation'
f'/{org_slug}/{project_slug}-{app_slug}/configuration/'
f'{configuration.name}/{version}')
build_key_name = (f'{self.vault_prefix}/buildtime'
f'/{org_slug}/{project_slug}-{app_slug}/configuration/'
f'{configuration.name}/{version}')
storage = 'vault'
self.vault.vault_connection.write(
config_key_name, **{configuration.name: configuration.value},
)
if configuration.buildtime:
self.vault.vault_connection.write(
build_key_name, **{configuration.name: configuration.value},
)
else:
if self.consul is None:
raise RuntimeError('No Consul extension configured!')
config_key_name = (f'{self.consul_prefix}'
f'/{org_slug}/{project_slug}-{app_slug}/configuration/'
f'{configuration.name}/{version}/{configuration.name}')
build_key_name = config_key_name
storage = 'consul'
self.consul.consul_connection.kv.put(config_key_name, configuration.value)
config_key_name = '/'.join(config_key_name.split('/')[:-1])
return {
'config_key_slug': f'{storage}:{config_key_name}',
'build_key_slug': f'{storage}:{build_key_name}',
}
def read(self, key_slug, build=False, secret=False):
if secret:
return self.vault.vault_connection.read(key_slug)
return self.consul.consul_connection.read(key_slug)
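# Hedged usage sketch (the consul_ext/vault_ext extension objects are
# hypothetical): ConfigWriter follows the standard Flask extension pattern,
# supporting both eager and deferred initialization.
# from flask import Flask
# app = Flask(__name__)
# writer = ConfigWriter()
# writer.init_app(app, consul=consul_ext, vault=vault_ext)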
|
36603
|
import discord
import slash_util
class SampleCog(slash_util.Cog):
@slash_util.slash_command(guild_id=123)
async def pog(self, ctx: slash_util.Context):
await ctx.send("pog", ephemeral=True)
@slash_util.message_command(guild_id=123)
async def quote(self, ctx: slash_util.Context, message: discord.Message): # the `message` parameter is REQUIRED for message commands
await ctx.send(f"> {message.clean_content}\n- {message.author}")
@slash_util.user_command(guild_id=123)
async def bonk(self, ctx: slash_util.Context, user: discord.Member):
await ctx.send(f"{ctx.author} bonks {user} :hammer:")
def setup(bot):
bot.add_cog(SampleCog(bot))
|
36652
|
import numpy as np
import pytest
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from hcrystalball.metrics import get_scorer
from hcrystalball.model_selection import FinerTimeSplit
from hcrystalball.model_selection import get_best_not_failing_model
from hcrystalball.model_selection import select_model
from hcrystalball.wrappers import ExponentialSmoothingWrapper
from hcrystalball.wrappers import get_sklearn_wrapper
@pytest.mark.parametrize(
"train_data, grid_search, parallel_over_dict",
[("two_regions", "", {"Region": "region_0"}), ("two_regions", "", None)],
indirect=["train_data", "grid_search"],
)
def test_select_model(train_data, grid_search, parallel_over_dict):
_train_data = train_data
if parallel_over_dict:
col, value = list(parallel_over_dict.items())[0]
_train_data = train_data[train_data[col] == value].drop(columns="Region")
partition_columns = ["Region", "Product"]
results = select_model(
_train_data,
target_col_name="Quantity",
partition_columns=partition_columns,
parallel_over_dict=parallel_over_dict,
grid_search=grid_search,
country_code_column="Holidays_code",
)
if parallel_over_dict:
partitions = (
train_data.loc[train_data[col] == value, partition_columns]
.drop_duplicates()
.to_dict(orient="records")
)
else:
partitions = train_data[partition_columns].drop_duplicates().to_dict(orient="records")
assert len(results) == len(partitions)
for result in results:
assert result.best_model_name == "good_dummy"
assert result.partition in partitions
@pytest.mark.parametrize(
"X_y_optional, negative_data, best_model_name, rank, expected_error",
[
("", False, "ExponentialSmoothingWrapper", 1, None),
("", True, "SklearnWrapper", 2, None),
("", True, "", 2, ValueError),
],
indirect=["X_y_optional"],
)
def test_get_best_not_failing_model(X_y_optional, negative_data, best_model_name, rank, expected_error):
X, y = X_y_optional
# data contains 0
y[y < 1] = 1
if negative_data:
y[-1] = -1
models = [
ExponentialSmoothingWrapper(freq="D", seasonal="mul"),
get_sklearn_wrapper(DummyRegressor, strategy="constant", constant=-1000),
]
models = models if expected_error is None else models[:1]
grid_search = GridSearchCV(
estimator=Pipeline([("model", "passthrough")]),
param_grid=[{"model": models}],
scoring=get_scorer("neg_mean_absolute_error"),
cv=FinerTimeSplit(n_splits=1, horizon=5),
refit=False,
error_score=np.nan,
)
grid_search.fit(X, y)
if expected_error:
with pytest.raises(expected_error):
get_best_not_failing_model(grid_search, X, y)
else:
best_param_rank = get_best_not_failing_model(grid_search, X, y)
assert isinstance(best_param_rank, dict)
assert best_param_rank["params"]["model"].__class__.__name__ == best_model_name
assert best_param_rank["rank"] == rank
|
36659
|
import os, pickle
import os.path as osp
import numpy as np
import cv2
import scipy.ndimage as nd
import init_path
from lib.dataset.get_dataset import get_dataset
from lib.network.sgan import SGAN
import torch
from torch.utils.data import DataLoader
import argparse
from ipdb import set_trace
import matplotlib.pyplot as plt
from lib.utils import pyutils
classes=['background',
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor']
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--cfg_file", default=None, type=str)
args = parser.parse_args()
args = pyutils.read_yaml2cls(args.cfg_file)
return args
# mean pixel : in B-G-R channel order
mean_pixel = np.array([104.008, 116.669, 122.675])
def preprocess(image, size):
""" pre-process images with Opencv format"""
image = np.array(image)
H, W, _ = image.shape
image = nd.zoom(image.astype('float32'), (size / H, size / W, 1.0), order=1)
image = image - mean_pixel
image = image.transpose([2, 0, 1])
image = np.expand_dims(image, axis=0)
return torch.from_numpy(image)
def generate_seed_with_ignore(localization):
"""
This function generates a seed mask, ignoring all conflicting positions
:param localization: (41, 41, 21) binary value
:return:
"""
h, w, c = localization.shape
assert (h == 41) & (w == 41) & (c == 21)
# set_trace()
# find conflict index
sum_loc = np.sum(localization, axis=2)
conflict_ind = np.where(sum_loc > 1)
# set conflict position to 0
localization[conflict_ind[0], conflict_ind[1], :] = 0
# generate seed
ind = np.where(localization)
mask = np.ones(shape=(h, w), dtype=int) * 21  # plain int: np.int is removed in recent NumPy
mask[ind[0], ind[1]] = ind[2]
return mask
def generate_seed_wo_ignore(localization, train_boat=False):
"""
This function generates a seed mask with a priority strategy: classes are written in descending order of area, so smaller classes take precedence where they overlap
:param localization:
:return:
"""
h, w, c = localization.shape
assert (h == 41) & (w == 41) & (c == 21)
# generate background seed
mask = np.ones((h, w), dtype=int) * 21
bg_ind = np.where(localization[:, :, 0])
mask[bg_ind[0], bg_ind[1]] = 0
# generate foreground seeds in descending order of their area
area = np.sum(localization, axis=(0, 1))
cls_order = np.argsort(area)[::-1] # area in descending order
for cls in cls_order:
if area[cls] == 0:
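# A note on the write order above: later assignments overwrite earlier ones,
# so iterating from largest to smallest area lets small classes keep their
# pixels when regions overlap.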
break
ind = np.where(localization[:, :, cls])
mask[ind[0], ind[1]] = cls
if train_boat:
train_boat_ind = np.where(((mask == 4) | (mask == 19)) & (localization[:, :, 0] == 1))
mask[train_boat_ind] = 0
return mask
def get_localization_cues_sec(att_maps, saliency, im_label, cam_thresh):
"""get localization cues with method in SEC paper
perform hard thresholding for each foreground class
Parameters
----------
att_maps: [41, 41, 20]
saliency: [H, W]
im_label: list of foreground classes
cam_thresh: hard threshold to extract foreground class cues
Return
------
seg_mask: [41, 41]
"""
h, w = att_maps.shape[:2]
im_h, im_w = saliency.shape[:2]
localization1 = np.zeros(shape=(h, w, 21))
for idx in im_label: # idx: aero=1
heat_map = att_maps[:, :, idx - 1]
localization1[:, :, idx] = heat_map > cam_thresh * np.max(heat_map)
# bg_cue = saliency.astype(np.float32)
# bg_cue = bg_cue / 255
bg_cue = nd.zoom(saliency, (h / im_h, w / im_w), order=1)
localization1[:, :, 0] = bg_cue < 0.06
# handle conflict seed
if args.ignore_conflict:
seg_mask = generate_seed_with_ignore(localization1)
else:
seg_mask = generate_seed_wo_ignore(localization1, train_boat=True)
return seg_mask
def get_localization_cues_dcsp(att_maps, saliency, im_label, bg_thresh):
"""get localization cues with method in DCSP paper
compute harmonic mean for each foreground class
Parameters
----------
att_maps: [41, 41, 20]
saliency: [H, W]
im_label: list of foreground classes
bg_thresh: threshold on the harmonic mean below which a pixel is labeled background
Return
------
seg_mask: [41, 41]
"""
h, w = att_maps.shape[:2]
im_h, im_w = saliency.shape[:2]
re_sal = nd.zoom(saliency, (h / im_h, w / im_w), order=1)
localization1 = np.zeros(shape=(h, w, 20))
for idx in im_label: # idx: aero=1
localization1[:, :, idx - 1] = 2 / ((1 / (att_maps[:, :, idx - 1] + 1e-7)) + (1 / (re_sal + 1e-7)))
hm_max = np.max(localization1, axis=2)
seg_mask = np.argmax(localization1, axis=2) + 1
seg_mask[hm_max < bg_thresh] = 0
return seg_mask
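# Note on the harmonic mean above: 2 / (1/a + 1/b) is small whenever either
# the attention score or the saliency score is small, so a pixel is assigned
# to a foreground class only where both cues agree.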
def filter_weight_dict(weight_dict, model_dict):
# filter the parameters that exist in the pretrained model
pretrained_dict = dict()
for k, v in weight_dict.items():
# keep compatible with the previous version of the network definition
if "conv" in k and "backbone" not in k:
k = "backbone." + k
if k in model_dict:
pretrained_dict[k] = v
model_dict.update(pretrained_dict)
return model_dict
if __name__ == '__main__':
args = parse_args()
device = torch.device("cuda:0")
# input and output
im_tags = pickle.load(open(args.cue_file, "rb"))
if not osp.exists(args.res_path):
os.mkdir(args.res_path)
_, test_dataset = get_dataset(args.dataset_name, args)
batch_size = 8
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
# load net and trained weights
model = SGAN(backbone_name=args.backbone)
weight_dict = torch.load(osp.join(args.save_model_path, args.cfg_name, "model_iter_" + str(args.max_iter) + ".pth"))
model_dict = filter_weight_dict(weight_dict, model.state_dict())
model.load_state_dict(model_dict)
model = model.to(device)
model.eval()
save_path = osp.join(args.res_path, args.cfg_name + args.test_cfg)
if not osp.exists(save_path):
os.makedirs(save_path)
# compute class activation map
with torch.no_grad():
for num, pack in enumerate(test_loader):
names, imgs, labels = pack[0], pack[1].to(device, dtype=torch.float32), \
pack[2].numpy()
fg_sim = pack[3].to(device, dtype=torch.float32)
bg_sim = pack[4].to(device, dtype=torch.float32)
sizes = pack[6].to("cpu").numpy()
if args.combine_seedseg:
_, segs, cams = model.forward_cam(imgs, fg_sim, bg_sim)
cams = cams + segs
# cams = segs
else:
_, _, cams = model.forward_cam(imgs, fg_sim, bg_sim)
np_cams = np.transpose(cams.cpu().numpy(), (0, 2, 3, 1))
_, h, w, c = np_cams.shape
for k, name in enumerate(names):
# get output cam
im_label = im_tags[name]
im_h, im_w = sizes[k]
np_cam = np_cams[k]
# get saliency
bg_cue = cv2.imread(osp.join(args.dataset_root, "sal", args.sdnet_path, name + ".png"), cv2.IMREAD_GRAYSCALE)
bg_cue = bg_cue.astype(np.float32)
bg_cue = bg_cue / 255
seg_mask = get_localization_cues_sec(np_cam, bg_cue, im_label, args.cam_thresh)
# save mask
write_mask = nd.zoom(seg_mask, (im_h / h, im_w / w), order=0)
cv2.imwrite(osp.join(save_path, name + ".png"), write_mask)
|
36682
|
class Solution:
def divisorGame(self, N: int) -> bool:
return N % 2 == 0
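# Parity argument: the player who faces N == 1 loses. Every divisor of an odd
# number is odd, so from odd N the next player always receives an even number,
# while from even N one can choose x = 1 and hand back an odd number. Starting
# from even N therefore forces the opponent down through odd numbers to 1.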
|
36745
|
import sys
import signal
from clint.textui import colored, puts
from downloader import Downloader
from extractor import Extractor
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
def main():
downloader = Downloader()
extractor = Extractor()
url = "https://pornhub.com"
puts(colored.green("getting video keys."))
main_page = downloader.get(url)
view_keys = extractor.get_viewkeys(main_page)
puts(colored.green("starting to download videos."))
for key in view_keys:
puts(colored.green("getting video information."))
absolute_url = "https://pornhub.com/view_video.php?viewkey=" + key
page = downloader.get(absolute_url)
info = extractor.get_video_info(page)
if info is None:
continue
hd_quality = info['mediaDefinitions'][0]
puts(colored.green("downloading video %s." % info['video_title']))
downloader.save_file(hd_quality["videoUrl"], info['video_title'] + ".mp4")
if __name__ == "__main__":
main()
|
36769
|
from .. import DomainAdaptationDataset, SimpleDataset
SolV4folders = [
"/fast-2/datasets/Solv4_strings_wav/audio/Cello",
"/fast-2/datasets/Solv4_strings_wav/audio/Contrabass",
"/fast-2/datasets/Solv4_strings_wav/audio/Violin",
"/fast-2/datasets/Solv4_strings_wav/audio/Viola"
]
def Solv4Strings_DomainAdaptation(out_database_location, preprocess_function):
return DomainAdaptationDataset(out_database_location, SolV4folders,
preprocess_function, "*.wav", 1e11)
def Solv4Strings_Simple(out_database_location, preprocess_function):
return SimpleDataset(out_database_location, SolV4folders,
preprocess_function, "*.wav", 1e11)
|
36782
|
import pp
def test_mzi():
netlist = """
instances:
CP1:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 10
CP2:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 5
arm_top:
component: mzi_arm
arm_bot:
component: mzi_arm
placements:
arm_bot:
mirror: [0,0,0,10]
ports:
W0: CP1,W0
E0: CP2,W0
connections:
arm_bot,W0: CP1,E0
arm_top,W0: CP1,E1
CP2,E0: arm_bot,E0
CP2,E1: arm_top,E0
"""
return pp.component_from_yaml(netlist)
if __name__ == "__main__":
c = test_mzi()
pp.show(c)
pp.plotgds(c)
|
36809
|
from typing import List, Literal, Optional, Sequence
from pydantic import Field, root_validator, validator
from pydantic.main import BaseModel
from weaverbird.pipeline.steps.utils.base import BaseStep
from weaverbird.pipeline.steps.utils.render_variables import StepWithVariablesMixin
from weaverbird.pipeline.steps.utils.validation import validate_unique_columns
from weaverbird.pipeline.types import ColumnName, PopulatedWithFieldnames, TemplatedVariable
AggregateFn = Literal[
'avg',
'sum',
'min',
'max',
'count',
'count distinct',
'first',
'last',
'count distinct including empty',
]
class Aggregation(BaseModel):
class Config(PopulatedWithFieldnames):
...
new_columns: List[ColumnName] = Field(alias='newcolumns')
agg_function: AggregateFn = Field(alias='aggfunction')
columns: List[ColumnName]
@validator('columns', pre=True)
def validate_unique_columns(cls, value):
return validate_unique_columns(value)
@root_validator(pre=True)
def handle_legacy_syntax(cls, values):
if 'column' in values:
values['columns'] = [values.pop('column')]
if 'newcolumn' in values:
values['new_columns'] = [values.pop('newcolumn')]
return values
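# Sketch: the root_validator above rewrites the legacy singular aliases, so
# both constructions below should produce the same Aggregation.
# Aggregation(newcolumns=['total'], aggfunction='sum', columns=['price'])
# Aggregation(newcolumn='total', aggfunction='sum', column='price')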
class AggregateStep(BaseStep):
name = Field('aggregate', const=True)
on: List[ColumnName] = []
aggregations: Sequence[Aggregation]
keep_original_granularity: Optional[bool] = Field(
default=False, alias='keepOriginalGranularity'
)
class Config(PopulatedWithFieldnames):
...
class AggregationWithVariables(Aggregation):
class Config(PopulatedWithFieldnames):
...
new_columns: List[TemplatedVariable] = Field(alias='newcolumns')
agg_function: TemplatedVariable = Field(alias='aggfunction')
columns: List[TemplatedVariable]
class AggregateStepWithVariables(AggregateStep, StepWithVariablesMixin):
aggregations: Sequence[AggregationWithVariables]
|
36845
|
track = dict(
author_username='alexisbcook',
course_name='Data Cleaning',
course_url='https://www.kaggle.com/learn/data-cleaning',
course_forum_url='https://www.kaggle.com/learn-forum/172650'
)
lessons = [ {'topic': topic_name} for topic_name in
['Handling missing values', #1
'Scaling and normalization', #2
'Parsing dates', #3
'Character encodings', #4
'Inconsistent data entry'] #5
]
notebooks = [
dict(
filename='tut1.ipynb',
lesson_idx=0,
type='tutorial',
dataset_sources=['maxhorowitz/nflplaybyplay2009to2016'],
),
dict(
filename='ex1.ipynb',
lesson_idx=0,
type='exercise',
dataset_sources=['aparnashastry/building-permit-applications-data'],
scriptid=10824396
),
dict(
filename='tut2.ipynb',
lesson_idx=1,
type='tutorial',
),
dict(
filename='ex2.ipynb',
lesson_idx=1,
type='exercise',
dataset_sources=['kemical/kickstarter-projects'],
scriptid=10824404
),
dict(
filename='tut3.ipynb',
lesson_idx=2,
type='tutorial',
dataset_sources=['nasa/landslide-events']
),
dict(
filename='ex3.ipynb',
lesson_idx=2,
type='exercise',
dataset_sources=['usgs/earthquake-database', 'smithsonian/volcanic-eruptions'],
scriptid=10824403
),
dict(
filename='tut4.ipynb',
lesson_idx=3,
type='tutorial',
dataset_sources=['kemical/kickstarter-projects']
),
dict(
filename='ex4.ipynb',
lesson_idx=3,
type='exercise',
dataset_sources=['kwullum/fatal-police-shootings-in-the-us'],
scriptid=10824401
),
dict(
filename='tut5.ipynb',
lesson_idx=4,
type='tutorial',
dataset_sources=['alexisbcook/pakistan-intellectual-capital']
),
dict(
filename='ex5.ipynb',
lesson_idx=4,
type='exercise',
dataset_sources=['alexisbcook/pakistan-intellectual-capital'],
scriptid=10824407
),
]
|
36950
|
import json
try:
import cPickle as pickle
except ImportError:
import pickle
def save_json(data, file_path):
with open(file_path, "w") as f:
json.dump(data, f)
def save_json_pretty(data, file_path):
"""save formatted json, use this one for some json config files"""
with open(file_path, "w") as f:
f.write(json.dumps(data, indent=4, sort_keys=True))
def load_json(file_path):
with open(file_path, "r") as f:
return json.load(f)
def save_pickle(data, data_path, highest=False):
protocol = pickle.HIGHEST_PROTOCOL if highest else 2
with open(data_path, "wb") as f:  # pickle requires a binary-mode file
pickle.dump(data, f, protocol=protocol)
def load_pickle(pickle_file):
try:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
except UnicodeDecodeError as e:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f, encoding='latin1')
except Exception as e:
print('Unable to load data ', pickle_file, ':', e)
raise
return pickle_data
def flat_list_of_lists(l):
"""flatten a list of lists [[1,2], [3,4]] to [1,2,3,4]"""
return [item for sublist in l for item in sublist]
def merge_dicts(list_dicts):
merged_dict = list_dicts[0].copy()
for i in range(1, len(list_dicts)):
merged_dict.update(list_dicts[i])
return merged_dict
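# Quick sanity sketch: merge_dicts gives later dicts precedence on key
# collisions, and the first dict is copied rather than mutated.
# merge_dicts([{"a": 1, "b": 1}, {"b": 2}])  # -> {"a": 1, "b": 2}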
|
36982
|
class Solution:
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
k = 0
for n in nums:
k ^= n
return k
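# Why XOR works: x ^ x == 0 and x ^ 0 == x, and XOR is commutative and
# associative, so every paired value cancels and only the single number
# survives.
# Solution().singleNumber([4, 1, 2, 1, 2])  # -> 4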
|
36987
|
import tkinter as tk
import tkinter.ttk as ttk
from .pixel_canvas import PixelCanvas
class DemoWindow(ttk.Frame):
def __init__(self, master, model_wrapper,
canvas_size=50, window_size=28,
refresh_period=50, test_image=None, **kw):
ttk.Frame.__init__(self, master=master, **kw)
self.master = master
self.model_wrapper = model_wrapper
self.canvas_size = canvas_size
self.window_size = window_size
self.refresh_period = refresh_period
self._create_interface()
if test_image is not None:
self.cnv_orig.set_image(test_image)
self.columnconfigure(0, weight=410, minsize=215)
self.columnconfigure(1, weight=410, minsize=210)
self.columnconfigure(2, weight=140, minsize=65)
self.rowconfigure(0, weight=0, minsize=50)
self.rowconfigure(1, weight=1, minsize=220)
self.rowconfigure(2, weight=0, minsize=0)
self.master.after(50, lambda: master.focus_force())
self.master.after(100, self._reconstruct_image)
def _create_interface(self):
self.frm_controls = ttk.Frame(self, padding=(10, 15, 10, 10))
self.frm_controls.grid(row=0, column=0, columnspan=3, sticky=(tk.N, tk.S, tk.W, tk.E))
self.lbl_draw_mode = ttk.Label(self.frm_controls, text="Drawing Mode:")
self.lbl_line_width = ttk.Label(self.frm_controls, text="Line Width:")
self.lbl_refresh_rate = ttk.Label(self.frm_controls, text="Refresh (ms):")
self.var_draw_mode = tk.IntVar(value=1)
self.rad_draw = ttk.Radiobutton(self.frm_controls, text="Draw", variable=self.var_draw_mode, value=1)
self.rad_erase = ttk.Radiobutton(self.frm_controls, text="Erase", variable=self.var_draw_mode, value=0)
self.btn_clear = ttk.Button(
self.frm_controls, text="Clear Image",
command=lambda: self.cnv_orig.clear_image()
)
self.var_width = tk.StringVar(self.frm_controls)
self.spn_width = tk.Spinbox(
self.frm_controls, values=(1, 2, 3, 4, 5), width=10,
state="readonly", textvariable=self.var_width
)
self.var_rate = tk.StringVar(self.frm_controls)
self.spn_rate = tk.Spinbox(
self.frm_controls, values=(10, 20, 50, 100, 200, 500, 1000), width=10,
state="readonly", textvariable=self.var_rate
)
self.var_bbox = tk.IntVar(value=1)
self.cbx_bbox = ttk.Checkbutton(self.frm_controls, text="Bounding Boxes", variable=self.var_bbox)
self.lbl_draw_mode.grid(row=0, column=0, columnspan=2, sticky=(tk.N, tk.W))
self.lbl_line_width.grid(row=0, column=3, sticky=(tk.N, tk.W))
self.lbl_refresh_rate.grid(row=0, column=4, sticky=(tk.N, tk.W))
self.rad_draw.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.rad_erase.grid(row=1, column=1, sticky=(tk.N, tk.S, tk.W, tk.E), padx=(0, 20))
self.btn_clear.grid(row=1, column=2, sticky=(tk.N, tk.S, tk.W, tk.E), padx=(0, 20))
self.spn_width.grid(row=1, column=3, sticky=(tk.N, tk.S, tk.W, tk.E), padx=(0, 20))
self.spn_rate.grid(row=1, column=4, sticky=(tk.N, tk.S, tk.W, tk.E), padx=(0, 20))
self.cbx_bbox.grid(row=1, column=5, sticky=(tk.N, tk.S, tk.W, tk.E))
self.var_draw_mode.trace("w", lambda *_: self._set_draw_mode(self.var_draw_mode.get() == 1))
self.var_width.trace("w", lambda *_: self.cnv_orig.set_line_width(int(self.var_width.get())))
self.var_rate.trace("w", lambda *_: self._set_refresh_period(int(self.var_rate.get())))
self.var_bbox.trace("w", lambda *_: self._set_bbox_visibility(self.var_bbox.get() == 1))
self.frm_canvas_orig = ttk.Frame(self, padding=(10, 10, 5, 10))
self.frm_canvas_orig.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_canvas_orig.columnconfigure(0, weight=1, minsize=200)
self.frm_canvas_orig.rowconfigure(0, weight=0, minsize=20)
self.frm_canvas_orig.rowconfigure(1, weight=1, minsize=200)
self.lbl_orig = ttk.Label(self.frm_canvas_orig, text="Original Image (draw here):")
self.cnv_orig = PixelCanvas(
self.frm_canvas_orig, self.canvas_size, self.canvas_size, drawable=True,
highlightthickness=0, borderwidth=0, width=400, height=400
)
self.lbl_orig.grid(row=0, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.cnv_orig.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_canvas_rec = ttk.Frame(self, padding=(5, 10, 5, 10))
self.frm_canvas_rec.grid(row=1, column=1, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_canvas_rec.columnconfigure(0, weight=1, minsize=200)
self.frm_canvas_rec.rowconfigure(0, weight=0, minsize=20)
self.frm_canvas_rec.rowconfigure(1, weight=1, minsize=200)
self.lbl_rec = ttk.Label(self.frm_canvas_rec, text="Reconstructed Image:")
self.cnv_rec = PixelCanvas(
self.frm_canvas_rec, self.canvas_size, self.canvas_size, drawable=False,
highlightthickness=0, borderwidth=0, width=400, height=400
)
self.lbl_rec.grid(row=0, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.cnv_rec.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_windows = ttk.Frame(self, padding=(0, 0, 0, 0))
self.frm_windows.grid(row=1, column=2, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_windows.columnconfigure(0, weight=1)
self.frm_canvas_win, self.lbl_win, self.cnv_win = [], [], []
for i in range(3):
self.frm_windows.rowconfigure(i, weight=1)
frm_canvas_win = ttk.Frame(
self.frm_windows,
padding=(5, 10 if i == 0 else 0, 10, 10 if i == 2 else 0)
)
frm_canvas_win.grid(row=i, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
frm_canvas_win.columnconfigure(0, weight=1, minsize=50)
frm_canvas_win.rowconfigure(0, weight=0, minsize=20)
frm_canvas_win.rowconfigure(1, weight=1, minsize=50)
lbl_win = ttk.Label(
frm_canvas_win, text="VAE Rec. #{0}:".format(i+1)
)
cnv_win = PixelCanvas(
frm_canvas_win, self.window_size, self.window_size, drawable=False,
highlightthickness=0, borderwidth=0, width=120, height=120
)
lbl_win.grid(row=0, column=0, sticky=(tk.S, tk.W))
cnv_win.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_canvas_win.append(frm_canvas_win)
self.lbl_win.append(lbl_win)
self.cnv_win.append(cnv_win)
self.lbl_status = ttk.Label(self, borderwidth=1, relief="sunken", padding=(5, 2))
self.lbl_status.grid(row=2, column=0, columnspan=3, sticky=(tk.N, tk.S, tk.W, tk.E))
self.cnv_orig.bind("<Button-2>", lambda *_: self.cnv_orig.clear_image())
self.cnv_orig.bind("<Button-3>", lambda *_: self.cnv_orig.clear_image())
self.var_draw_mode.set(1)
self.var_width.set("3")
self.var_rate.set("50")
self.var_bbox.set(1)
def _reconstruct_image(self):
dig, pos, rec, win, lat, loss = self.model_wrapper.infer(
[self.cnv_orig.get_image()]
)
self.cnv_rec.set_image(rec[0])
self.cnv_rec.set_bbox_positions(pos[0])
self.cnv_orig.set_bbox_positions(pos[0])
for i in range(len(self.cnv_win)):
if i < len(win[0]):
self.cnv_win[i].set_image(win[0][i])
self.cnv_win[i].set_bbox_positions(
[[0.0, -2.0, -2.0]] * i + [[0.99, 0.0, 0.0]]
)
else:
self.cnv_win[i].clear_image()
self.cnv_win[i].set_bbox_positions([])
self.lbl_status.configure(
text="Reconstruction loss (negative log-likelihood): {0:.3f}".format(
abs(loss[0])
)
)
self.master.after(self.refresh_period, self._reconstruct_image)
def _set_refresh_period(self, value):
self.refresh_period = value
def _set_bbox_visibility(self, visible):
self.cnv_orig.set_bbox_visibility(visible)
self.cnv_rec.set_bbox_visibility(visible)
def _set_draw_mode(self, draw):
self.cnv_orig.set_erasing_mode(not draw)
self.cnv_orig.config(cursor=("cross" if draw else "icon"))
|
37009
|
import os
try:
from xdebug.unittesting import XdebugDeferrableTestCase
except ImportError:
from SublimeTextXdebug.xdebug.unittesting import XdebugDeferrableTestCase
class TestBreakpointStep(XdebugDeferrableTestCase):
breakpoint_step_file = 'breakpoint_step.php'
breakpoint_step_file_local_path = os.path.join(XdebugDeferrableTestCase.local_path, breakpoint_step_file)
def test_step_into(self):
self.set_breakpoint(self.breakpoint_step_file_local_path, 11)
self.run_command('xdebug_session_start')
yield self.window_has_debug_layout
breakpoint_view = self.get_view_by_title('Xdebug Breakpoint')
context_view = self.get_view_by_title('Xdebug Context')
stack_view = self.get_view_by_title('Xdebug Stack')
self.assertViewContains(breakpoint_view, '=> {file_local_path}\n\t|+| 11'.format(file_local_path=self.breakpoint_step_file_local_path))
self.assertViewIsEmpty(context_view)
self.assertViewIsEmpty(stack_view)
self.send_server_request(path=self.breakpoint_step_file)
def context_and_stack_have_content():
return not self.view_is_empty(context_view) and not self.view_is_empty(stack_view)
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greeting = <uninitialized>')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:11, {{main}}()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
context_view_contents = self.get_contents_of_view(context_view)
stack_view_contents = self.get_contents_of_view(stack_view)
def context_and_stack_have_different_content():
return self.get_contents_of_view(context_view) != context_view_contents and self.get_contents_of_view(stack_view) != stack_view_contents
self.run_command('xdebug_execute', {'command': 'step_into'})
yield context_and_stack_have_different_content
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greet = <uninitialized>')
self.assertViewContains(context_view, '$name = (string) Stranger')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:4, greet()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
context_view_contents = self.get_contents_of_view(context_view)
stack_view_contents = self.get_contents_of_view(stack_view)
def context_and_stack_have_different_content():
return self.get_contents_of_view(context_view) != context_view_contents and self.get_contents_of_view(stack_view) != stack_view_contents
self.run_command('xdebug_execute', {'command': 'step_into'})
yield context_and_stack_have_different_content
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greet = (string) Hi')
self.assertViewContains(context_view, '$name = (string) Stranger')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:5, greet()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
def test_step_out(self):
self.set_breakpoint(self.breakpoint_step_file_local_path, 5)
self.run_command('xdebug_session_start')
yield self.window_has_debug_layout
breakpoint_view = self.get_view_by_title('Xdebug Breakpoint')
context_view = self.get_view_by_title('Xdebug Context')
stack_view = self.get_view_by_title('Xdebug Stack')
self.assertViewContains(breakpoint_view, '=> {file_local_path}\n\t|+| 5'.format(file_local_path=self.breakpoint_step_file_local_path))
self.assertViewIsEmpty(context_view)
self.assertViewIsEmpty(stack_view)
self.send_server_request(path=self.breakpoint_step_file)
def context_and_stack_have_content():
return not self.view_is_empty(context_view) and not self.view_is_empty(stack_view)
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greet = (string) Hi')
self.assertViewContains(context_view, '$name = (string) Stranger')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:5, greet()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
context_view_contents = self.get_contents_of_view(context_view)
stack_view_contents = self.get_contents_of_view(stack_view)
def context_and_stack_have_different_content():
return self.get_contents_of_view(context_view) != context_view_contents and self.get_contents_of_view(stack_view) != stack_view_contents
self.run_command('xdebug_execute', {'command': 'step_out'})
yield context_and_stack_have_different_content
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greeting = (string) Hello Stranger!')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:12, {{main}}()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
def test_step_over(self):
self.set_breakpoint(self.breakpoint_step_file_local_path, 11)
self.run_command('xdebug_session_start')
yield self.window_has_debug_layout
breakpoint_view = self.get_view_by_title('Xdebug Breakpoint')
context_view = self.get_view_by_title('Xdebug Context')
stack_view = self.get_view_by_title('Xdebug Stack')
self.assertViewContains(breakpoint_view, '=> {file_local_path}\n\t|+| 11'.format(file_local_path=self.breakpoint_step_file_local_path))
self.assertViewIsEmpty(context_view)
self.assertViewIsEmpty(stack_view)
self.send_server_request(path=self.breakpoint_step_file)
def context_and_stack_have_content():
return not self.view_is_empty(context_view) and not self.view_is_empty(stack_view)
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greeting = <uninitialized>')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:11, {{main}}()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
context_view_contents = self.get_contents_of_view(context_view)
stack_view_contents = self.get_contents_of_view(stack_view)
def context_and_stack_have_different_content():
return self.get_contents_of_view(context_view) != context_view_contents and self.get_contents_of_view(stack_view) != stack_view_contents
self.run_command('xdebug_execute', {'command': 'step_over'})
yield context_and_stack_have_different_content
yield context_and_stack_have_content
self.assertViewContains(context_view, '$greeting = (string) Hello Stranger!')
self.assertViewContains(stack_view, '[0] file://{remote_path}/{file}:12, {{main}}()'.format(remote_path=self.remote_path, file=self.breakpoint_step_file))
|
37014
|
from unittest import TestCase
from parameterized import parameterized
from tests.test_utils import mock_request_handler
from web.web_auth_utils import remove_webpack_suffixes, is_allowed_during_login
class WebpackSuffixesTest(TestCase):
def test_remove_webpack_suffixes_when_css(self):
normalized = remove_webpack_suffixes('js/chunk-login-vendors.59040343.css')
self.assertEqual('js/chunk-login-vendors.css', normalized)
def test_remove_webpack_suffixes_when_js(self):
normalized = remove_webpack_suffixes('js/login.be16f278.js')
self.assertEqual('js/login.js', normalized)
def test_remove_webpack_suffixes_when_js_map(self):
normalized = remove_webpack_suffixes('js/login.be16f278.js.map')
self.assertEqual('js/login.js.map', normalized)
def test_remove_webpack_suffixes_when_favicon(self):
normalized = remove_webpack_suffixes('favicon.123.ico')
self.assertEqual('favicon.123.ico', normalized)
def test_remove_webpack_suffixes_when_no_suffixes(self):
normalized = remove_webpack_suffixes('css/chunk-login-vendors.css')
self.assertEqual('css/chunk-login-vendors.css', normalized)
def test_remove_webpack_suffixes_when_no_extension(self):
normalized = remove_webpack_suffixes('data/some_file')
self.assertEqual('data/some_file', normalized)
class LoginResourcesTest(TestCase):
@parameterized.expand([
('/favicon.ico'),
('login.html'),
('/js/login.be16f278.js'),
('/js/login.be16f278.js.map'),
('/js/chunk-login-vendors.18e22e7f.js'),
('/js/chunk-login-vendors.18e22e7f.js.map'),
('/img/titleBackground_login.a6c36d4c.jpg'),
('/css/login.8e74be0f.css'),
('/fonts/roboto-latin-400.60fa3c06.woff'),
('/fonts/roboto-latin-400.479970ff.woff2'),
('/fonts/roboto-latin-500.020c97dc.woff2'),
('/fonts/roboto-latin-500.87284894.woff')
])
def test_is_allowed_during_login_when_allowed(self, resource):
request_handler = mock_request_handler(method='GET')
allowed = is_allowed_during_login(resource, 'login.html', request_handler)
self.assertTrue(allowed, 'Resource ' + resource + ' should be allowed, but was not')
def test_is_allowed_during_login_when_prohibited(self):
request_handler = mock_request_handler(method='GET')
resource = 'admin.html'
allowed = is_allowed_during_login(resource, 'login.html', request_handler)
self.assertFalse(allowed, 'Resource ' + resource + ' should NOT be allowed, but WAS')
|
37061
|
from instauto.api.client import ApiClient
from instauto.helpers.post import unlike_post
client = ApiClient.initiate_from_file('.instauto.save')
unlike_post(client, "media_id")
|
37074
|
from pygsti.report.table import ReportTable
from ..util import BaseCase
class TableInstanceTester(BaseCase):
custom_headings = {
'html': 'test',
'python': 'test',
'latex': 'test'
}
def setUp(self):
self.table = ReportTable(self.custom_headings, ['Normal'] * 4) # Four formats
def test_element_accessors(self):
self.table.add_row(['1.0'], ['Normal'])
self.assertTrue('1.0' in self.table)
self.assertEqual(len(self.table), self.table.num_rows)
row_by_key = self.table.row(key=self.table.row_names[0])
row_by_idx = self.table.row(index=0)
self.assertEqual(row_by_key, row_by_idx)
col_by_key = self.table.col(key=self.table.col_names[0])
col_by_idx = self.table.col(index=0)
self.assertEqual(col_by_key, col_by_idx)
def test_to_string(self):
s = str(self.table)
# TODO assert correctness
def test_render_HTML(self):
self.table.add_row(['1.0'], ['Normal'])
self.table.add_row(['1.0'], ['Normal'])
render = self.table.render('html')
# TODO assert correctness
def test_render_LaTeX(self):
self.table.add_row(['1.0'], ['Normal'])
self.table.add_row(['1.0'], ['Normal'])
render = self.table.render('latex')
# TODO assert correctness
def test_finish(self):
self.table.add_row(['1.0'], ['Normal'])
self.table.finish()
# TODO assert correctness
def test_render_raises_on_unknown_format(self):
with self.assertRaises(NotImplementedError):
self.table.render('foobar')
def test_raise_on_invalid_accessor(self):
# XXX are these necessary? EGN: maybe not - checks invalid inputs, which maybe shouldn't need testing?
with self.assertRaises(KeyError):
self.table['foobar']
with self.assertRaises(KeyError):
self.table.row(key='foobar') # invalid key
with self.assertRaises(ValueError):
self.table.row(index=100000) # out of bounds
with self.assertRaises(ValueError):
self.table.row() # must specify key or index
with self.assertRaises(ValueError):
self.table.row(key='foobar', index=1) # cannot specify key and index
with self.assertRaises(KeyError):
self.table.col(key='foobar') # invalid key
with self.assertRaises(ValueError):
self.table.col(index=100000) # out of bounds
with self.assertRaises(ValueError):
self.table.col() # must specify key or index
with self.assertRaises(ValueError):
self.table.col(key='foobar', index=1) # cannot specify key and index
class CustomHeadingTableTester(TableInstanceTester):
def setUp(self):
self.table = ReportTable([0.1], ['Normal'], self.custom_headings)
def test_labels(self):
self.table.add_row(['1.0'], ['Normal'])
self.assertTrue('1.0' in self.table)
rowLabels = list(self.table.keys())
self.assertEqual(rowLabels, self.table.row_names)
self.assertEqual(len(rowLabels), self.table.num_rows)
self.assertTrue(rowLabels[0] in self.table)
row1Data = self.table[rowLabels[0]]
colLabels = list(row1Data.keys())
self.assertEqual(colLabels, self.table.col_names)
self.assertEqual(len(colLabels), self.table.num_cols)
class CustomHeadingNoFormatTableTester(TableInstanceTester):
def setUp(self):
self.table = ReportTable(self.custom_headings, None)
|
37078
|
from keras.models import Model
from keras.layers import Input
from keras.layers.core import Activation
from keras.layers.convolutional import Conv3D, Deconv3D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
def generator(phase_train=True, params={'z_size':200, 'strides':(2,2,2), 'kernel_size':(4,4,4)}):
"""
Returns a Generator Model with input params and phase_train
Args:
phase_train (boolean): training phase or not
params (dict): Dictionary with model parameters
Returns:
model (keras.Model): Keras Generator model
"""
z_size = params['z_size']
strides = params['strides']
kernel_size = params['kernel_size']
inputs = Input(shape=(1, 1, 1, z_size))
g1 = Deconv3D(filters=512, kernel_size=kernel_size,
strides=(1, 1, 1), kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='valid')(inputs)
g1 = BatchNormalization()(g1, training=phase_train)
g1 = Activation(activation='relu')(g1)
g2 = Deconv3D(filters=256, kernel_size=kernel_size,
strides=strides, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(g1)
g2 = BatchNormalization()(g2, training=phase_train)
g2 = Activation(activation='relu')(g2)
g3 = Deconv3D(filters=128, kernel_size=kernel_size,
strides=strides, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(g2)
g3 = BatchNormalization()(g3, training=phase_train)
g3 = Activation(activation='relu')(g3)
g4 = Deconv3D(filters=64, kernel_size=kernel_size,
strides=strides, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(g3)
g4 = BatchNormalization()(g4, training=phase_train)
g4 = Activation(activation='relu')(g4)
g5 = Deconv3D(filters=1, kernel_size=kernel_size,
strides=strides, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(g4)
g5 = BatchNormalization()(g5, training=phase_train)
g5 = Activation(activation='sigmoid')(g5)
model = Model(inputs=inputs, outputs=g5)
model.summary()
return model
def discriminator(phase_train=True, params={'cube_len': 64, 'strides': (2, 2, 2), 'kernel_size': (4, 4, 4), 'leak_value': 0.2}):
"""
Returns a Discriminator Model with input params and phase_train
Args:
phase_train (boolean): training phase or not
params (dict): Dictionary with model parameters
Returns:
model (keras.Model): Keras Discriminator model
"""
cube_len = params['cube_len']
strides = params['strides']
kernel_size = params['kernel_size']
leak_value = params['leak_value']
inputs = Input(shape=(cube_len, cube_len, cube_len, 1))
d1 = Conv3D(filters=64, kernel_size=kernel_size,
strides=strides, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(inputs)
d1 = BatchNormalization()(d1, training=phase_train)
d1 = LeakyReLU(leak_value)(d1)
d2 = Conv3D(filters=128, kernel_size=kernel_size,
strides=strides, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(d1)
d2 = BatchNormalization()(d2, training=phase_train)
d2 = LeakyReLU(leak_value)(d2)
d3 = Conv3D(filters=256, kernel_size=kernel_size,
strides=strides, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(d2)
d3 = BatchNormalization()(d3, training=phase_train)
d3 = LeakyReLU(leak_value)(d3)
d4 = Conv3D(filters=512, kernel_size=kernel_size,
strides=strides, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(d3)
d4 = BatchNormalization()(d4, training=phase_train)
d4 = LeakyReLU(leak_value)(d4)
d5 = Conv3D(filters=1, kernel_size=kernel_size,
strides=(1, 1, 1), kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='valid')(d4)
d5 = BatchNormalization()(d5, training=phase_train)
d5 = Activation(activation='sigmoid')(d5)
model = Model(inputs=inputs, outputs=d5)
model.summary()
return model
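# Hedged usage sketch: with the default params above, the generator maps a
# (1, 1, 1, 200) latent tensor to a 64x64x64 voxel grid (1 -> 4 -> 8 -> 16 ->
# 32 -> 64 per deconv stage) and the discriminator maps such a grid back to a
# single sigmoid score.
# g = generator(phase_train=True)
# d = discriminator(phase_train=True)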
|
37102
|
import logging
import os
from typing import Generic, List, Type, Any
import torch
import torch.nn as nn
from ..downloading.downloading_utils import from_cache
from ..featurization.featurization_api import T_BatchEncoding, T_Config, PretrainedFeaturizerMixin
class PretrainedModelBase(nn.Module, Generic[T_BatchEncoding, T_Config]):
def __init__(self, config: T_Config):
super().__init__()
self.config = config
def forward(self, batch: T_BatchEncoding):
raise NotImplementedError
@classmethod
def _get_archive_dict(cls) -> dict:
raise NotImplementedError
@classmethod
def get_config_cls(cls) -> Type[T_Config]:
raise NotImplementedError
@classmethod
def get_featurizer_cls(cls) -> Type[PretrainedFeaturizerMixin[Any, T_BatchEncoding, T_Config]]:
raise NotImplementedError
@classmethod
def from_pretrained(cls,
pretrained_name: str, *,
excluded: List[str] = None,
config: T_Config = None) -> "PretrainedModelBase[T_BatchEncoding, T_Config]":
archive_dict = cls._get_archive_dict()
file_path = from_cache(pretrained_name, archive_dict, 'pt')
if not file_path:
file_path = os.path.expanduser(pretrained_name)
if not os.path.exists(file_path):
raise FileNotFoundError(file_path)
if not config:
raise AttributeError('Set \'config\' attribute when using local path to weights.')
if not config:
config_cls = cls.get_config_cls()
config = config_cls.from_pretrained(pretrained_name)
model = cls(config)
model.load_weights(file_path, excluded=excluded)
return model
def init_weights(self, init_type: str):
for p in self.parameters():
if p.dim() > 1:
if init_type == 'uniform':
nn.init.xavier_uniform_(p)
elif init_type == 'normal':
nn.init.xavier_normal_(p)
else:
raise NotImplementedError()
def _remove_excluded(self, dictionary: dict, *, excluded: List[str] = None):
excluded = excluded if excluded else []
return {k: v for k, v in dictionary.items() if all(k.split('.')[0] != p for p in excluded)}
def load_weights(self, file_path: str, *, excluded: List[str] = None):
state_dict = torch.load(file_path, map_location='cpu')
state_dict = self._remove_excluded(state_dict, excluded=excluded)
result = self.load_state_dict(state_dict, strict=False)
if len(result.missing_keys) > 0:
logging.info(f'Missing keys when loading: {result.missing_keys}')
if len(result.unexpected_keys) > 0:
logging.warning(f'Unexpected keys when loading: {result.unexpected_keys}')
def save_weights(self, file_path: str, *, excluded: List[str] = None):
state_dict = self.state_dict()
state_dict = self._remove_excluded(state_dict, excluded=excluded)
torch.save(state_dict, file_path)
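# Hedged sketch (SomeModel, SomeConfig and the path are hypothetical): per the
# checks in from_pretrained above, a registered archive name resolves its own
# config, while a local weights path requires an explicit config.
# model = SomeModel.from_pretrained('some_pretrained_name')
# model = SomeModel.from_pretrained('~/weights/model.pt',
#                                   config=SomeConfig(), excluded=['head'])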
|
37114
|
import pytest
from django.urls import reverse
class TestImageUpload:
@pytest.mark.django_db
def test_upload_image_not_authenticated(self, client, small_jpeg_io):
upload_url = reverse("cast:api:upload_image")
small_jpeg_io.seek(0)
r = client.post(upload_url, {"original": small_jpeg_io})
# redirect to login
assert r.status_code == 302
@pytest.mark.django_db
def test_upload_image_authenticated(self, client, user, small_jpeg_io):
# login
r = client.login(username=user.username, password=<PASSWORD>)
# upload
upload_url = reverse("cast:api:upload_image")
small_jpeg_io.seek(0)
r = client.post(upload_url, {"original": small_jpeg_io})
assert r.status_code == 201
assert int(r.content.decode("utf-8")) > 0
|
37168
|
import logging
l = logging.getLogger("archr.analyzers.datascout")
from ..errors import ArchrError
from . import Analyzer
# Keystone engine 0.9.2 (incorrectly) defaults to radix 16. so we'd better off only using 0x-prefixed integers from now.
# See the related PR: https://github.com/keystone-engine/keystone/pull/382
# and the related issue: https://github.com/keystone-engine/keystone/issues/436
class DataScoutAnalyzer(Analyzer):
"""
Grabs the environment and auxiliary vector from the target.
"""
REQUIRED_IMPLANT = "shellphish_qemu"
def __init__(self, target, analyzer=None):
super().__init__(target)
self.env = None
self.argv = None
self.auxv = None
self.map = None
self.analyzer = analyzer
def _pushstr(self, s):
"""
push a string onto the stack
"""
def _cutstr(bits, little=True):
w = bits // 8 # word size
byte_order = -1 if little else 1
n = ["0"] + [s[i:i + w].ljust(w, "\0")[::byte_order].encode('utf-8').hex() for i in range(0, len(s), w)][::-1]
return n
if self.target.target_arch == 'x86_64':
elems = _cutstr(64)
return "".join("mov rax, 0x%s; push rax; " % word for word in elems)
elif self.target.target_arch == 'i386':
elems = _cutstr(32)
return "".join("mov eax, 0x%s; push eax; " % word for word in elems)
elif self.target.target_arch in ('mips', 'mipsel'):
elems = _cutstr(32, little=self.target.target_arch != 'mips')
return "".join("li $t0, 0x%s; addi $sp, $sp, -4; sw $t0, 0($sp);" % word for word in elems)
elif self.target.target_arch == 'arm':
elems = _cutstr(32)
return "".join(f"movw r0, #0x{word} & 0xffff; movt r0, #0x{word} >> 16; push {{r0}};" for word in elems)
else:
raise NotImplementedError()
def read_file_shellcode(self, filename):
"""
shellcode to read the content of a file
"""
if self.target.target_arch == 'x86_64':
return (
self._pushstr(filename) +
"mov rdi, rsp; xor rsi, rsi; xor rdx, rdx; mov rax, 2; syscall;" + # fd = open(path, O_RDONLY, 0)
"mov r12, rax; sub rsp, 0x1000;" + # alloca 0x1000
"loop_head:" +
"xor rax, rax; mov rdi, r12; mov rsi, rsp; mov rdx, 0x1000; syscall;" + # n = read(fd, rsp, 0x1000)
"mov r13, rax;" + # save n
"mov rax, 1; mov rdi, 1; mov rsi, rsp; mov rdx, r13; syscall;" + # write(1, rsp, n)
"test r13, r13; jnz loop_head;" # loop untill we are done with the file
)
elif self.target.target_arch == 'i386':
return (
self._pushstr(filename) +
"mov ebx, esp; xor ecx, ecx; xor edx, edx; mov eax, 5; int 0x80;" + # n = open(path, O_RDONLY, 0)
"mov esi, eax; sub esp, 0x1000;" + # alloca 0x1000, fd = esi
"loop_head:" +
"mov eax, 3; mov ebx, esi; mov ecx, esp; mov edx, 0x1000; int 0x80;" + # n = read(fd, rsp, 0x1000)
"mov edi, eax;"+ # save n
"mov eax, 4; mov ebx, 1; mov ecx, esp; mov edx, edi; int 0x80;" + # write(1, rsp, n)
"test edi, edi; jnz loop_head;" # loop untill we are done with the file
)
elif self.target.target_arch in ('mips', 'mipsel'):
return (
self._pushstr(filename) +
"move $a0, $sp; xor $a1, $a1, $a1; xor $a2, $a2, $a2; li $v0, 0xfa5; syscall;" + # n = open(path, O_RDONLY, 0)
"move $s0, $v0; li $a0, 0x1000; sub $sp, $sp, $a0;" + # alloca 0x1000, fd = $s0
"loop_head:" +
"li $v0, 0xfa3; move $a0, $s0; move $a1, $sp; li $a2, 0x1000; syscall;" + # n = read(fd, rsp, 0x1000)
"move $s1, $v0;" + # save n
"li $v0, 0xfa4; li $a0, 1; move $a1, $sp; move $a2, $s1; syscall;" + # write(1, rsp, n)
"bne $s1, 0, loop_head;" # loop untill we are done with the file
)
elif self.target.target_arch == 'arm':
return (
self._pushstr(filename) +
"mov r0, sp; eor r1, r1; eor r2, r2; mov r7, #5; svc 0;" + # n = open(path, O_RDONLY, 0)
"mov r8, r0; sub sp, sp, 0x1000;" + # alloca 0x1000, fd = $r8
"loop_head:" +
"mov r7, #3; mov r0, r8; mov r1, sp; mov r2, 0x1000; svc 0;" + # n = read(fd, rsp, 0x1000)
"mov r9, r0;" + # save n to r9
"mov r7, #4; mov r0, 1; mov r1, sp; mov r2, r9; svc 0;" + # write(1, rsp, n)
"cmp r9, #0; bne loop_head;" # loop untill we are done with the file
)
else:
raise NotImplementedError("Unknown target architecure: \"%s\"!" % self.target.target_arch)
def echo_shellcode(self, what):
if self.target.target_arch == 'x86_64':
return (
self._pushstr(what) +
"mov rdi, 1; mov rsi, rsp; mov rdx, %#x; mov rax, 1; syscall;" % len(what) # n = write(1, rsp, 0x1000)
)
elif self.target.target_arch == 'i386':
return (
self._pushstr(what) +
"mov ebx, 1; mov ecx, esp; mov edx, %#x; mov eax, 4; int 0x80;" % len(what) # n = write(1, esp, 0x1000)
)
elif self.target.target_arch in ('mips', 'mipsel'):
return (
self._pushstr(what) +
"li $a0, 1; move $a1, $sp; li $a2, %#x; li $v0, 0xfa4; syscall;" % len(what) # n = write(1, sp, 0x1000)
)
elif self.target.target_arch == 'arm':
return (
self._pushstr(what) +
"mov r0, #1; mov r1, sp; mov r2, #%#x; mov r7, #4; svc 0;" % len(what) # n = write(1, sp, 0x1000)
)
else:
raise NotImplementedError()
def brk_shellcode(self):
if self.target.target_arch == 'x86_64':
return "mov rax, 0xc; xor rdi, rdi; syscall; mov rdi, rax; add rdi, 0x1000; mov rax, 0xc; syscall;"
elif self.target.target_arch == 'i386':
# n = brk 0
# brk n + 0x1000
return "mov eax, 0x2d; xor ebx, ebx; int 0x80; mov ebx, eax; add ebx, 0x1000; mov eax, 0x2d; int 0x80;"
elif self.target.target_arch in ('mips', 'mipsel'):
# n = brk 0
# brk n + 0x1000
return "xor $a0, $a0, $a0; li $v0, 0xfcd; syscall; add $a0, $v0, 0x1000; li $v0, 0xfcd; syscall;"
elif self.target.target_arch == 'arm':
# n = brk 0
# brk n + 0x1000
return "eor r0, r0; mov r7, #0x2d; svc 0; add r0, #0x1000; mov r7, #0x2d; svc 0;"
else:
raise NotImplementedError()
def exit_shellcode(self, exit_code=42):
if self.target.target_arch == 'x86_64':
return "mov rdi, %#x; mov rax, 0x3c; syscall;" % exit_code # exit(code)
elif self.target.target_arch == 'i386':
return "mov ebx, %#x; mov eax, 1; int 0x80;" % exit_code # exit(code)
elif self.target.target_arch in ('mips', 'mipsel'):
return "li $a0, %#x; li $v0, 0xfa1; syscall;" % exit_code # exit(code)
elif self.target.target_arch == 'arm':
return "mov r0, #%#x; mov r7, #1; svc 0;" % exit_code # exit(code)
else:
raise NotImplementedError()
def run_shellcode(self, shellcode, aslr=False, **kwargs):
exit_code = 42
# build the args
if self.analyzer:
args = self.analyzer._build_command()
else:
args = self.target.target_args
# run command within the shellcode context
with self.target.shellcode_context(args, asm_code=shellcode+self.exit_shellcode(exit_code=exit_code), aslr=aslr, **kwargs) as p:
output, stderr = p.communicate()
if p.returncode != exit_code:
raise ArchrError("DataScout failed to get info from the target process.\n"
"stdout: %s\nstderr: %s" % (output, stderr))
return output
def fire(self, aslr=False, **kwargs): #pylint:disable=arguments-differ
if self.target.target_os == 'cgc':
return [], [], b'', {}
if not self.argv:
output = self.run_shellcode(self.read_file_shellcode("/proc/self/cmdline"), aslr=aslr, **kwargs)
self.argv = output.split(b'\0')[:-1]
if not self.env:
output = self.run_shellcode(self.read_file_shellcode("/proc/self/environ"), aslr=aslr, **kwargs)
self.env = output.split(b'\0')[:-1]
if not self.auxv:
output = self.run_shellcode(self.read_file_shellcode("/proc/self/auxv"), aslr=aslr, **kwargs)
self.auxv = output
if not self.map:
output = self.run_shellcode(self.brk_shellcode()+self.read_file_shellcode("/proc/self/maps"), aslr=aslr, **kwargs)
self.map = parse_proc_maps(output)
return self.argv, self.env, self.auxv, self.map
# imported at the bottom of the module to avoid a circular import
from ..utils import parse_proc_maps
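# Usage sketch (hedged; the target/analyzer names follow archr's public API
# and are not defined in this file):
#
#   import archr
#   with archr.targets.LocalTarget(["/bin/cat", "/etc/passwd"]).build().start() as target:
#       argv, env, auxv, mem_map = archr.analyzers.DataScoutAnalyzer(target).fire()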
|
37217
|
import datetime as dt
import unittest
from AShareData import set_global_config
from AShareData.model import *
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
set_global_config('config.json')
    def test_something(self):
        # placeholder assertion; this test always fails as written
        self.assertEqual(True, False)
@staticmethod
def test_FF3factor_return():
model = FamaFrench3FactorModel()
smb = SMBandHMLCompositor(model)
date = dt.datetime(2021, 3, 9)
pre_date = dt.datetime(2021, 3, 8)
pre_month_date = dt.datetime(2021, 2, 26)
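        # Marker semantics below are inferred from the dates used, not from
        # any documentation: rebalance_marker/period_marker 'D' = daily, 'M' = monthly.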
smb.compute_factor_return(balance_date=pre_date, pre_date=pre_date, date=date,
rebalance_marker='D', period_marker='D')
smb.compute_factor_return(balance_date=pre_month_date, pre_date=pre_date, date=date,
rebalance_marker='M', period_marker='D')
smb.compute_factor_return(balance_date=pre_month_date, pre_date=pre_month_date, date=date,
rebalance_marker='M', period_marker='M')
@staticmethod
def test_FFC4_factor_return():
model = FamaFrenchCarhart4FactorModel()
umd = UMDCompositor(model)
date = dt.datetime(2021, 3, 9)
pre_date = dt.datetime(2021, 3, 8)
pre_month_date = dt.datetime(2021, 2, 26)
umd.compute_factor_return(balance_date=pre_date, pre_date=pre_date, date=date,
rebalance_marker='D', period_marker='D')
umd.compute_factor_return(balance_date=pre_month_date, pre_date=pre_date, date=date,
rebalance_marker='M', period_marker='D')
umd.compute_factor_return(balance_date=pre_month_date, pre_date=pre_month_date, date=date,
rebalance_marker='M', period_marker='M')
if __name__ == '__main__':
unittest.main()
|
37259
|
import contextlib
import random
import string
from password_strength import PasswordStats
from redbot.core import commands
from redbot.core.utils import chat_formatting as cf
from .word_list import *
GREEN_CIRCLE = "\N{LARGE GREEN CIRCLE}"
YELLOW_CIRCLE = "\N{LARGE YELLOW CIRCLE}"
ORANGE_CIRCLE = "\N{LARGE ORANGE CIRCLE}"
RED_CIRCLE = "\N{LARGE RED CIRCLE}"
class Encryptor(commands.Cog):
"""
    Create and validate the strength of passwords.
"""
__author__ = ["Kreusada"]
__version__ = "1.1.0"
def __init__(self, bot):
self.bot = bot
def format_help_for_context(self, ctx: commands.Context) -> str:
context = super().format_help_for_context(ctx)
authors = ", ".join(self.__author__)
return f"{context}\n\nAuthor: {authors}\nVersion: {self.__version__}"
async def red_delete_data_for_user(self, **kwargs):
"""Nothing to delete"""
return
def cog_unload(self):
with contextlib.suppress(Exception):
self.bot.remove_dev_env_value("encryptor")
async def initialize(self) -> None:
if 719988449867989142 in self.bot.owner_ids:
with contextlib.suppress(Exception):
self.bot.add_dev_env_value("encryptor", lambda x: self)
@commands.group()
async def password(self, ctx):
"""
        Create and validate the strength of passwords.
"""
pass
@password.group(name="generate")
async def password_generate(self, ctx):
"""Generate passwords."""
pass
@password_generate.command(name="complex")
async def password_generate_complex(self, ctx):
"""Generate a complex password."""
await ctx.send(
"".join(
                # first 94 printable characters: digits, letters and punctuation (no whitespace)
                random.choice(string.printable[:94]) for i in range(random.randint(20, 35))
)
)
@password_generate.command(name="strong")
    async def password_generate_strong(self, ctx, delimiter: str = ""):
        """
        Generate a strong password.
        **Arguments**
        * ``<delimiter>``: The character used to separate each random word. Defaults to "" (the words are joined directly).
        """
        d = delimiter
rc = random.choice
rr = random.randint
await ctx.send(
d.join(rc(RANDOM_WORDS).capitalize() for i in range(3)) + f"{d}{rr(1,1000)}"
)
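    # Example (illustrative): with delimiter "-", output looks like "Apple-River-Stone-482".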
@password.command(name="strength")
async def password_strength(self, ctx, password: str):
"""Validate a passwords strength."""
conv = PasswordStats(password)
converter = conv.strength()
        if converter < 0.250:
            emoji = RED_CIRCLE
            text = "This is a **weak** password."
        elif converter < 0.500:
            emoji = ORANGE_CIRCLE
            text = "This is an **okay** password."
        elif converter < 0.750:
            emoji = YELLOW_CIRCLE
            text = "This is a **good** password!"
else:
emoji = GREEN_CIRCLE
text = "This is an **excellent** password!"
await ctx.maybe_send_embed(
f"**Strength rating: {round(converter * 100)}%** {emoji}\n{cf.quote(text)}"
)
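    # Quick check of the thresholds above (hedged; PasswordStats.strength()
    # returns a float in [0, 1)):
    #
    #   >>> from password_strength import PasswordStats
    #   >>> PasswordStats("abc").strength() < 0.250
    #   True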
|
37266
|
from sympy import *
# Implementation of QuaternionBase<Derived>::toRotationMatrix(void).
# The quaternion q is given as a list [qw, qx, qy, qz].
def QuaternionToRotationMatrix(q):
tx = 2 * q[1]
ty = 2 * q[2]
tz = 2 * q[3]
twx = tx * q[0]
twy = ty * q[0]
twz = tz * q[0]
txx = tx * q[1]
txy = ty * q[1]
txz = tz * q[1]
tyy = ty * q[2]
tyz = tz * q[2]
tzz = tz * q[3]
return Matrix([[1 - (tyy + tzz), txy - twz, txz + twy],
[txy + twz, 1 - (txx + tzz), tyz - twx],
[txz - twy, tyz + twx, 1 - (txx + tyy)]])
# Implementation of SO3Group<Scalar> expAndTheta().
# Only implementing the first case (of very small rotation) since we take the Jacobian at zero.
def SO3exp(omega):
theta = omega.norm()
theta_sq = theta**2
half_theta = theta / 2
theta_po4 = theta_sq * theta_sq
    imag_factor = Rational(1, 2) - Rational(1, 48) * theta_sq + Rational(1, 3840) * theta_po4
    real_factor = 1 - Rational(1, 2) * theta_sq + Rational(1, 384) * theta_po4
# return SO3Group<Scalar>(Eigen::Quaternion<Scalar>(
# real_factor, imag_factor * omega.x(), imag_factor * omega.y(),
# imag_factor * omega.z()));
qw = real_factor
qx = imag_factor * omega[0]
qy = imag_factor * omega[1]
qz = imag_factor * omega[2]
return QuaternionToRotationMatrix([qw, qx, qy, qz])
# Implementation of SE3Group<Scalar> exp().
# Only implementing the first case (of small rotation) since we take the Jacobian at zero.
def SE3exp(tangent):
omega = Matrix(tangent[3:6])
V = SO3exp(omega)
rotation = V
translation = V * Matrix(tangent[0:3])
return rotation.row_join(translation)
# Main
init_printing(use_unicode=True)
print('Variant 1')
print('')
# Define the tangent vector with symbolic elements T_0 to T_5.
# (For a matrix, use: Matrix(3, 1, lambda i,j:var('S_%d%d' % (i,j))) )
T = Matrix(6, 1, lambda i,j:var('T_%d' % (i)))
# Compute transformation matrix from tangent vector.
T_matrix = SE3exp(T)
# Define the vector current_T * src:
S = Matrix(3, 1, lambda i,j:var('S_%d' % (i)))
# Matrix-vector multiplication with homogeneous vector:
result = T_matrix * S.col_join(Matrix([1]))
# Compute Jacobian:
# (Note: The transpose is needed for stacking the matrix columns (instead of rows) into a vector.)
jac = result.transpose().reshape(result.rows * result.cols, 1).jacobian(T)
# Take Jacobian at zero:
jac_subs = jac.subs([(T[0], 0), (T[1], 0), (T[2], 0), (T[3], 0), (T[4], 0), (T[5], 0)])
# Simplify and output:
jac_subs_simple = simplify(jac_subs)
pprint(jac_subs_simple)
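# Cross-check (a sketch): at T = 0 the Jacobian of exp(T) * S should equal
# [ I_3 | -hat(S) ]: identity with respect to the translation block, and the
# negative skew-symmetric matrix of S with respect to the rotation block.
expected = eye(3).row_join(Matrix([[0, S[2], -S[1]],
                                   [-S[2], 0, S[0]],
                                   [S[1], -S[0], 0]]))
print('Difference from expected [I | -hat(S)] (should be the zero matrix):')
pprint(simplify(jac_subs_simple - expected))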
print('')
print('')
print('Variant 2')
print('')
# Treat the function of which we want to determine the derivative as a list of nested functions.
# This makes it easier to compute the derivative of each part, simplify it, and concatenate the results
# using the chain rule.
### Define the function of which the Jacobian shall be taken ###
# Matrix-vector multiplication with homogeneous vector:
def MatrixVectorMultiplyHomogeneous(matrix, vector):
return matrix * vector.col_join(Matrix([1]))
# Define the vector current_T * src:
S = Matrix(3, 1, lambda i,j:var('S_%d' % (i)))
# The list of nested functions. They will be evaluated from right to left
# (this is to match the way they would be written in math: f(g(x)).)
functions = [lambda matrix : MatrixVectorMultiplyHomogeneous(matrix, S), SE3exp]
### Define the variables with respect to which to take the Jacobian, and the position for evaluation ###
# Chain rule:
# d(f(g(x))) / dx = (df/dy)(g(x)) * dg/dx
# Define the parameter with respect to which to take the Jacobian, y in the formula above:
parameters = Matrix(6, 1, lambda i,j:var('T_%d' % (i)))
# Set the position at which to take the Jacobian, g(x) in the formula above:
parameter_values = zeros(6, 1)
### Automatic Jacobian calculation, no need to modify anything beyond this point ###
# Jacobian from previous step, dg/dx in the formula above:
previous_jacobian = 1
# TODO: Test whether this works with non-matrix functions.
def ComputeValueAndJacobian(function, parameters, parameter_values):
# Evaluate the function.
values = function(parameter_values)
# Compute the Jacobian.
symbolic_values = function(parameters)
symbolic_values_vector = symbolic_values.transpose().reshape(symbolic_values.rows * symbolic_values.cols, 1)
parameters_vector = parameters.transpose().reshape(parameters.rows * parameters.cols, 1)
jacobian = symbolic_values_vector.jacobian(parameters_vector)
# Set in the evaluation point.
for row in range(0, parameters.rows):
for col in range(0, parameters.cols):
jacobian = jacobian.subs(parameters[row, col], parameter_values[row, col])
# Simplify the jacobian.
jacobian = simplify(jacobian)
return (values, jacobian)
# Print info about initial state.
print('Taking the Jacobian of these functions (sorted from inner to outer):')
for i in range(len(functions) - 1, -1, -1):
print(str(functions[i]))
print('with respect to:')
pprint(parameters)
print('at position:')
pprint(parameter_values)
print('')
# Loop over all functions:
for i in range(len(functions) - 1, -1, -1):
# Compute value and Jacobian of this function.
(values, jacobian) = ComputeValueAndJacobian(functions[i], parameters, parameter_values)
# Update parameter_values
parameter_values = values
# Update parameters (create a new symbolic vector of the same size as parameter_values)
parameters = Matrix(values.rows, values.cols, lambda i,j:var('T_%d%d' % (i,j)))
# Concatenate this Jacobian with the previous one according to the chain rule:
previous_jacobian = jacobian * previous_jacobian
# Print intermediate result
print('Intermediate step ' + str(len(functions) - i) + ', for ' + str(functions[i]))
print('Position after function evaluation (function value):')
pprint(parameter_values)
print('Jacobian of this function wrt. its input only:')
pprint(jacobian)
print('Cumulative Jacobian wrt. the innermost parameter:')
pprint(previous_jacobian)
print('')
# Print final result
print('Final result:')
pprint(previous_jacobian)
|
37279
|
import os
from importlib import import_module
from django.apps import apps
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.serializer import serializer_factory
from django.db.models import ForeignKey, ManyToManyField
from django.utils.inspect import get_func_args
from django.utils.module_loading import module_dir
class SettingsReference(str):
"""
Special subclass of string which actually references a current settings
value. It's treated as the value in memory, but serializes out to a
settings.NAME attribute reference.
"""
    def __new__(cls, value, setting_name):
        return str.__new__(cls, value)
def __init__(self, value, setting_name):
self.setting_name = setting_name
def fullname(o):
return o.__module__ + "." + o.__class__.__name__
class OperationWriter:
def __init__(self, operation, indentation=2):
self.operation = operation
self.buff = []
self.indentation = indentation
self.data = []
def serialize(self, app):
d = {}
def _write(_arg_name, _arg_value):
if _arg_name in self.operation.serialization_expand_args and isinstance(_arg_value, (list, tuple, dict)):
if isinstance(_arg_value, dict):
ds = {}
for a, b in _arg_value.items():
                        if isinstance(b, (str, list, dict, bool, float, int)) or b is None:
                            ds[a] = b
                        else:
                            ds[a] = str(b)
d[_arg_name] = ds
else:
f = []
for item in _arg_value:
if isinstance(item, tuple):
if len(item) == 2:
props = {}
i = item[1].__dict__
props["type_name"] = fullname(item[1])
props["choices"] = i.get("choices", None)
props["blank"] = i.get("blank", True)
props["is_null"] = i.get("null", True)
props["primary_key"] = i.get("primary_key", False)
props["help_text"] = i.get("help_text", '')
props["max_length"] = i.get("max_length", None)
props["verbose_name"] = i.get("verbose_name", None)
if "default" in i:
props["default"] = str(i["default"]) if type(i["default"]) not in [set, list, dict, int, float, bool, type(None)] else i["default"]
else:
props["default"] = None
f.append({'name': str(item[0]), 'props': props})
else:
f.append(list(item))
                        elif isinstance(item, (str, list, dict, bool, float, int)) or item is None:
f.append(item)
else:
f.append(str(item))
d[_arg_name] = f
            elif isinstance(_arg_value, (ForeignKey, ManyToManyField)):
                ab = {
                    "many_to_many": bool(_arg_value.many_to_many),
                    "many_to_one": bool(_arg_value.many_to_one),
                    "one_to_many": bool(_arg_value.one_to_many),
                    "one_to_one": bool(_arg_value.one_to_one),
                    "field_str": str(_arg_value),
                    "to": str(_arg_value.remote_field.model).replace("__fake__.", "").replace("<class", "").replace("'", "").replace(">", "").replace(" ", ""),
                }
                d[_arg_name] = ab
                d["related"] = True
            elif isinstance(_arg_value, (str, list, dict, bool, float, int)) or _arg_value is None:
                d[_arg_name] = _arg_value
else:
d[_arg_name] = str(_arg_value)
name, args, kwargs = self.operation.deconstruct()
operation_args = get_func_args(self.operation.__init__)
for i, arg in enumerate(args):
arg_value = arg
arg_name = operation_args[i]
_write(arg_name, arg_value)
i = len(args)
for arg_name in operation_args[i:]:
if arg_name in kwargs:
arg_value = kwargs[arg_name]
_write(arg_name, arg_value)
if "name" in d:
d["name"] = app + "." + d["name"]
return d
class MigrationWriter:
"""
    Take a Migration instance and produce the contents
    of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
def as_list(self, app):
operations = []
for operation in self.migration.operations:
operations.append(OperationWriter(operation).serialize(app))
return operations
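    # Usage sketch (hedged; "myapp" is a hypothetical app label):
    #
    #   from django.db.migrations.loader import MigrationLoader
    #   loader = MigrationLoader(None, ignore_no_migrations=True)
    #   for (app_label, name), migration in loader.disk_migrations.items():
    #       if app_label == "myapp":
    #           print(MigrationWriter(migration).as_list(app_label))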
@property
def basedir(self):
migrations_package_name, _ = MigrationLoader.migrations_module(self.migration.app_label)
if migrations_package_name is None:
raise ValueError("Django can't create migrations for app '%s' because " "migrations have been disabled via the MIGRATION_MODULES " "setting." % self.migration.app_label)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
except ImportError:
pass
else:
try:
return module_dir(migrations_module)
except ValueError:
pass
# Alright, see if it's a direct submodule of the app
app_config = apps.get_app_config(self.migration.app_label)
maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".")
if app_config.name == maybe_app_name:
return os.path.join(app_config.path, migrations_package_basename)
# In case of using MIGRATION_MODULES setting and the custom package
# doesn't exist, create one, starting from an existing package
existing_dirs, missing_dirs = migrations_package_name.split("."), []
while existing_dirs:
missing_dirs.insert(0, existing_dirs.pop(-1))
try:
base_module = import_module(".".join(existing_dirs))
except ImportError:
continue
else:
try:
base_dir = module_dir(base_module)
except ValueError:
continue
else:
break
else:
raise ValueError(
"Could not locate an appropriate location to create " "migrations package %s. Make sure the toplevel " "package exists and can be imported." % migrations_package_name
)
final_dir = os.path.join(base_dir, *missing_dirs)
if not os.path.isdir(final_dir):
os.makedirs(final_dir)
for missing_dir in missing_dirs:
base_dir = os.path.join(base_dir, missing_dir)
with open(os.path.join(base_dir, "__init__.py"), "w"):
pass
return final_dir
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
return os.path.join(self.basedir, self.filename)
@classmethod
def serialize(cls, value):
return serializer_factory(value).serialize()
|
37307
|
import collections
import datetime
import logging
import os
import sys
from pathlib import Path
import numpy as np
import pdfkit
from bs4 import BeautifulSoup
from sklearn.metrics import mean_absolute_error, mean_squared_error, confusion_matrix, classification_report, \
accuracy_score
from tldextract import tldextract
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23; the standalone joblib package is the drop-in replacement
from coffeeandnoodles.core.util import get_md5_from_string
from trustworthiness.config import DeFactoConfig
from trustworthiness.definitions import DATASET_3C_SITES_PATH, DATASET_MICROSOFT_PATH_PAGES_MISSING, \
DATASET_MICROSOFT_PATH_PAGES_CACHED, ENC_WEB_DOMAIN, ENC_WEB_DOMAIN_SUFFIX, DATASET_MICROSOFT_PATH, OUTPUT_FOLDER, \
ENC_TAGS
import re
config = DeFactoConfig()
def filterTerm(word):
if word is not None:
temp = word.lower()
return re.sub(r"[^A-Za-z]+", '', temp)
else:
return ''
def print_report_regression(clf_name, predictions, y_test, targets):
print('MAE', mean_absolute_error(y_test, predictions))
print('RMSE', np.math.sqrt(mean_squared_error(y_test, predictions)))
print("-----------------------------------------------------------------------")
def print_report(clf_name, predictions, y_test, targets):
print("Classifier: ", clf_name)
print(confusion_matrix(y_test, predictions))
print("accuracy: ", accuracy_score(y_test, predictions))
print(classification_report(y_test, predictions, target_names=targets))
# print(":: recall: ", recall_score(y_test, predictions, average='weighted'))
# print(":: precision: ", precision_score(y_test, predictions, average='weighted'))
# print(":: f1: ", f1_score(y_test, predictions, average='weighted'))
print("-----------------------------------------------------------------------")
def get_logger(name, dir, file_level=logging.DEBUG, console_level=logging.INFO):
try:
logger = logging.getLogger(name)
if len(logger.handlers) == 0:
now = datetime.datetime.now()
filename = dir + name + '_' + now.strftime("%Y-%m-%d") + '.log'
formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
fileHandler = logging.FileHandler(filename)
fileHandler.setFormatter(formatter)
fileHandler.setLevel(file_level)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
consoleHandler.setLevel(console_level)
logger.setLevel(logging.DEBUG)
logger.addHandler(fileHandler)
logger.addHandler(consoleHandler)
logger.propagate = False
return logger
except:
raise
def get_html_file_path(url):
path = url.replace('http://', '')
last = path.split('/')[-1]
path_root = None
if ('.html' not in last) and ('.htm' not in last) and ('.shtml' not in last):
if path[-1] != '/':
path = path + '/'
path_root1 = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path + 'index.html')
path_root2 = Path(DATASET_MICROSOFT_PATH_PAGES_MISSING + path + 'index.html')
else:
path_root1 = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path)
path_root2 = Path(DATASET_MICROSOFT_PATH_PAGES_MISSING + path)
if path_root1.exists():
path_root = path_root1
elif path_root2.exists():
path_root = path_root2
else:
# sometimes the last part is not a folder, but the file itself without the ".html" , try it as a last attempt
path_root3a = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.html')
path_root3b = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.htm')
path_root3c = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.shtml')
if path_root3a.exists():
path_root = path_root3a
elif path_root3b.exists():
path_root = path_root3b
elif path_root3c.exists():
path_root = path_root3c
else:
# url_broken.append(url)
raise Exception(
':: this should not happen, double check core/web/credibility/fix_dataset_microsoft.py | url = ' + url)
return path_root
def save_encoder_html2seq(folder_html_data):
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
config.logger.info('get_encoder_html2seq()')
try:
tags_set = []
#sentences = []
tot_files = 0
#my_file = Path(folder_html_data + 'features.html2seq.pkl')
my_encoder = Path(ENC_TAGS)
#path_html2seq = folder_html_data + 'html2seq/'
#path_html = folder_html_data + 'html/'
#path_text = folder_html_data + 'text/'
for dirpath, dirs, files in os.walk(folder_html_data):
for file_html in files:
if file_html.endswith('.txt'):
tot_files += 1
config.logger.info('processing file ' + str(tot_files) + ' - ' + str(len(tags_set)))
# get tags
tags = []
soup = BeautifulSoup(open(os.path.join(dirpath, file_html)), "html.parser")
html = soup.prettify()
for line in html.split('\n'):
                        if isinstance(line, str) and len(line.strip()) > 0:
                            stripped = line.strip()
                            # keep opening and closing tags ('<...' and '</...'),
                            # skip comments/doctypes ('<!...')
                            if stripped.startswith('<') and not stripped.startswith('<!'):
                                if len(line.split()) > 1:
                                    tags.append(line.split()[0] + '>')
                                else:
                                    tags.append(line.split()[0])
if len(tags) > 0:
#sentences.append(tags)
tags_set.extend(tags)
tags_set = list(set(tags_set))
else:
config.logger.info('no tags for this file...')
config.logger.info('saving dump')
le.fit(tags_set)
joblib.dump(le, str(my_encoder))
config.logger.info('tot files: ' + str(tot_files))
config.logger.info('dictionary size: ' + str(len(tags_set)))
except Exception as e:
config.logger.error(repr(e))
raise
def save_encoder_domain_and_suffix():
import pandas as pd
from sklearn import preprocessing
le1 = preprocessing.LabelEncoder()
le2 = preprocessing.LabelEncoder()
    domain_s = ['']
    domain = ['']
df_sites = pd.read_csv(DATASET_3C_SITES_PATH, na_values=0, delimiter=',', usecols=['document_url'])
for index, row in df_sites.iterrows():
url = str(row[0])
print(index, url)
try:
o = tldextract.extract(url)
if o.suffix is not None:
domain_s.append(str(o.suffix).lower())
if o.domain is not None:
domain.append(str(o.domain).lower())
except:
continue
# appending upper level domains, from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
# Version 2018040300, Last Updated Tue Apr 3 07:07:01 2018 UTC
df = pd.read_csv(config.datasets + 'data/iana/org/TLD/tlds-alpha-by-domain.txt', sep=" ", header=None)
for index, row in df.iterrows():
print(index, row[0])
domain.append(str(row[0]).lower())
df = pd.read_csv(DATASET_MICROSOFT_PATH, delimiter='\t', header=0)
for index, row in df.iterrows():
url = str(row[3])
print(index, url)
try:
o = tldextract.extract(url)
if o.suffix is not None:
domain_s.append(str(o.suffix).lower())
if o.domain is not None:
domain.append(str(o.domain).lower())
except:
continue
le1.fit(domain)
joblib.dump(le1, ENC_WEB_DOMAIN)
print(le1.classes_)
le2.fit(domain_s)
joblib.dump(le2, ENC_WEB_DOMAIN_SUFFIX)
print(le2.classes_)
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + d1.month - d2.month
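# e.g. diff_month(datetime.datetime(2018, 4, 1), datetime.datetime(2017, 12, 1)) == 4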
def save_url_body(extractor):
try:
config.logger.info('extracting features for: ' + extractor.url)
hash = get_md5_from_string(extractor.local_file_path)
        text = extractor.webscrap.get_body()
with open(config.root_dir_data + 'marseille/input/' + hash + '.txt', "w") as file:
file.write(text)
except Exception as e:
config.logger.error(repr(e))
raise
if __name__ == '__main__':
save_encoder_domain_and_suffix()
# save_encoder_html2seq('/Users/diegoesteves/DropDrive/CloudStation/experiments_cache/web_credibility/output/all_html/') # just copy and paste all html files into a single temp file to generate this.
|
37323
|
import os
import tests
from tests import at_most, compile, savefile
import subprocess
node_present = True
erlang_present = True
if os.system("node -v >/dev/null 2>/dev/null") != 0:
print " [!] ignoring nodejs tests"
node_present = False
if (os.system("erl -version >/dev/null 2>/dev/null") != 0 or
os.system("which escript >/dev/null 2>/dev/null") != 0):
print " [!] ignoring erlang tests"
erlang_present = False
sleep_sort_script='''\
#!/bin/bash
echo "Unsorted: $*"
function f() {
sleep "$1"
echo -n "$1 "
}
while [ -n "$1" ]; do
f "$1" &
shift
done
wait
echo
'''
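# "Sleep sort": one background sleeper per number; each prints after sleeping
# that many seconds, so the numbers come out in ascending order. Used below to
# exercise many concurrent timers at once.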
class SingleProcess(tests.TestCase):
@at_most(seconds=2)
def test_bash_sleep(self):
self.system("sleep 10")
@at_most(seconds=2)
def test_bash_bash_sleep(self):
self.system("bash -c 'sleep 120;'")
@at_most(seconds=2)
def test_python2_sleep(self):
self.system('python2 -c "import time; time.sleep(10)"')
@at_most(seconds=2)
def test_python2_select(self):
self.system('python2 -c "import select; select.select([],[],[], 10)"')
@at_most(seconds=2)
def test_python2_poll(self):
self.system('python2 -c "import select; select.poll().poll(10000)"')
@at_most(seconds=2)
def test_python2_epoll(self):
self.system('python2 -c "import select; select.epoll().poll(10000)"')
@at_most(seconds=2)
def test_node_epoll(self):
if node_present:
self.system('node -e "setTimeout(function(){},10000);"')
def test_bad_command(self):
self.system('command_that_doesnt exist',
returncode=127, ignore_stderr=True)
def test_return_status(self):
self.system('python2 -c "import sys; sys.exit(188)"', returncode=188)
self.system('python2 -c "import sys; sys.exit(-1)"', returncode=255)
@at_most(seconds=2)
@compile(code='''
#include <unistd.h>
int main() {
sleep(10);
return(0);
}''')
def test_c_sleep(self, compiled=None):
self.system(compiled)
@at_most(seconds=2)
@compile(code='''
#include <time.h>
int main() {
struct timespec ts = {1, 0};
nanosleep(&ts, NULL);
return(0);
}''')
def test_c_nanosleep(self, compiled=None):
self.system(compiled)
@at_most(seconds=5)
@savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp disable +A1 +K true -noinput
-export([main/1]).
main(_) ->
timer:sleep(10*1000),
halt(0).
''')
def test_erlang_sleep(self, filename=None):
if erlang_present:
self.system("escript %s" % (filename,))
@at_most(seconds=5)
@savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp enable +A30 +K true -noinput
-export([main/1]).
main(_) ->
timer:sleep(10*1000),
halt(0).
''')
def test_erlang_sleep_smp(self, filename=None):
if erlang_present:
self.system("escript %s" % (filename,))
@at_most(seconds=5)
@savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp enable +A30 +K false -noinput
-export([main/1]).
main(_) ->
timer:sleep(10*1000),
halt(0).
''')
def test_erlang_sleep_smp_no_epoll(self, filename=None):
if erlang_present:
self.system("escript %s" % (filename,))
@at_most(seconds=5)
@savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp disable +A1 +K true -noinput
-export([main/1]).
main(_) ->
self() ! msg,
proc(10),
receive
_ -> ok
end.
proc(0) ->
receive
_ -> halt(0)
end;
proc(N) ->
Pid = spawn(fun () -> proc(N-1) end),
receive
_ -> timer:sleep(1000),
Pid ! msg
end.
''')
def test_erlang_process_staircase(self, filename=None):
if erlang_present:
self.system("escript %s" % (filename,))
@at_most(seconds=2)
def test_perl_sleep(self):
self.system("perl -e 'sleep 10'")
@at_most(seconds=5)
@savefile(suffix="sh", text=sleep_sort_script)
def test_sleep_sort(self, filename=None):
self.system("bash %s 1 12 1231 123213 13212 > /dev/null" % (filename,))
@at_most(seconds=5)
@savefile(suffix="sh", text=sleep_sort_script)
    def test_sleep_sort_many(self, filename=None):
self.system("bash %s 5 3 6 3 6 3 1 4 7 > /dev/null" % (filename,))
@at_most(seconds=10)
def test_parallel_sleeps(self):
for i in range(10):
stdout = self.system(' -- '.join(['bash -c "date +%s"',
'bash -c "sleep 60; date +%s"',
'bash -c "sleep 120; date +%s"']),
capture_stdout=True)
a, b, c = [int(l) for l in stdout.split()]
assert 55 < (b - a) < 65, str(b-a)
assert 55 < (c - b) < 65, str(c-b)
assert 110 < (c - a) < 130, str(c-a)
@at_most(seconds=3)
def test_file_descriptor_leak(self):
out = subprocess.check_output("ls /proc/self/fd", shell=True)
normal_fds = len(out.split('\n'))
stdout = self.system(' -- '.join(['sleep 1',
'sleep 60',
'sleep 120',
'bash -c "sleep 180; ls /proc/self/fd"']),
capture_stdout=True)
after_fork_fds = len(stdout.split('\n'))
assert normal_fds == after_fork_fds
@at_most(seconds=4)
def test_2546_wraparound(self):
if os.uname()[4] == "x86_64":
stdout = self.system("bash -c 'for i in `seq 1 55`; do sleep 315360000; done; date +%Y'",
capture_stdout=True)
assert int(stdout) > 2500
if __name__ == '__main__':
import unittest
unittest.main()
|
37350
|
from tnparser.pipeline import read_pipelines, Pipeline
text1="I have a dog! Let's see what I can do with Silo.ai. :) Can I tokenize it? I think so! Heading: This is the heading And here continues a new sentence and there's no dot."
text2="Some other text, to see we can tokenize more stuff without reloading the model... :)"
# What do we have for English in models_en_ewt?
available_pipelines=read_pipelines("models_en_ewt/pipelines.yaml") # {pipeline_name -> its steps}
p=Pipeline(available_pipelines["tokenize"]) # launch the pipeline from the steps
for _ in range(1000):
print(p.parse(text1))
print(p.parse(text2))
|
37425
|
from flask import current_app, request, Response, make_response
from rdflib import ConjunctiveGraph
from werkzeug.exceptions import abort
from depot.middleware import FileServeApp
from .entity_blueprint import entity_blueprint
from whyis.data_extensions import DATA_EXTENSIONS
from whyis.data_formats import DATA_FORMATS
from whyis.decorator import conditional_login_required
import sadi.mimeparse
from whyis.html_mime_types import HTML_MIME_TYPES
@entity_blueprint.route('/about.<format>', methods=['GET'])
@entity_blueprint.route('/<path:name>', methods=['GET'])
@entity_blueprint.route('/<path:name>.<format>', methods=['GET'])
@entity_blueprint.route('/', methods=['GET'])
@entity_blueprint.route('/home', methods=['GET'])
@entity_blueprint.route('/about', methods=['GET'])
@conditional_login_required
def view(name=None, format=None, view=None):
current_app.db.store.nsBindings = {}
entity, content_type = current_app.get_entity_uri(name, format)
resource = current_app.get_resource(entity)
# 'view' is the default view
fileid = resource.value(current_app.NS.whyis.hasFileID)
if fileid is not None and 'view' not in request.args:
fileid = fileid.value
f = None
if current_app.nanopub_depot is not None and current_app.nanopub_depot.exists(fileid):
f = current_app.nanopub_depot.get(fileid)
elif current_app.file_depot.exists(fileid):
f = current_app.file_depot.get(fileid)
if f is not None:
            fsa = FileServeApp(f, current_app.config["file_archive"].get("cache_max_age", 3600 * 24 * 7))
            return fsa
if content_type is None:
        content_type = request.headers.get('Accept', 'text/turtle')
fmt = sadi.mimeparse.best_match([mt for mt in list(DATA_FORMATS.keys()) if mt is not None],content_type)
if 'view' in request.args or fmt in HTML_MIME_TYPES:
return current_app.render_view(resource)
elif fmt in DATA_FORMATS:
output_graph = ConjunctiveGraph()
result, status, headers = current_app.render_view(resource, view='describe')
output_graph.parse(data=result, format="json-ld")
return output_graph.serialize(format=DATA_FORMATS[fmt]), 200, {'Content-Type':content_type}
else:
return current_app.render_view(resource)
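# Content-negotiation sketch (hedged; host and extension are illustrative):
#   curl -H "Accept: text/turtle" https://example.org/<name>
#   curl https://example.org/<name>.ttl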
|
37427
|
from enum import Enum
class Currency(Enum):
AUD = 'Australia Dollar'
BGN = 'Bulgaria Lev'
BRL = 'Brazil Real'
CAD = 'Canada Dollar'
CHF = 'Switzerland Franc'
CNY = 'China Yuan/Renminbi'
CZK = 'Czech Koruna'
DKK = 'Denmark Krone'
GBP = 'Great Britain Pound'
HKD = 'Hong Kong Dollar'
HRK = 'Croatia Kuna'
HUF = 'Hungary Forint'
IDR = 'Indonesia Rupiah'
ILS = 'Israel New Shekel'
INR = 'India Rupee'
JPY = 'Japan Yen'
KRW = 'South Korea Won'
MXN = 'Mexico Peso'
MYR = 'Malaysia Ringgit'
NOK = 'Norway Kroner'
NZD = 'New Zealand Dollar'
PHP = 'Philippines Peso'
PLN = 'Poland Zloty'
RON = 'Romania New Lei'
RUB = 'Russia Rouble'
SEK = 'Sweden Krona'
SGD = 'Singapore Dollar'
THB = 'Thailand Baht'
TRY = 'Turkish New Lira'
USD = 'USA Dollar'
ZAR = 'South Africa Rand'
EUR = 'Euro'
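    # e.g. Currency.USD.name == 'USD' and Currency.USD.value == 'USA Dollar';
    # lookup by value: Currency('Euro') is Currency.EUR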
|
37429
|
import time
import argparse
from datetime import datetime
import logging
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel
from data import EqaDataLoader
from metrics import NavMetric
from models import MaskedNLLCriterion
from models import get_state, ensure_shared_grads
from data import load_vocab
from torch.autograd import Variable
from tqdm import tqdm
torch.backends.cudnn.enabled = False
################################################################################################
# make models trained in PyTorch 0.4 compatible with earlier PyTorch versions
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
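# With the shim in place, 0.4-era checkpoints load as usual, e.g. (path is illustrative):
#   checkpoint = torch.load('model.pt', map_location=lambda storage, loc: storage)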
################################################################################################
def eval(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
        exit('unknown model type: %s' % args.model_type)
eval_loader_kwargs = {
'questions_h5': getattr(args, args.eval_split + '_h5'),
'data_json': args.data_json,
'vocab': args.vocab_json,
'target_obj_conn_map_dir': args.target_obj_conn_map_dir,
'map_resolution': args.map_resolution,
'batch_size': 1,
'input_type': args.model_type,
'num_frames': 5,
'split': args.eval_split,
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': False,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
}
eval_loader = EqaDataLoader(**eval_loader_kwargs)
print('eval_loader has %d samples' % len(eval_loader.dataset))
logging.info("EVAL: eval_loader has {} samples".format(len(eval_loader.dataset)))
args.output_log_path = os.path.join(args.log_dir,
'eval_' + str(rank) + '.json')
t, epoch, best_eval_acc = 0, 0, 0.0
max_epochs = args.max_epochs
if args.mode == 'eval':
max_epochs = 1
while epoch < int(max_epochs):
invalids = []
model.load_state_dict(shared_model.state_dict())
model.eval()
# that's a lot of numbers
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
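        # Metric naming, as computed below: d_0/d_T = distance to target at
        # spawn/termination, d_D = d_0 - d_T, d_min = minimum distance reached,
        # r_T/r_e = agent ends in / ever enters the target room, stop = agent
        # emitted the stop action, ep_len = episode length. The _10/_30/_50
        # suffix is how many steps back along the expert trajectory the agent
        # is spawned from the target.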
if 'cnn' in args.model_type:
done = False
while done == False:
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_length = batch
metrics_slug = {}
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if action_length[0] + 1 - i - 5 < 0:
invalids.append(idx[0])
continue
ep_inds = [
x for x in range(action_length[0] + 1 - i - 5,
action_length[0] + 1 - i)
]
sub_img_feats = torch.index_select(
img_feats, 1, torch.LongTensor(ep_inds))
init_pos = eval_loader.dataset.episode_pos_queue[
ep_inds[-1]]
h3d = eval_loader.dataset.episode_house
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append(idx[0])
continue
sub_img_feats_var = Variable(sub_img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
# sample actions till max steps or <stop>
# max no. of actions = 100
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores = model(sub_img_feats_var,
questions_var)
else:
scores = model(sub_img_feats_var)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
sub_img_feats_var = torch.cat(
[sub_img_feats_var, img_feat_var], dim=1)
sub_img_feats_var = sub_img_feats_var[:, -5:, :]
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done == True:
break
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
logging.info("EVAL: invalids: {}".format(len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
elif 'lstm' in args.model_type:
done = False
while done == False:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, answer, _, actions_in, actions_out, action_lengths, _ = batch
question_var = Variable(questions.cuda())
metrics_slug = {}
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if action_lengths[0] - 1 - i < 0:
invalids.append([idx[0], i])
continue
h3d = eval_loader.dataset.episode_house
# forward through lstm till spawn
if len(eval_loader.dataset.episode_pos_queue[:-i]
) > 0:
images = eval_loader.dataset.get_frames(
h3d,
eval_loader.dataset.episode_pos_queue[:-i],
preprocess=True)
raw_img_feats = eval_loader.dataset.cnn(
Variable(torch.FloatTensor(images).cuda()))
actions_in_pruned = actions_in[:, :
action_lengths[0] -
i]
actions_in_var = Variable(actions_in_pruned.cuda())
action_lengths_pruned = action_lengths.clone(
).fill_(action_lengths[0] - i)
img_feats_var = raw_img_feats.view(1, -1, 3200)
if '+q' in args.model_type:
scores, hidden = model(
img_feats_var, question_var,
actions_in_var,
action_lengths_pruned.cpu().numpy())
else:
scores, hidden = model(
img_feats_var, False, actions_in_var,
action_lengths_pruned.cpu().numpy())
try:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
except:
invalids.append([idx[0], i])
continue
action_in = torch.LongTensor(1, 1).fill_(
actions_in[0,
action_lengths[0] - i]).cuda()
else:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
hidden = model.nav_rnn.init_hidden(1)
action_in = torch.LongTensor(1, 1).fill_(0).cuda()
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
img = h3d.env.render()
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224).cuda())).view(
1, 1, 3200)
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
actual_pos_queue = [(h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw)]
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores, hidden = model(
img_feat_var,
question_var,
Variable(action_in),
False,
hidden=hidden,
step=True)
else:
scores, hidden = model(
img_feat_var,
False,
Variable(action_in),
False,
hidden=hidden,
step=True)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done == True:
break
actual_pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw])
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: init_steps: {} metrics: {}".format(i, metrics.get_stat_string(mode=0)))
logging.info("EVAL: init_steps: {} invalids: {}".format(i, len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
print("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
logging.info("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
                # stop once every environment has been evaluated
                if len(eval_loader.dataset.pruned_env_set) == 0:
                    done = True
elif 'pacman' in args.model_type:
done = False
while done == False:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, question, answer, actions, action_length = batch
metrics_slug = {}
h3d = eval_loader.dataset.episode_house
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if i > action_length[0]:
invalids.append([idx[0], i])
continue
question_var = Variable(question.cuda())
controller_step = False
planner_hidden = model.planner_nav_rnn.init_hidden(1)
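                    # PACMAN protocol: the planner picks an action; the
                    # controller then decides, frame by frame, whether to keep
                    # repeating it (up to max_controller_actions) before
                    # handing control back to the planner.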
# get hierarchical action history
(
planner_actions_in, planner_img_feats,
controller_step, controller_action_in,
controller_img_feats, init_pos,
controller_action_counter
) = eval_loader.dataset.get_hierarchical_features_till_spawn(
actions[0, :action_length[0] + 1].numpy(), i, args.max_controller_actions
)
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_img_feats_var = Variable(
planner_img_feats.cuda())
# forward planner till spawn to update hidden state
for step in range(planner_actions_in.size(0)):
planner_scores, planner_hidden = model.planner_step(
question_var, planner_img_feats_var[step]
.unsqueeze(0).unsqueeze(0),
planner_actions_in_var[step].view(1, 1),
planner_hidden
)
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
dists_to_target, pos_queue, pred_actions = [
init_dist_to_target
], [init_pos], []
planner_actions, controller_actions = [], []
episode_length = 0
if args.max_controller_actions > 1:
controller_action_counter = controller_action_counter % args.max_controller_actions
controller_action_counter = max(controller_action_counter - 1, 0)
else:
controller_action_counter = 0
first_step = True
first_step_is_controller = controller_step
planner_step = True
action = int(controller_action_in)
for step in range(args.max_episode_length):
if not first_step:
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224,
224).cuda())).view(
1, 1, 3200)
else:
img_feat_var = Variable(controller_img_feats.cuda()).view(1, 1, 3200)
if not first_step or first_step_is_controller:
# query controller to continue or not
controller_action_in = Variable(
torch.LongTensor(1, 1).fill_(action).cuda())
controller_scores = model.controller_step(
img_feat_var, controller_action_in,
planner_hidden[0])
prob = F.softmax(controller_scores, dim=1)
controller_action = int(
prob.max(1)[1].data.cpu().numpy()[0])
if controller_action == 1 and controller_action_counter < args.max_controller_actions - 1:
controller_action_counter += 1
planner_step = False
else:
controller_action_counter = 0
planner_step = True
controller_action = 0
controller_actions.append(controller_action)
first_step = False
if planner_step:
if not first_step:
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
planner_scores, planner_hidden = model.planner_step(
question_var, img_feat_var,
Variable(action_in), planner_hidden)
prob = F.softmax(planner_scores, dim=1)
action = int(
prob.max(1)[1].data.cpu().numpy()[0])
planner_actions.append(action)
episode_done = action == 3 or episode_length >= args.max_episode_length
episode_length += 1
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done:
break
img, _, _ = h3d.step(action)
first_step = False
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
try:
print(metrics.get_stat_string(mode=0))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
except:
pass
print('epoch', epoch)
print('invalids', len(invalids))
logging.info("EVAL: epoch {}".format(epoch))
logging.info("EVAL: invalids {}".format(invalids))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
epoch += 1
# checkpoint if best val loss
if metrics.metrics[8][0] > best_eval_acc: # d_D_50
best_eval_acc = metrics.metrics[8][0]
if epoch % args.eval_every == 0 and args.log == True:
metrics.dump_log()
model_state = get_state(model)
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}
checkpoint_path = '%s/epoch_%d_d_D_50_%.04f.pt' % (
args.checkpoint_dir, epoch, best_eval_acc)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("EVAL: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
print('[best_eval_d_D_50:%.04f]' % best_eval_acc)
logging.info("EVAL: [best_eval_d_D_50:{:.04f}]".format(best_eval_acc))
eval_loader.dataset._load_envs(start_idx=0, in_order=True)
def train(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
        exit('unknown model type: %s' % args.model_type)
lossFn = torch.nn.CrossEntropyLoss().cuda()
optim = torch.optim.Adamax(
filter(lambda p: p.requires_grad, shared_model.parameters()),
lr=args.learning_rate)
train_loader_kwargs = {
'questions_h5': args.train_h5,
'data_json': args.data_json,
'vocab': args.vocab_json,
'batch_size': args.batch_size,
'input_type': args.model_type,
'num_frames': 5,
'map_resolution': args.map_resolution,
'split': 'train',
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': args.cache,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
'max_actions': args.max_actions
}
args.output_log_path = os.path.join(args.log_dir,
'train_' + str(rank) + '.json')
if 'pacman' in args.model_type:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['planner_loss', 'controller_loss'],
log_json=args.output_log_path)
else:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['loss'],
log_json=args.output_log_path)
train_loader = EqaDataLoader(**train_loader_kwargs)
print('train_loader has %d samples' % len(train_loader.dataset))
logging.info('TRAIN: train loader has {} samples'.format(len(train_loader.dataset)))
t, epoch = 0, 0
while epoch < int(args.max_epochs):
if 'cnn' in args.model_type:
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
while done == False:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, _, actions_out, _ = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_out_var = Variable(actions_out.cuda())
if '+q' in args.model_type:
scores = model(img_feats_var, questions_var)
else:
scores = model(img_feats_var)
loss = lossFn(scores, actions_out_var)
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.log == True:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'lstm' in args.model_type:
lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
total_times = []
while done == False:
start_time = time.time()
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_lengths, masks = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_in_var = Variable(actions_in.cuda())
actions_out_var = Variable(actions_out.cuda())
action_lengths = action_lengths.cuda()
masks_var = Variable(masks.cuda())
action_lengths, perm_idx = action_lengths.sort(
0, descending=True)
img_feats_var = img_feats_var[perm_idx]
if '+q' in args.model_type:
questions_var = questions_var[perm_idx]
actions_in_var = actions_in_var[perm_idx]
actions_out_var = actions_out_var[perm_idx]
masks_var = masks_var[perm_idx]
if '+q' in args.model_type:
scores, hidden = model(img_feats_var, questions_var,
actions_in_var,
action_lengths.cpu().numpy())
else:
scores, hidden = model(img_feats_var, False,
actions_in_var,
action_lengths.cpu().numpy())
#block out masks
if args.curriculum:
curriculum_length = (epoch+1)*5
for i, action_length in enumerate(action_lengths):
if action_length - curriculum_length > 0:
masks_var[i, :action_length-curriculum_length] = 0
logprob = F.log_softmax(scores, dim=1)
loss = lossFn(
logprob, actions_out_var[:, :action_lengths.max()]
.contiguous().view(-1, 1),
masks_var[:, :action_lengths.max()].contiguous().view(
-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
logging.info("TRAIN LSTM loss: {:.6f}".format(loss.data[0]))
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.log == True:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'pacman' in args.model_type:
planner_lossFn = MaskedNLLCriterion().cuda()
controller_lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
while done == False:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, planner_img_feats, planner_actions_in, \
planner_actions_out, planner_action_lengths, planner_masks, \
controller_img_feats, controller_actions_in, planner_hidden_idx, \
controller_outs, controller_action_lengths, controller_masks = batch
questions_var = Variable(questions.cuda())
planner_img_feats_var = Variable(planner_img_feats.cuda())
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_actions_out_var = Variable(
planner_actions_out.cuda())
planner_action_lengths = planner_action_lengths.cuda()
planner_masks_var = Variable(planner_masks.cuda())
controller_img_feats_var = Variable(
controller_img_feats.cuda())
controller_actions_in_var = Variable(
controller_actions_in.cuda())
planner_hidden_idx_var = Variable(
planner_hidden_idx.cuda())
controller_outs_var = Variable(controller_outs.cuda())
controller_action_lengths = controller_action_lengths.cuda(
)
controller_masks_var = Variable(controller_masks.cuda())
planner_action_lengths, perm_idx = planner_action_lengths.sort(
0, descending=True)
questions_var = questions_var[perm_idx]
planner_img_feats_var = planner_img_feats_var[perm_idx]
planner_actions_in_var = planner_actions_in_var[perm_idx]
planner_actions_out_var = planner_actions_out_var[perm_idx]
planner_masks_var = planner_masks_var[perm_idx]
controller_img_feats_var = controller_img_feats_var[
perm_idx]
controller_actions_in_var = controller_actions_in_var[
perm_idx]
controller_outs_var = controller_outs_var[perm_idx]
planner_hidden_idx_var = planner_hidden_idx_var[perm_idx]
controller_action_lengths = controller_action_lengths[
perm_idx]
controller_masks_var = controller_masks_var[perm_idx]
planner_scores, controller_scores, planner_hidden = model(
questions_var, planner_img_feats_var,
planner_actions_in_var,
planner_action_lengths.cpu().numpy(),
planner_hidden_idx_var, controller_img_feats_var,
controller_actions_in_var, controller_action_lengths)
planner_logprob = F.log_softmax(planner_scores, dim=1)
controller_logprob = F.log_softmax(
controller_scores, dim=1)
planner_loss = planner_lossFn(
planner_logprob,
planner_actions_out_var[:, :planner_action_lengths.max(
)].contiguous().view(-1, 1),
planner_masks_var[:, :planner_action_lengths.max()]
.contiguous().view(-1, 1))
controller_loss = controller_lossFn(
controller_logprob,
controller_outs_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1),
controller_masks_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update(
[planner_loss.data[0], controller_loss.data[0]])
logging.info("TRAINING PACMAN planner-loss: {:.6f} controller-loss: {:.6f}".format(
planner_loss.data[0], controller_loss.data[0]))
# backprop and update
if args.max_controller_actions == 1:
(planner_loss).backward()
else:
(planner_loss + controller_loss).backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.log == True:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
epoch += 1
if epoch % args.save_every == 0:
model_state = get_state(model)
optimizer_state = optim.state_dict()
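            # Drop private/underscored attributes before saving: when resumed
            # from a checkpoint, args is a dynamically created class whose
            # __dict__ also contains dunder entries that are not serializable.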
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad,
'state': model_state,
'epoch': epoch,
'optimizer': optimizer_state}
checkpoint_path = '%s/epoch_%d_thread_%d.pt' % (
args.checkpoint_dir, epoch, rank)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("TRAIN: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data params
parser.add_argument('-train_h5', default='data/train.h5')
parser.add_argument('-val_h5', default='data/val.h5')
parser.add_argument('-test_h5', default='data/test.h5')
parser.add_argument('-data_json', default='data/data.json')
parser.add_argument('-vocab_json', default='data/vocab.json')
parser.add_argument(
'-target_obj_conn_map_dir',
default='data/target-obj-conn-maps/500')
parser.add_argument('-map_resolution', default=500, type=int)
parser.add_argument(
'-mode',
default='train+eval',
type=str,
choices=['train', 'eval', 'train+eval'])
parser.add_argument('-eval_split', default='val', type=str)
# model details
parser.add_argument(
'-model_type',
default='cnn',
choices=['cnn', 'cnn+q', 'lstm', 'lstm+q', 'lstm-mult+q', 'pacman'])
parser.add_argument('-max_episode_length', default=100, type=int)
parser.add_argument('-curriculum', default=0, type=int)
# optim params
parser.add_argument('-batch_size', default=20, type=int)
parser.add_argument('-learning_rate', default=1e-3, type=float)
parser.add_argument('-max_epochs', default=1000, type=int)
parser.add_argument('-overfit', default=False, action='store_true')
# bookkeeping
parser.add_argument('-print_every', default=5, type=int)
parser.add_argument('-eval_every', default=1, type=int)
parser.add_argument('-save_every', default=1000, type=int) #optional if you would like to save specific epochs as opposed to relying on the eval thread
parser.add_argument('-identifier', default='cnn')
parser.add_argument('-num_processes', default=1, type=int)
parser.add_argument('-max_threads_per_gpu', default=10, type=int)
# checkpointing
parser.add_argument('-checkpoint_path', default=False)
parser.add_argument('-checkpoint_dir', default='checkpoints/nav/')
parser.add_argument('-log_dir', default='logs/nav/')
parser.add_argument('-log', default=False, action='store_true')
parser.add_argument('-cache', default=False, action='store_true')
parser.add_argument('-max_controller_actions', type=int, default=5)
parser.add_argument('-max_actions', type=int)
args = parser.parse_args()
args.time_id = time.strftime("%m_%d_%H:%M")
#MAX_CONTROLLER_ACTIONS = args.max_controller_actions
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
if args.curriculum:
assert 'lstm' in args.model_type #TODO: Finish implementing curriculum for other model types
logging.basicConfig(filename=os.path.join(args.log_dir, "run_{}.log".format(
str(datetime.now()).replace(' ', '_'))),
level=logging.INFO,
format='%(asctime)-15s %(message)s')
try:
args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
args.gpus = [int(x) for x in args.gpus]
except KeyError:
print("CPU not supported")
logging.info("CPU not supported")
exit()
if args.checkpoint_path != False:
print('Loading checkpoint from %s' % args.checkpoint_path)
logging.info("Loading checkpoint from {}".format(args.checkpoint_path))
args_to_keep = ['model_type']
checkpoint = torch.load(args.checkpoint_path, map_location={
'cuda:0': 'cpu'
})
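        # Merge saved and current arguments: every current flag overrides the
        # checkpoint value except those in args_to_keep (so the checkpoint's
        # model_type always wins), then rebuild args from the merged dict.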
for i in args.__dict__:
if i not in args_to_keep:
checkpoint['args'][i] = args.__dict__[i]
args = type('new_dict', (object, ), checkpoint['args'])
args.checkpoint_dir = os.path.join(args.checkpoint_dir,
args.time_id + '_' + args.identifier)
args.log_dir = os.path.join(args.log_dir,
args.time_id + '_' + args.identifier)
# if set to overfit; set eval_split to train
if args.overfit == True:
args.eval_split = 'train'
print(args.__dict__)
logging.info(args.__dict__)
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
os.makedirs(args.log_dir)
if args.model_type == 'cnn':
model_kwargs = {}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
shared_model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
shared_model.share_memory()
if args.checkpoint_path != False:
print('Loading params from checkpoint: %s' % args.checkpoint_path)
logging.info("Loading params from checkpoint: {}".format(args.checkpoint_path))
shared_model.load_state_dict(checkpoint['state'])
if args.mode == 'eval':
eval(0, args, shared_model)
elif args.mode == 'train':
if args.num_processes > 1:
processes = []
for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
train(0, args, shared_model)
else:
processes = []
# Start the eval thread
p = mp.Process(target=eval, args=(0, args, shared_model))
p.start()
processes.append(p)
# Start the training thread(s)
for rank in range(1, args.num_processes + 1):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
|
37430
|
import os
import json
import argparse
from pathlib import Path
import pandas as pd
import dateutil.parser
parser = argparse.ArgumentParser()
parser.add_argument("directory", type=Path help="Path to the directory.")
def main():
args = parser.parse_args()
dirs = sorted([d for d in os.listdir(args.directory) if os.path.isdir(args.directory / d)], key=lambda x: int(x[3:]))
header = ["RUN", "DESCR", "START", "BRANCH", "COMMITMSG"]
data = []
for d in dirs:
config_path = args.directory / d / "config.json"
if os.path.exists(config_path):
with open(config_path) as f:
config = json.load(f)
else:
config = {}
run_data = [
d,
config.get("description", ""),
dateutil.parser.parse(config["start_time"]).strftime("%d/%m/%y %H:%M") if "start_time" in config else "",
]
run_data += [config["git"]["head"], config["git"]["message"]] if "git" in config else [""] * 2
data.append(run_data)
df = pd.DataFrame(data, columns=header)
df.set_index("RUN", inplace=True)
with pd.option_context('display.max_rows', None, 'display.max_columns', None, "display.width", None, "display.max_colwidth", 100):
print(df)
if __name__ == '__main__':
main()
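# Usage sketch (assumptions, not part of the original script): run directories
# are expected to be named with a three-character prefix followed by a number
# (e.g. run001), since the sort key is int(x[3:]), and each may contain a
# config.json with "description", "start_time" and "git" entries:
#
#   python list_runs.py experiments/
#
# "list_runs.py" is a hypothetical name for this file.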
|
37434
|
from BinaryModel import *
from numpy.random import rand
class MajorityModel(BinaryModel):
def __init__(self, filename=None):
self.mdlPrm = {
'addNoise' : False,
}
self.wkrIds = {}
self.imgIds = {}
if filename:
self.load_data(filename)
else:
self._setup_prior()
def __del__(self):
pass
def load_data(self, filename, skipyaml=False):
"""
Data is assumed to be in the format:
imageId workerId label
"""
# load the text data
filein = open(filename)
info = filein.readline().rstrip().split(' ')
self.numLbls = int(info[2])
self.numWkrs = int(info[1])
self.numImgs = int(info[0])
self.imgPrm = []
for i in range(self.numImgs):
self.imgPrm.append([0, 0]) # (frac +ve votes, total n votes)
self.wkrLbls = dict((id, []) for id in range(self.numWkrs))
self.imgLbls = dict((id, []) for id in range(self.numImgs))
self.labels = []
for (lino, line) in enumerate(filein):
cols = [int(c) for c in line.rstrip().split(' ')]
iId = cols[0]; wId = cols[1]; lij = int(cols[2]==1)
self.wkrLbls[wId].append([iId, lij])
self.imgLbls[iId].append([wId, lij])
self.labels.append((iId, wId, lij))
self.imgPrm[iId][0] += lij
self.imgPrm[iId][1] += 1
# renormalize img prm
for i in range(len(self.imgPrm)):
self.imgPrm[i][0] = float(self.imgPrm[i][0])/self.imgPrm[i][1]
def get_num_wkrs(self):
return self.numWkrs
def get_num_imgs(self):
return self.numImgs
def get_num_lbls(self):
return self.numLbls
def set_model_param(self, raw=[], prm=None):
"""
Sets model parameters.
Arguments:
- `raw`: raw parameter vector
- `prm`: hash of model parameter values to be changed
"""
        if prm is not None:
            for (k, v) in prm.items():
self.mdlPrm[k] = v
def set_worker_param(self, raw):
pass
def set_image_param(self, raw):
self.imgPrm = [r for r in raw]
def get_model_param(self):
return {}
def get_worker_param_raw(self):
return {}
def get_image_param_raw(self):
return [p for p in self.imgPrm]
def get_worker_param(self, id=None):
return {}
def get_image_param(self, id=None):
return [p for p in self.imgPrm]
def get_labels(self):
if self.mdlPrm['addNoise']:
return [int((self.imgPrm[i][0]+(rand()-.5)/self.imgPrm[i][1])>.5)\
for i in range(len(self.imgPrm))]
else:
return [int(self.imgPrm[i][0]>.5) for i \
in range(len(self.imgPrm))]
# TODO: load and save parameters
def optimize_worker_param(self):
pass
def optimize_image_param(self):
pass
def objective(self, prm=None):
pass
def image_objective(self, prm=None):
pass
def image_objective_range(self, imgId, prm):
pass
def worker_objective_range(self, wkrId, prm):
pass
def gradient(self, prm=None):
return []
def worker_gradient(self, prm=None):
return []
def image_gradient(self, prm=None):
pass
def get_num_wkr_lbls(self):
return [len(self.wkrLbls[id]) for id in range(self.numWkrs)]
def get_num_img_lbls(self):
return [len(self.imgLbls[id]) for id in range(self.numImgs)]
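# Minimal usage sketch (the file name and votes below are illustrative, not
# part of the original module). The input format follows load_data's
# docstring: a header line "<numImgs> <numWkrs> <numLbls>" followed by
# "imageId workerId label" rows.
if __name__ == '__main__':
    with open('votes.txt', 'w') as f:  # hypothetical input file
        f.write('2 3 6\n'
                '0 0 1\n0 1 1\n0 2 0\n'
                '1 0 0\n1 1 0\n1 2 1\n')
    model = MajorityModel('votes.txt')
    # Image 0 gets 2/3 positive votes -> 1; image 1 gets 1/3 -> 0.
    print(model.get_labels())  # [1, 0]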
|
37447
|
import os
import re
import gzip
import argparse
import pandas as pd
import numpy as np
from collections import defaultdict
def get_args():
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(description="Method to create track for escape mutations")
parser.add_argument("-xlsx", help="file containing all the data")
parser.add_argument("-pid", help="pep to number", default="prot_names_pids_8.txt")
parser.add_argument("-gb_tools", help="path to gb_tools", default="./")
args = parser.parse_args()
return args
def read_pid(args):
inputfilehandler = open(args.pid, 'r')
pid = {}
aaid = {}
nucid = {}
for line in inputfilehandler:
line = line.strip()
fields = line.split()
peptide = fields[0]
pid[peptide] = fields[1]
nucid[peptide] = fields[2]
aaid[peptide] = fields[3]
inputfilehandler.close()
return (pid, aaid, nucid)
def get_start_pos(peptide, pid, aaid, nucid):
    first_eight = peptide[:8]
if first_eight in pid:
return nucid[first_eight]
return -1
def main(args):
(pid, aaid, nucid) = read_pid(args)
cd8_epitopes = pd.read_excel(args.xlsx,
skiprows=0,
header=0,
index_col=None)
print (cd8_epitopes.columns)
outfiletag = 'escape_mutations'
beddetailfilename = outfiletag+'.beddetail'
bedfilename = outfiletag+'.bed'
bbfilename = outfiletag+'.bb'
#print (cd8_epitopes['Probable Infection Location'])
#print (cd8_epitopes['Gene'])
#print (cd8_epitopes['Position of Mutation'])
#print (cd8_epitopes['AA Change'])
#print (cd8_epitopes['Codon Change'])
#print (cd8_epitopes['Wildtype Sequence'])
#print (cd8_epitopes['Mutant Sequence 1'])
#print (cd8_epitopes['Mutant Sequence 2'])
wt_mt = defaultdict(list)
mutations = []
beddetailfilehandler = open(beddetailfilename, 'w')
for i in range(0, len(cd8_epitopes['Position of Mutation'])):
chrom = "NC_045512v2"
reserved = 0
score = 1000
strand = '+'
pom = cd8_epitopes['Position of Mutation'][i]
gene = cd8_epitopes['Gene'][i]
pil = cd8_epitopes['Probable Infection Location'][i]
aa_change = cd8_epitopes['AA Change'][i]
c_change = cd8_epitopes['Codon Change'][i]
if gene+'_'+c_change+'_'+aa_change not in mutations:
mutations.append(gene+'_'+c_change+'_'+aa_change)
if ';' not in cd8_epitopes['Wildtype Sequence'][i]:
chromStart = get_start_pos(cd8_epitopes['Wildtype Sequence'][i], pid, aaid, nucid)
if chromStart != -1:
chromEnd = str(len(list(cd8_epitopes['Wildtype Sequence'][i]))*3+int(chromStart))
thickStart = str(chromStart)
thickEnd = str(chromEnd)
wt_pep = cd8_epitopes['Wildtype Sequence'][i]
mt_pep = cd8_epitopes['Mutant Sequence 1'][i]
                if mt_pep in wt_mt[wt_pep]:
                    continue
                wt_mt[wt_pep].append(mt_pep)
beddetailfilehandler.write(chrom+'\t'+
str(chromStart)+'\t'+
str(chromEnd)+'\t'+
wt_pep+'\t'+
str(score)+'\t'+
strand+'\t'+
thickStart+'\t'+
thickEnd+'\t'+
str(pom)+'\t'+
str(gene)+'\t'+
str(pil)+'\t'+
aa_change+'\t'+
c_change+'\t'+
mt_pep+"\n")
else:
wt_pep = cd8_epitopes['Wildtype Sequence'][i]
wt1_pep = wt_pep.split(';')[0]
wt2_pep = wt_pep.split(';')[1]
mt1_pep = cd8_epitopes['Mutant Sequence 1'][i]
mt2_pep = cd8_epitopes['Mutant Sequence 2'][i]
chromStart = get_start_pos(wt1_pep, pid, aaid, nucid)
if chromStart != -1:
chromEnd = str(len(list(wt1_pep))*3+int(chromStart))
thickStart = chromStart
thickEnd = chromEnd
                if mt1_pep in wt_mt[wt1_pep]:
                    continue
                wt_mt[wt1_pep].append(mt1_pep)
beddetailfilehandler.write(chrom+'\t'+
str(chromStart)+'\t'+
str(chromEnd)+'\t'+
wt1_pep+'\t'+
str(score)+'\t'+
strand+'\t'+
thickStart+'\t'+
thickEnd+'\t'+
str(pom)+'\t'+
str(gene)+'\t'+
str(pil)+'\t'+
aa_change+'\t'+
c_change+'\t'+
mt1_pep+"\n")
chromStart = get_start_pos(wt2_pep, pid, aaid, nucid)
if chromStart != -1:
chromEnd = str(len(list(wt2_pep))*3+int(chromStart))
thickStart = chromStart
thickEnd = chromEnd
                if mt2_pep in wt_mt[wt2_pep]:
                    continue
                wt_mt[wt2_pep].append(mt2_pep)
beddetailfilehandler.write(chrom+'\t'+
str(chromStart)+'\t'+
str(chromEnd)+'\t'+
wt2_pep+'\t'+
str(score)+'\t'+
strand+'\t'+
thickStart+'\t'+
thickEnd+'\t'+
str(pom)+'\t'+
str(gene)+'\t'+
str(pil)+'\t'+
aa_change+'\t'+
c_change+'\t'+
mt2_pep+"\n")
beddetailfilehandler.close()
print (len(mutations))
# use gbtools to convert from beddetail to bed and bigbed
os.system(f"bedSort {beddetailfilename} {bedfilename}")
os.system(f"bedToBigBed {bedfilename} wuhCor1.sizes {bbfilename} -tab -type=bed9+ -as=escape_mutants.as")
if __name__ == "__main__":
main(get_args())
|
37452
|
from django.conf import settings
DRFSO2_PROPRIETARY_BACKEND_NAME = getattr(settings, 'DRFSO2_PROPRIETARY_BACKEND_NAME', "Django")
DRFSO2_URL_NAMESPACE = getattr(settings, 'DRFSO2_URL_NAMESPACE', "")
|
37453
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.modules.xlinebase import XLineBase
from txircd.utils import durationToSeconds, ircLower, now
from zope.interface import implements
from fnmatch import fnmatchcase
class GLine(ModuleData, XLineBase):
implements(IPlugin, IModuleData)
name = "GLine"
core = True
lineType = "G"
def actions(self):
return [ ("register", 10, self.checkLines),
("changeident", 10, self.checkIdentChange),
("changehost", 10, self.checkHostChange),
("commandpermission-GLINE", 10, self.restrictToOper),
("statsruntype-glines", 10, self.generateInfo),
("burst", 10, self.burstLines) ]
def userCommands(self):
return [ ("GLINE", 1, UserGLine(self)) ]
def serverCommands(self):
return [ ("ADDLINE", 1, ServerAddGLine(self)),
("DELLINE", 1, ServerDelGLine(self)) ]
def load(self):
self.initializeLineStorage()
def verifyConfig(self, config):
if "client_ban_msg" in config and not isinstance(config["client_ban_msg"], basestring):
raise ConfigValidationError("client_ban_msg", "value must be a string")
def checkUserMatch(self, user, mask, data):
banMask = self.normalizeMask(mask)
userMask = ircLower("{}@{}".format(user.ident, user.host()))
if fnmatchcase(userMask, banMask):
return True
userMask = ircLower("{}@{}".format(user.ident, user.realHost))
if fnmatchcase(userMask, banMask):
return True
userMask = ircLower("{}@{}".format(user.ident, user.ip))
if fnmatchcase(userMask, banMask):
return True
return False
def killUser(self, user, reason):
self.ircd.log.info("Matched user {user.uuid} ({user.ident}@{user.host()}) against a g:line: {reason}", user=user, reason=reason)
user.sendMessage(irc.ERR_YOUREBANNEDCREEP, self.ircd.config.get("client_ban_msg", "You're banned! Email <EMAIL> for assistance."))
user.disconnect("G:Lined: {}".format(reason))
def checkLines(self, user):
banReason = self.matchUser(user)
if banReason is not None:
self.killUser(user, banReason)
return False
return True
def checkIdentChange(self, user, oldIdent, fromServer):
self.checkLines(user)
def checkHostChange(self, user, hostType, oldHost, fromServer):
if user.uuid[:3] == self.ircd.serverID:
self.checkLines(user)
def restrictToOper(self, user, data):
if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-gline", users=[user]):
user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
return False
return None
class UserGLine(Command):
implements(ICommand)
def __init__(self, module):
self.module = module
def parseParams(self, user, params, prefix, tags):
if len(params) < 1 or len(params) == 2:
user.sendSingleError("GLineParams", irc.ERR_NEEDMOREPARAMS, "GLINE", "Not enough parameters")
return None
banmask = params[0]
if banmask in self.module.ircd.userNicks:
targetUser = self.module.ircd.users[self.module.ircd.userNicks[banmask]]
banmask = "{}@{}".format(targetUser.ident, targetUser.realHost)
else:
if "@" not in banmask:
banmask = "*@{}".format(banmask)
if len(params) == 1:
return {
"mask": banmask
}
return {
"mask": banmask,
"duration": durationToSeconds(params[1]),
"reason": " ".join(params[2:])
}
def execute(self, user, data):
banmask = data["mask"]
if "reason" in data:
if not self.module.addLine(banmask, now(), data["duration"], user.hostmask(), data["reason"]):
user.sendMessage("NOTICE", "*** G:Line for {} is already set.".format(banmask))
return True
badUsers = []
for checkUser in self.module.ircd.users.itervalues():
reason = self.module.matchUser(checkUser)
if reason is not None:
badUsers.append((checkUser, reason))
for badUser in badUsers:
self.module.killUser(*badUser)
if data["duration"] > 0:
user.sendMessage("NOTICE", "*** Timed g:line for {} has been set, to expire in {} seconds.".format(banmask, data["duration"]))
else:
user.sendMessage("NOTICE", "*** Permanent g:line for {} has been set.".format(banmask))
return True
if not self.module.delLine(banmask):
user.sendMessage("NOTICE", "*** G:Line for {} doesn't exist.".format(banmask))
return True
user.sendMessage("NOTICE", "*** G:Line for {} has been removed.".format(banmask))
return True
class ServerAddGLine(Command):
implements(ICommand)
def __init__(self, module):
self.module = module
def parseParams(self, server, params, prefix, tags):
return self.module.handleServerAddParams(server, params, prefix, tags)
def execute(self, server, data):
if self.module.executeServerAddCommand(server, data):
badUsers = []
for user in self.module.ircd.users.itervalues():
reason = self.module.matchUser(user)
if reason is not None:
badUsers.append((user, reason))
for user in badUsers:
self.module.killUser(*user)
return True
return None
class ServerDelGLine(Command):
implements(ICommand)
def __init__(self, module):
self.module = module
def parseParams(self, server, params, prefix, tags):
return self.module.handleServerDelParams(server, params, prefix, tags)
def execute(self, server, data):
return self.module.executeServerDelCommand(server, data)
glineModule = GLine()
|
37457
|
load(
"//tensorflow/core/platform:rules_cc.bzl",
"cc_library",
)
def poplar_cc_library(**kwargs):
""" Wrapper for inserting poplar specific build options.
"""
if not "copts" in kwargs:
kwargs["copts"] = []
copts = kwargs["copts"]
copts.append("-Werror=return-type")
cc_library(**kwargs)
|
37473
|
from datetime import datetime, date
from marqeta.response_models.result import Result
from marqeta.response_models.kyc_question import KycQuestion
from marqeta.response_models import datetime_object
import json
import re
class KycResponse(object):
def __init__(self, json_response):
self.json_response = json_response
def __str__(self):
return json.dumps(self.json_response, default=self.json_serial)
@staticmethod
def json_serial(o):
if isinstance(o, datetime) or isinstance(o, date):
return o.__str__()
@property
def created_time(self):
if 'created_time' in self.json_response:
return datetime_object('created_time', self.json_response)
@property
def last_modified_time(self):
if 'last_modified_time' in self.json_response:
return datetime_object('last_modified_time', self.json_response)
@property
def token(self):
return self.json_response.get('token', None)
@property
def user_token(self):
return self.json_response.get('user_token', None)
@property
def business_token(self):
return self.json_response.get('business_token', None)
@property
def result(self):
if 'result' in self.json_response:
return Result(self.json_response['result'])
@property
def manual_override(self):
return self.json_response.get('manual_override', None)
@property
def notes(self):
return self.json_response.get('notes', None)
@property
def questions(self):
if 'questions' in self.json_response:
return [KycQuestion(val) for val in self.json_response['questions']]
@property
def reference_id(self):
return self.json_response.get('reference_id', None)
def __repr__(self):
return '<Marqeta.response_models.kyc_response.KycResponse>' + self.__str__()
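# Minimal usage sketch (field values are hypothetical): KycResponse wraps the
# raw JSON payload from the KYC endpoint and exposes it through read-only
# properties; absent keys fall back to None.
#
#   resp = KycResponse({'token': 'kyc_123', 'user_token': 'usr_456'})
#   resp.token           # 'kyc_123'
#   resp.business_token  # None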
|
37485
|
a = input("Enter the string:")
b = a.find("@")
c = a.find("#")
print("The original string is:",a)
print("The substring between @ and # is:",a[b+1:c])
|
37496
|
import thoonk
from thoonk.feeds import SortedFeed
import unittest
from ConfigParser import ConfigParser
class TestLeaf(unittest.TestCase):
def setUp(self):
conf = ConfigParser()
conf.read('test.cfg')
if conf.sections() == ['Test']:
self.ps = thoonk.Thoonk(host=conf.get('Test', 'host'),
port=conf.getint('Test', 'port'),
db=conf.getint('Test', 'db'))
self.ps.redis.flushdb()
else:
print 'No test configuration found in test.cfg'
exit()
def test_10_basic_sorted_feed(self):
"""Test basic sorted feed publish and retrieve."""
l = self.ps.sorted_feed("sortedfeed")
self.assertEqual(l.__class__, SortedFeed)
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
r = l.get_ids()
v = l.get_items()
items = {'1': 'hi',
'2': 'bye',
'3': 'thanks',
'4': "you're welcome"}
self.assertEqual(r, ['1', '2', '3', '4'], "Sorted feed results did not match publish: %s." % r)
self.assertEqual(v, items, "Sorted feed items don't match: %s" % v)
def test_20_sorted_feed_before(self):
"""Test addding an item before another item"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.publish_before('2', 'foo')
r = l.get_ids()
self.assertEqual(r, ['1', '3', '2'], "Sorted feed results did not match: %s." % r)
def test_30_sorted_feed_after(self):
"""Test adding an item after another item"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.publish_after('1', 'foo')
r = l.get_ids()
self.assertEqual(r, ['1', '3', '2'], "Sorted feed results did not match: %s." % r)
def test_40_sorted_feed_prepend(self):
"""Test addding an item to the front of the sorted feed"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.prepend('bar')
r = l.get_ids()
self.assertEqual(r, ['3', '1', '2'],
"Sorted feed results don't match: %s" % r)
def test_50_sorted_feed_edit(self):
"""Test editing an item in a sorted feed"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.edit('1', 'bar')
r = l.get_ids()
v = l.get_item('1')
vs = l.get_items()
items = {'1': 'bar',
'2': 'bye'}
self.assertEqual(r, ['1', '2'],
"Sorted feed results don't match: %s" % r)
self.assertEqual(v, 'bar', "Items don't match: %s" % v)
self.assertEqual(vs, items, "Sorted feed items don't match: %s" % vs)
def test_60_sorted_feed_retract(self):
"""Test retracting an item from a sorted feed"""
l = self.ps.sorted_feed("sortedfeed")
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.retract('3')
r = l.get_ids()
self.assertEqual(r, ['1', '2', '4'],
"Sorted feed results don't match: %s" % r)
def test_70_sorted_feed_move_first(self):
"""Test moving items around in the feed."""
l = self.ps.sorted_feed('sortedfeed')
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.move_first('4')
r = l.get_ids()
self.assertEqual(r, ['4', '1', '2', '3'],
"Sorted feed results don't match: %s" % r)
def test_71_sorted_feed_move_last(self):
"""Test moving items around in the feed."""
l = self.ps.sorted_feed('sortedfeed')
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.move_last('2')
r = l.get_ids()
self.assertEqual(r, ['1', '3', '4', '2'],
"Sorted feed results don't match: %s" % r)
def test_72_sorted_feed_move_before(self):
"""Test moving items around in the feed."""
l = self.ps.sorted_feed('sortedfeed')
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.move_before('1', '2')
r = l.get_ids()
self.assertEqual(r, ['2', '1', '3', '4'],
"Sorted feed results don't match: %s" % r)
def test_73_sorted_feed_move_after(self):
"""Test moving items around in the feed."""
l = self.ps.sorted_feed('sortedfeed')
l.publish("hi")
l.publish("bye")
l.publish("thanks")
l.publish("you're welcome")
l.move_after('1', '4')
r = l.get_ids()
self.assertEqual(r, ['1', '4', '2', '3'],
"Sorted feed results don't match: %s" % r)
suite = unittest.TestLoader().loadTestsFromTestCase(TestLeaf)
|
37499
|
from unittest.mock import MagicMock
import google.protobuf.text_format as text_format
import numpy as np
from banditpylib.bandits import CvarReward
from banditpylib.data_pb2 import Actions, Context
from .ts import ThompsonSampling
class TestThompsonSampling:
"""Test thompson sampling policy"""
def test_simple_run(self):
revenues = np.array([0, 0.7, 0.8, 0.9, 1.0])
horizon = 100
reward = CvarReward(0.7)
learner = ThompsonSampling(revenues=revenues,
horizon=horizon,
reward=reward)
# Test warm start
learner.reset()
assert learner.actions(Context()).SerializeToString() == text_format.Parse(
"""
arm_pulls {
arm {
set {
id: 1
}
}
times: 1
}
""", Actions()).SerializeToString()
learner.reset()
# pylint: disable=protected-access
learner._ThompsonSampling__within_warm_start = MagicMock(
return_value=False)
mock_preference_params = np.array([1, 1, 1, 1, 1])
learner._ThompsonSampling__correlated_sampling = MagicMock(
return_value=mock_preference_params)
assert learner.actions(Context()).SerializeToString() == text_format.Parse(
"""
arm_pulls {
arm {
set {
id: 1
id: 2
id: 3
id: 4
}
}
times: 1
}
""", Actions()).SerializeToString()
|
37500
|
from maru import pymorphy
from maru.lemmatizer.abstract import ILemmatizer
from maru.tag import Tag
from maru.types import Word
class PymorphyLemmatizer(ILemmatizer):
def lemmatize(self, word: Word, tag: Tag) -> Word:
best_parse = max(
pymorphy.analyze(word),
key=lambda parse: (
tag.pos is pymorphy.get_part_of_speech(parse),
tag.case is pymorphy.get_case(parse),
tag.gender is pymorphy.get_gender(parse),
),
)
return best_parse.normal_form
|
37502
|
import os
import xcffib
from xcffib.testing import XvfbTest
from xcffib.xproto import Atom, ConfigWindow, EventMask, GetPropertyType
conn = xcffib.connect(os.environ['DISPLAY'])
xproto = xcffib.xproto.xprotoExtension(conn)
def arrange(layout, windowids):
for lay, winid in zip(layout, windowids):
xproto.ConfigureWindow(winid, ConfigWindow.X | ConfigWindow.Y | ConfigWindow.Width | ConfigWindow.Height, lay)
conn.flush()
def move(winid, x, y, sync=True):
xproto.ConfigureWindow(winid, ConfigWindow.X | ConfigWindow.Y, [x, y])
if sync:
conn.flush()
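# Usage sketch (window ids are placeholders; a real script would create or
# query windows first): tile two windows side by side on an 800x600 screen.
#
#   layout = [[0, 0, 400, 600], [400, 0, 400, 600]]  # x, y, width, height
#   arrange(layout, [window_a, window_b])
#   move(window_a, 10, 10)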
|
37515
|
import subprocess
__all__ = ['view_env', 'create_env', 'remove_env']
def view_env():
"""Get virtual environment info."""
cmd = f"conda info -e"
s = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
s = s.decode('utf-8').strip().split('\n')[2:]
s = [i.split(' ') for i in s]
return {i[0]:i[-1] for i in s}
def create_env(name, version):
"""Create virtual environment.
Args:
name: virtual environment.
version: python version.
Return:
log info.
"""
cmd = 'conda update -n base -c defaults conda'
subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
s = view_env()
if name in s:
return 'Virtual environment already exists.'
cmd = f"conda create -n {name} python={version} -y"
subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
s = view_env()
if name in s:
return 'Virtual environment successfully created.'
    return 'Virtual environment creation failed.'
def remove_env(name):
"""Remove virtual environment.
Args:
name: virtual environment.
Return:
log info.
"""
s = view_env()
if name not in s:
        return 'Virtual environment does not exist.'
cmd = f'conda remove -n {name} --all'
subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
s = view_env()
if name not in s:
return 'Virtual environment successfully removed.'
    return 'Virtual environment removal failed.'
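if __name__ == '__main__':
    # Smoke-test sketch: the environment name and Python version below are
    # arbitrary examples, and a working conda installation is assumed.
    print(view_env())
    print(create_env('demo_env', '3.9'))
    print(remove_env('demo_env'))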
|
37526
|
import asyncio
import math
import networkx as nx
import ccxt.async_support as ccxt
import datetime
import logging
from .logging_utils import FormatForLogAdapter
__all__ = [
'FeesNotAvailable',
'create_exchange_graph',
'load_exchange_graph',
]
adapter = FormatForLogAdapter(logging.getLogger('peregrinearb.utils.single_exchange'))
class FeesNotAvailable(Exception):
pass
def create_exchange_graph(exchange: ccxt.Exchange):
"""
Returns a simple graph representing exchange. Each edge represents a market.
exchange.load_markets() must have been called. Will throw a ccxt error if it has not.
"""
graph = nx.Graph()
for market_name in exchange.symbols:
try:
base_currency, quote_currency = market_name.split('/')
# if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
except ValueError:
continue
graph.add_edge(base_currency, quote_currency, market_name=market_name)
return graph
async def load_exchange_graph(exchange, name=True, fees=True, suppress=None, depth=False, tickers=None) -> nx.DiGraph:
"""
Returns a networkx DiGraph populated with the current ask and bid prices for each market in graph (represented by
edges). If depth, also adds an attribute 'depth' to each edge which represents the current volume of orders
available at the price represented by the 'weight' attribute of each edge.
"""
if suppress is None:
suppress = ['markets']
if name:
exchange = getattr(ccxt, exchange)()
if tickers is None:
adapter.info('Fetching tickers')
tickers = await exchange.fetch_tickers()
adapter.info('Fetched tickers')
market_count = len(tickers)
adapter.info('Loading exchange graph', marketCount=market_count)
adapter.debug('Initializing empty graph with exchange_name and timestamp attributes')
graph = nx.DiGraph()
# todo: get exchange's server time?
graph.graph['exchange_name'] = exchange.id
graph.graph['datetime'] = datetime.datetime.now(tz=datetime.timezone.utc)
adapter.debug('Initialized empty graph with exchange_name and timestamp attributes')
async def add_edges():
tasks = [_add_weighted_edge_to_graph(exchange, market_name, graph, log=True, fees=fees,
suppress=suppress, ticker=ticker, depth=depth, )
for market_name, ticker in tickers.items()]
await asyncio.wait(tasks)
if fees:
for i in range(20):
try:
adapter.info('Loading fees', iteration=i)
# must load markets to get fees
await exchange.load_markets()
except (ccxt.DDoSProtection, ccxt.RequestTimeout) as e:
if i == 19:
adapter.warning('Rate limited on final iteration, raising error', iteration=i)
raise e
adapter.warning('Rate limited when loading markets', iteration=i)
await asyncio.sleep(0.1)
except ccxt.ExchangeNotAvailable as e:
if i == 19:
adapter.warning('Cannot load markets due to ExchangeNotAvailable error, '
'graph will not be loaded.', iteration=i)
raise e
adapter.warning('Received ExchangeNotAvailable error when loading markets', iteration=i)
else:
break
adapter.info('Loaded fees', iteration=i, marketCount=market_count)
currency_count = len(exchange.currencies)
adapter.info('Adding data to graph', marketCount=market_count, currencyCount=currency_count)
await add_edges()
adapter.info('Added data to graph', marketCount=market_count, currencyCount=currency_count)
else:
adapter.info('Adding data to graph', marketCount=market_count)
await add_edges()
adapter.info('Added data to graph', marketCount=market_count)
adapter.debug('Closing connection')
await exchange.close()
adapter.debug('Closed connection')
adapter.info('Loaded exchange graph')
return graph
async def _add_weighted_edge_to_graph(exchange: ccxt.Exchange, market_name: str, graph: nx.DiGraph, log=True,
fees=False, suppress=None, ticker=None, depth=False, ):
"""
todo: add global variable to bid_volume/ ask_volume to see if all tickers (for a given exchange) have value == None
Returns a Networkx DiGraph populated with the current ask and bid prices for each market in graph (represented by
edges).
:param exchange: A ccxt Exchange object
:param market_name: A string representing a cryptocurrency market formatted like so:
'{base_currency}/{quote_currency}'
:param graph: A Networkx DiGraph upon
:param log: If the edge weights given to the graph should be the negative logarithm of the ask and bid prices. This
is necessary to calculate arbitrage opportunities.
:param fees: If fees should be taken into account for prices.
:param suppress: A list or set which tells which types of warnings to not throw. Accepted elements are 'markets'.
:param ticker: A dictionary representing a market as returned by ccxt's Exchange's fetch_ticker method
:param depth: If True, also adds an attribute 'depth' to each edge which represents the current volume of orders
available at the price represented by the 'weight' attribute of each edge.
"""
adapter.debug('Adding edge to graph', market=market_name)
if ticker is None:
try:
adapter.info('Fetching ticker', market=market_name)
ticker = await exchange.fetch_ticker(market_name)
adapter.info('Fetched ticker', market=market_name)
# any error is solely because of fetch_ticker
except:
if 'markets' not in suppress:
adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
market=market_name)
return
if fees:
if 'taker' in exchange.markets[market_name]:
# we always take the taker side because arbitrage depends on filling orders
# sell_fee_dict = exchange.calculate_fee(market_name, 'limit', 'sell', 0, 0, 'taker')
# buy_fee_dict = exchange.calculate_fee(market_name, 'limit', 'buy', 0, 0, 'taker')
fee = exchange.markets[market_name]['taker']
else:
if 'fees' not in suppress:
adapter.warning("The fees for {} have not yet been implemented into ccxt's uniform API."
.format(exchange))
raise FeesNotAvailable('Fees are not available for {} on {}'.format(market_name, exchange.id))
else:
fee = 0.002
else:
fee = 0
fee_scalar = 1 - fee
try:
bid_rate = ticker['bid']
ask_rate = ticker['ask']
if depth:
bid_volume = ticker['bidVolume']
ask_volume = ticker['askVolume']
if bid_volume is None:
adapter.warning('Market is unavailable because its bid volume was given as None. '
'It will not be included in the graph.', market=market_name)
return
if ask_volume is None:
adapter.warning('Market is unavailable because its ask volume was given as None. '
'It will not be included in the graph.', market=market_name)
return
# ask and bid == None if this market is non existent.
except TypeError:
adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
market=market_name)
return
# Exchanges give asks and bids as either 0 or None when they do not exist.
# todo: should we account for exchanges upon which an ask exists but a bid does not (and vice versa)? Would this
# cause bugs?
if ask_rate == 0 or bid_rate == 0 or ask_rate is None or bid_rate is None:
adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
market=market_name)
return
try:
base_currency, quote_currency = market_name.split('/')
# if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
except ValueError:
if 'markets' not in suppress:
adapter.warning('Market is unavailable at this time due to incorrect formatting. '
'It will not be included in the graph.', market=market_name)
return
if log:
if depth:
graph.add_edge(base_currency, quote_currency, weight=-math.log(fee_scalar * bid_rate),
depth=-math.log(bid_volume), market_name=market_name, trade_type='SELL',
fee=fee, volume=bid_volume, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=-math.log(fee_scalar * 1 / ask_rate),
depth=-math.log(ask_volume * ask_rate), market_name=market_name, trade_type='BUY',
fee=fee, volume=ask_volume, no_fee_rate=ask_rate)
else:
graph.add_edge(base_currency, quote_currency, weight=-math.log(fee_scalar * bid_rate),
market_name=market_name, trade_type='SELL', fee=fee, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=-math.log(fee_scalar * 1 / ask_rate),
market_name=market_name, trade_type='BUY', fee=fee, no_fee_rate=ask_rate)
else:
if depth:
graph.add_edge(base_currency, quote_currency, weight=fee_scalar * bid_rate, depth=bid_volume,
market_name=market_name, trade_type='SELL', fee=fee, volume=bid_volume, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=fee_scalar * 1 / ask_rate, depth=ask_volume,
market_name=market_name, trade_type='BUY', fee=fee, volume=ask_volume, no_fee_rate=ask_rate)
else:
graph.add_edge(base_currency, quote_currency, weight=fee_scalar * bid_rate,
market_name=market_name, trade_type='SELL', fee=fee, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=fee_scalar * 1 / ask_rate,
market_name=market_name, trade_type='BUY', fee=fee, no_fee_rate=ask_rate)
adapter.debug('Added edge to graph', market=market_name)
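# Why the negative-log weights (an explanatory sketch, not part of this
# module): with weight = -log(rate), multiplying rates along a trade cycle
# corresponds to summing edge weights, so a cycle whose weights sum below zero
# is one whose rates multiply to more than 1, i.e. an arbitrage. networkx can
# then detect opportunities directly (exchange name is an arbitrary example):
#
#   import asyncio
#   import networkx as nx
#   graph = asyncio.get_event_loop().run_until_complete(
#       load_exchange_graph('binance'))
#   if nx.negative_edge_cycle(graph):
#       print('arbitrage cycle exists on this exchange')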
|
37546
|
import json
from .fragment_doc import fragment_srt, fragment_syosetu, has_unbalanced_quotes, extract_kana_kanji
EXTRACT_KANA_KANJI_CASES = [
['asdf.!ä', ''],
['あいうえお', 'あいうえお'],
['asdこfdれ', 'これ'],
['「ああ、畜生」foo', 'ああ畜生'],
]
for [text, target] in EXTRACT_KANA_KANJI_CASES:
if extract_kana_kanji(text) != target:
print('FAIL EXTRACT KANA+KANJI')
print(text)
QUOTE_BALANCE_CASES = [
['あいうえお', False],
['あい「うえお', True],
['「あいうえお', True],
['「あいうえお」', False],
['あい「うえ」お', False],
['「あい」う「えお」', False],
['「あいう「えお」」', False],
['「あい「うえ」お', True],
['あい「うえ」お」', True],
]
for [text, target] in QUOTE_BALANCE_CASES:
if has_unbalanced_quotes(text) != target:
print('FAIL QUOTE BALANCE')
print(text)
FRAG_CASES = [
['S',
'''
1
00:02:17,440 --> 00:02:20,375
Senator, we're making
our final approach into Coruscant.
2
00:02:20,476 --> 00:02:22,501
Very good, Lieutenant.
''',
[
{'text': "Senator, we're making our final approach into Coruscant.", 'loc': 't:137.440-140.375'},
{'text': 'Very good, Lieutenant.', 'loc': 't:140.476-142.501'},
]
],
# no anchor novel
['N', '<div><p>食べる</p></div>', [{'text': "食べる"}]],
# anchor novel
['N', '<div><p id="L123">食べる</p></div>', [{'text': '食べる', 'loc': 'a:L123'}]],
# no splitting
['N', '<div><p>それでは、行ってまいります</p></div>',
[
{'text': 'それでは、行ってまいります'},
]
],
# simple splitting
['N', '<div><p>そのせいだろうか。あの日に見た空の青を、よく覚えている。</p></div>',
[
{'text': 'そのせいだろうか。'},
{'text': 'あの日に見た空の青を、よく覚えている。'},
]
],
# strip leading dashes
['N', '<div><p>――ああ、そうだったのですか。</p></div>',
[
{'text': 'ああ、そうだったのですか。'},
]
],
# strip leading ellipses
['N', '<div><p>……そうか?</p></div>',
[
{'text': 'そうか?'},
]
],
# strip matching quotes
['N', '<div><p>「ああ、畜生」</p></div>',
[
{'text': 'ああ、畜生'},
]
],
# strip just leading open quote
['N', '<div><p>「あっ、大丈夫です!</p></div>',
[
{'text': 'あっ、大丈夫です!'},
]
],
# strip just trailing close quote
['N', '<div><p>王宮に神父がいるかっ」</p></div>',
[
{'text': '王宮に神父がいるかっ'},
]
],
# combo
['N', '<div><p>「……うん」</p></div>',
[
{'text': 'うん'},
]
],
# don't strip trailing ellipses
['N', '<div><p>「……血……血が……………」</p></div>',
[
{'text': '血……血が……………'},
]
],
# ignore fragments that start with close quote
['N', '<div><p>」と見開いた。</p></div>', []],
# handle other quotes
['N', '<div><p>『モルツ、少し休憩する』</p></div>',
[
{'text': 'モルツ、少し休憩する'},
]
],
# remove leading speaker label
['N', '<div><p>【ポルペオ】「なんだ、その目は?</p></div>',
[
{'text': 'なんだ、その目は?'},
]
],
# remove drama-style speaker label
['N', '<div><p>(平次)おい 大変だ。</p></div>',
[
{'text': 'おい 大変だ。'},
]
],
# TODO: can we get rid of the leading dash?
# ['N', '<div><p id="L75">─ 〝城内〟に命ず。騎士団による警備を撤去せよ。</p></div>',
# [
# {'text': '〝城内〟に命ず。', 'loc': 'a:L75'},
# {'text': '騎士団による警備を撤去せよ。', 'loc': 'a:L75'}
# ]
# ],
]
for [kind, text, expected_result] in FRAG_CASES:
if kind == 'S':
result = fragment_srt(text, None)
elif kind == 'N':
result = fragment_syosetu(text, None)
else:
assert False
# this is hacky, but should be OK
if json.dumps(result, sort_keys=True) != json.dumps(expected_result, sort_keys=True):
print('FAIL')
print('TEXT-----------------')
print(text)
print('TARGET RESULT--------')
print(repr(expected_result))
print('ACTUAL RESULT--------')
print(repr(result))
print()
|
37607
|
import sys;
from queue import Queue
from multiprocessing.managers import BaseManager
import etl;
import json
import extends;
import time;
authkey= "etlpy".encode('utf-8')
timeout=1;
rpc_port=8888
class ETLJob:
def __init__(self,project,jobname,config,id):
self.project= project;
self.jobname=jobname;
self.config=config;
self.id= id;
class JobResult:
def __init__(self,name,count,id):
self.name=name;
self.count=count;
self.id=id;
class Master:
def __init__(self,project,jobname):
        # Queue of jobs dispatched to workers
self.dispatched_job_queue = Queue()
        # Queue of finished jobs
self.finished_job_queue = Queue()
self.project= project;
self.jobname=jobname;
self.maxprocess= 10;
def get_dispatched_job_queue(self):
return self.dispatched_job_queue
def get_finished_job_queue(self):
return self.finished_job_queue
def start(self,skip=0):
        # Register the dispatched-job and finished-job queues on the network
BaseManager.register('get_dispatched_job_queue', callable=self.get_dispatched_job_queue)
BaseManager.register('get_finished_job_queue', callable=self.get_finished_job_queue)
        # Listen on the port and start the service
manager = BaseManager(address=('0.0.0.0', rpc_port), authkey=authkey)
manager.start()
        # Fetch the queues via the methods registered above
dispatched_jobs = manager.get_dispatched_job_queue()
finished_jobs = manager.get_finished_job_queue()
job_id = 0
module= self.project.modules[self.jobname];
proj=json.loads(json.dumps(etl.convert_dict(self.project,self.project.__defaultdict__), ensure_ascii=False))
while True:
for task in etl.parallel_map(module):
job_id = job_id + 1
if job_id<skip:
continue
job = ETLJob(proj, self.jobname, task, job_id);
print('Dispatch job: %s' % job.id)
dispatched_jobs.put(job)
while not dispatched_jobs.empty():
                job = finished_jobs.get(timeout=60)
print('Finished Job: %s, Count: %s' % (job.id, job.count))
            key = input('press any key to repeat, c to cancel')
if key=='c':
manager.shutdown()
break
#manager.shutdown()
class Slave:
def __init__(self):
        # Queue of jobs dispatched to workers
self.dispatched_job_queue = Queue()
        # Queue of finished jobs
self.finished_job_queue = Queue()
def start(self,execute= True,serverip='127.0.0.1',port=8888):
        # Register the dispatched-job and finished-job queues on the network
BaseManager.register('get_dispatched_job_queue')
BaseManager.register('get_finished_job_queue')
server = serverip;
print('Connect to server %s...' % server)
manager = BaseManager(address=(server, port), authkey=authkey)
manager.connect()
        # Fetch the queues via the methods registered above
dispatched_jobs = manager.get_dispatched_job_queue()
finished_jobs = manager.get_finished_job_queue()
        # Run jobs and return results; this merely simulates job execution, so the result is derived from the received job
while True:
if dispatched_jobs.empty():
time.sleep(1)
                print('queue is empty, waiting 1 sec...')
continue;
job = dispatched_jobs.get(timeout=timeout)
print('Run job: %s ' % job.id)
project=job.project;
project= etl.LoadProject_dict(project);
module= project.modules[job.jobname];
count=0
try:
generator= etl.parallel_reduce(module,[ job.config],execute)
for r in generator:
count+=1;
except Exception as e:
print(e)
print('finish job,id %s, count %s'%(job.id,count))
resultjob= JobResult(job.jobname,count,job.id)
finished_jobs.put(resultjob)
if __name__ == '__main__':
ip='127.0.0.1'
port=8888;
argv=sys.argv;
if len(argv)>1:
ip=argv[1];
if len(argv)>2:
port=int(argv[2]);
slave= Slave();
slave.start(True,ip,port);
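# Master-side sketch (project construction is elided because it depends on the
# etlpy project format): a coordinating process would do roughly
#   master = Master(project, jobname)
#   master.start(skip=0)
# while one or more Slave processes run this file on worker machines.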
|
37614
|
from blackboard import BlackBoardContent, BlackBoardClient, BlackBoardAttachment, BlackBoardEndPoints, \
BlackBoardCourse, BlackBoardInstitute
import os
import re
import requests
import datetime
import xmltodict
import argparse
import sys
import json
import getpass
import main
def test():
args = main.handle_arguments(True)
# Institute Data
print("Dumping Institute Properties...")
institute_data = dict()
institute_vars = vars(args.institute)
for item in institute_vars:
institute_data[item] = institute_vars[item]
print("Dumped Institute Properties...")
# Client Data
client_data = dict()
    client = BlackBoardClient(username=args.username, password=args.password, site=args.site, save_location=args.location, institute=args.institute)
attempt = client.login()
print(f"Client Login {'Successful' if attempt[0] else 'Failure'}...\nDumping Client Properties...")
client_data["public_api_available"] = client.public_endpoint_available()
client_data["login_endpoint"] = attempt[1].url
client_data["login_status_code"] = attempt[1].status_code
client_data["login_response"] = attempt[1].text
client_data["successful_login"] = attempt[0]
client_vars = vars(client)
for item in client_vars:
if item not in ('_BlackBoardClient__password', 'session', 'institute', 'api_version', 'thread_pool'):
client_data[item] = client_vars[item]
print("Dumped Client Properties...")
# Get Parent Course Data
course_data = {
'endpoint': '',
'status_code': '',
'response': '',
'courses': []
}
def get_courses():
"""
Get all Available Course Information for the Client and Record Details
"""
courses_request = client.send_get_request(BlackBoardEndPoints.get_user_courses(client.user_id))
courses = courses_request.json()
course_data['endpoint'] = courses_request.url
course_data['status_code'] = courses_request.status_code
course_data['response'] = courses
if "results" in courses:
for course in courses["results"]:
try:
course_request = client.send_get_request(BlackBoardEndPoints.get_course(course["courseId"]))
course = course_request.json()
bbcourse = BlackBoardCourse(client, course)
course_vars = vars(bbcourse)
course_sub_data = dict()
course_sub_data["course_endpoint"] = course_request.url
course_sub_data['status_code'] = course_request.status_code
for item in course_vars:
course_sub_data[item] = str(course_vars[item])
course_data['courses'].append(course_sub_data)
except Exception as e:
course_data['courses'].append({'error': str(e)})
print("Getting Course Data...")
get_courses()
print("Completed Course Data...")
dumps = {
'institute': institute_data,
'client': client_data,
'courses': course_data,
}
print("Preparing to Dump Debug...")
with open(os.path.abspath(os.path.join(client.base_path, "dump.json")), 'w+') as file:
print(f"Writing File: \"{file.name}\"...")
json.dump(dumps, file)
print("Done...")
if __name__ == "__main__":
test()
|
37627
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
class MapColorControl():
def __init__(self, colour_scheme, map_normalization,data):
self.colors = plt.get_cmap(colour_scheme)(range(256))[:,:3]
self.data = data
if self.data.min() <= 0:
self.data = self.data + abs(self.data.min()) + 1
if map_normalization == "Linear":
self.normNorm = colors.Normalize(vmin=self.data.min(),vmax=self.data.max())
elif map_normalization == "Logarithmic":
self.normNorm = colors.LogNorm(vmin=self.data.min(),vmax=self.data.max())
elif map_normalization == "Power-law":
self.normNorm = colors.PowerNorm(gamma=2,vmin=self.data.min(),vmax=self.data.max())
def get_map_data(self):
datum = np.round(self.normNorm(self.data) * 255)
return self.map(datum)
def map(self,infos):
datum = []
for index in infos:
datum.append(colors.rgb2hex(self.colors[int(index)]))
return datum
class MapControl():
def __init__(self,max_value,min_value,map_normalization,data):
self.data = data
if self.data.min() <=0:
self.data = self.data + abs(self.data.min()) +1
if map_normalization == "Linear":
self.normNorm = colors.Normalize(vmin=self.data.min(),vmax=self.data.max())
elif map_normalization == "Logarithmic":
self.normNorm = colors.LogNorm(vmin=self.data.min(),vmax=self.data.max())
elif map_normalization == "Power-law":
self.normNorm = colors.PowerNorm(gamma=2,vmin=self.data.min(),vmax=self.data.max())
self.maxValue = max_value
self.minValue = min_value
def get_map_data(self,is_round):
if is_round:
datum = np.round(self.normNorm(self.data) * (self.maxValue-self.minValue) + self.minValue,5)
else:
datum = np.round(self.normNorm(self.data) * (self.maxValue - self.minValue) + self.minValue)
return list(datum)
|
37628
|
from django.conf.urls import patterns, url
from lattice.views import (lattices)
from lattice.views import (saveLatticeInfo, saveLattice)
from lattice.views import (saveModel)
from lattice.views import (lattice_home, lattice_content_home, lattice_content_search, lattice_content_list, lattice_content_model_list, lattice_content_details, lattice_content_model_details)
from lattice.views import (lattice_modal, saveLatticeHelper, saveLatticeTypeHelper, saveLatticeStatusHelper, saveModelHelper, saveModelStatusHelper)
urlpatterns = patterns(
'',
# return raw data not thru html ui
url(r'^lattice/$',
lattices,
name='lattices'),
url(r'^lattice/savelatticeinfo/$',
saveLatticeInfo,
name='saveLatticeInfo'),
url(r'^lattice/savelattice$',
saveLattice,
name='saveLattice'),
url(r'^lattice/savemodel$',
saveModel,
name='saveModel'),
url(r'^lattice/web/$',
lattice_home,
name='lattice_home'),
url(r'^lattice/web/index.html$',
lattice_home,
name='lattice_home'),
url(r'^lattice/web/content.html$',
lattice_content_home,
name='lattice_content_home'),
url(r'^lattice/web/search.html$',
lattice_content_search,
name='lattice_content_search'),
url(r'^lattice/web/list.html$',
lattice_content_list,
name='lattice_content_list'),
url(r'^lattice/web/model_list.html$',
lattice_content_model_list,
name='lattice_content_model_list'),
url(r'^lattice/web/details.html$',
lattice_content_details,
name='lattice_content_details'),
url(r'^lattice/web/model_details.html$',
lattice_content_model_details,
name='lattice_content_model_details'),
url(r'^lattice/web/modal/',
lattice_modal,
name='lattice_modal'),
url(r'^lattice/savelatticetype$',
saveLatticeTypeHelper,
        name='saveLatticeTypeHelper'),
url(r'^lattice/upload$',
saveLatticeHelper,
name='saveLatticeHelper'),
url(r'^lattice/savestatus$',
saveLatticeStatusHelper,
name='saveLatticeStatusHelper'),
url(r'^model/upload$',
saveModelHelper,
name='saveModelHelper'),
url(r'^model/savestatus$',
saveModelStatusHelper,
name='saveModelStatusHelper'),
)
|
37633
|
import unittest
import torch
from parameterized import parameterized
from torecsys.losses import *
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
class AdaptiveHingeLossTestCase(unittest.TestCase):
@parameterized.expand([
(4, 32,),
(16, 16,),
(32, 4,),
])
def test_forward(self, batch_size: int, num_neg: int):
criterion = AdaptiveHingeLoss()
criterion = criterion.to(device)
pos_out = torch.rand(batch_size, 1)
neg_out = torch.rand(batch_size, num_neg)
        # high is exclusive in torch.randint, so sample from {0, 1}
        mask = torch.randint(0, 2, (batch_size,))
mask = mask == 1
loss = criterion(pos_out, neg_out, mask)
self.assertEqual(loss.size(), torch.Size([]))
print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class BayesianPersonalizedRankingLossTestCase(unittest.TestCase):
@parameterized.expand([
(4, 32,),
(16, 16,),
(32, 4,),
])
def test_forward(self, batch_size: int, num_neg: int):
criterion = BayesianPersonalizedRankingLoss(reduction='sum')
criterion = criterion.to(device)
pos_out = torch.rand(batch_size, 1)
neg_out = torch.rand(batch_size, num_neg)
        mask = torch.randint(0, 2, (batch_size,))
mask = mask == 1
loss = criterion(pos_out, neg_out, mask)
self.assertEqual(loss.size(), torch.Size([]))
print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class HingeLossTestCase(unittest.TestCase):
@parameterized.expand([
(4, 32,),
(16, 16,),
(32, 4,),
])
def test_forward(self, batch_size: int, num_neg: int):
criterion = HingeLoss()
criterion = criterion.to(device)
pos_out = torch.rand(batch_size, 1)
neg_out = torch.rand(batch_size, num_neg)
        mask = torch.randint(0, 2, (batch_size,))
mask = mask == 1
loss = criterion(pos_out, neg_out, mask)
self.assertEqual(loss.size(), torch.Size([]))
print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class ListnetLossTestCase(unittest.TestCase):
@parameterized.expand([
(4, 32,),
(16, 16,),
(32, 4,),
])
def test_forward(self, batch_size: int, length: int):
criterion = ListnetLoss()
criterion = criterion.to(device)
y_hat = torch.rand(batch_size, length)
y_true = torch.rand(batch_size, length)
        mask = torch.randint(0, 2, (batch_size,))
mask = mask == 1
loss = criterion(y_hat, y_true, mask)
self.assertEqual(loss.size(), torch.Size([]))
print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class PointwiseLogisticLossTestCase(unittest.TestCase):
@parameterized.expand([
(4, 32,),
(16, 16,),
(32, 4,),
])
def test_forward(self, batch_size: int, num_neg: int):
criterion = PointwiseLogisticLoss()
criterion = criterion.to(device)
pos_out = torch.rand(batch_size, 1)
neg_out = torch.rand(batch_size, num_neg)
        mask = torch.randint(0, 2, (batch_size,))
mask = mask == 1
loss = criterion(pos_out, neg_out, mask)
self.assertEqual(loss.size(), torch.Size([]))
print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class SkipGramLossTestCase(unittest.TestCase):
@parameterized.expand([
(4, 32, 32,),
(16, 64, 16,),
(32, 128, 4,),
])
def test_forward(self, batch_size: int, embed_size: int, num_neg: int):
criterion = SkipGramLoss()
criterion = criterion.to(device)
content_inp = torch.rand(batch_size, 1, embed_size)
pos_inp = torch.rand(batch_size, 1, embed_size)
neg_inp = torch.rand(batch_size, num_neg, embed_size)
loss = criterion(content_inp, pos_inp, neg_inp)
self.assertEqual(loss.size(), torch.Size([]))
print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class TripletLossTestCase(unittest.TestCase):
@parameterized.expand([
(4, 32, 32,),
(16, 64, 16,),
(32, 128, 4,),
])
def test_forward(self, batch_size: int, embed_size: int, num_neg: int):
criterion = TripletLoss(margin=1.0, reduction='sum')
criterion = criterion.to(device)
pos_out = torch.rand(batch_size, 1)
neg_out = torch.rand(batch_size, num_neg)
        mask = torch.randint(0, 2, (batch_size,))
mask = mask == 1
loss = criterion(pos_out, neg_out, mask)
self.assertEqual(loss.size(), torch.Size([]))
print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
if __name__ == '__main__':
unittest.main()
|
37636
|
import os
from sqlite3 import dbapi2 as sqlite3
class GarageDb:
def __init__(self, instance_path, resource_path):
self.db_file = os.path.join(instance_path, 'history.db')
self.init_file = os.path.join(resource_path, 'schema.sql')
# Run init script to ensure database structure
conn = self.get_connection()
with open(self.init_file, mode='r') as f:
conn.cursor().executescript(f.read())
conn.commit()
conn.close()
def get_connection(self):
rv = sqlite3.connect(self.db_file)
rv.row_factory = sqlite3.Row
return rv
def record_event(self, user_agent: str, login: str, event: str, description: str):
conn = self.get_connection()
conn.execute('insert into entries (UserAgent, Login, Event, Description) values (?, ?, ?, ?)',
[user_agent, login, event, description])
conn.commit()
conn.close()
def read_history(self):
conn = self.get_connection()
cur = conn.execute('select datetime(timestamp, \'localtime\') as timestamp, event, description from entries order by timestamp desc')
records = cur.fetchmany(500)
conn.close()
return records
def read_full_history(self):
conn = self.get_connection()
cur = conn.execute('select datetime(timestamp, \'localtime\') as timestamp, event, description from entries order by timestamp desc')
records = cur.fetchall()
conn.close()
return records
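if __name__ == '__main__':
    # Illustrative usage sketch (the 'instance' and 'resources' paths and the
    # schema.sql they imply are assumptions, not part of the original module).
    db = GarageDb('instance', 'resources')
    db.record_event('test-agent', 'alice', 'door_open', 'opened via sketch')
    for row in db.read_history():
        print(row['timestamp'], row['event'], row['description'])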
|
37645
|
from flask_restful import Resource
from flask import request
class Shutdown(Resource):
def get(self):
shutdown = request.environ.get('werkzeug.server.shutdown')
if shutdown is None:
raise RuntimeError('Not running with the Werkzeug Server')
shutdown()
return 'Server shutting down'
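if __name__ == '__main__':
    # Illustrative wiring sketch (the app/api objects here are assumptions,
    # not part of the original module).
    from flask import Flask
    from flask_restful import Api

    app = Flask(__name__)
    api = Api(app)
    api.add_resource(Shutdown, '/shutdown')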
|
37675
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.labour import labour
def test_labour():
"""Test module labour.py by downloading
labour.csv and testing shape of
extracted data has 569 rows and 4 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = labour(test_path)
try:
assert x_train.shape == (569, 4)
except:
shutil.rmtree(test_path)
    raise
|
37703
|
from hwt.synthesizer.unit import Unit
from hwt.interfaces.std import VectSignal
from hwt.hdl.types.struct import HStruct
from hwt.interfaces.utils import addClkRstn
class PrivateSignalsOfStructType(Unit):
def _declr(self):
addClkRstn(self)
self.a = VectSignal(8)
self.b = VectSignal(8)._m()
self.c = VectSignal(8)
self.d = VectSignal(8)._m()
def _impl(self):
t = self.a._dtype
tmp_t = \
HStruct(
(t, "a0"),
(t, "a1"),
(t[2], "a2_3"),
(HStruct(
(t, "a4"),
(t[2], "a5_6"),
),
"a4_5_6"
),
)
tmp = self._sig("tmp", tmp_t)
self.connect_tmp_chain(tmp, self.a, self.b)
tmp_reg = self._reg("tmp_reg", tmp_t, def_val={
"a0": 0,
"a1": 1,
"a2_3": [2, 3],
"a4_5_6": {
"a4": 4,
"a5_6": [5, 6],
}
})
self.connect_tmp_chain(tmp_reg, self.c, self.d)
def connect_tmp_chain(self, tmp, a_in, a_out):
        # a_in is connected to a_out through a chain of member signals of tmp
tmp.a0(a_in)
tmp.a1(tmp.a0)
tmp.a2_3[0](tmp.a1)
tmp.a2_3[1](tmp.a2_3[0])
tmp.a4_5_6.a4(tmp.a2_3[1])
tmp.a4_5_6.a5_6[0](tmp.a4_5_6.a4)
tmp.a4_5_6.a5_6[1](tmp.a4_5_6.a5_6[0])
a_out(tmp.a4_5_6.a5_6[1])
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
u = PrivateSignalsOfStructType()
print(to_rtl_str(u))
|
37711
|
from _carmcmc import *
from carma_pack import CarmaModel, CarmaSample, Car1Sample, power_spectrum, carma_variance, \
carma_process, get_ar_roots
from samplers import MCMCSample
|
37712
|
from django.conf.urls import url
from .views import tracon2022_afterparty_participants_view, tracon2022_afterparty_summary_view
urlpatterns = [
url(
r'^events/(?P<event_slug>tracon2022)/labour/surveys/kaatoilmo/results.xlsx$',
tracon2022_afterparty_participants_view,
name='tracon2022_afterparty_participants_view',
),
url(
r'^events/(?P<event_slug>tracon2022)/labour/surveys/kaatoilmo/summary/?$',
tracon2022_afterparty_summary_view,
name='tracon2022_afterparty_summary_view',
),
]
|
37739
|
from __future__ import unicode_literals
import os, sys, subprocess, ast
from nbconvert.preprocessors import Preprocessor
from holoviews.core import Dimensioned, Store
from holoviews.ipython.preprocessors import OptsMagicProcessor, OutputMagicProcessor
from holoviews.ipython.preprocessors import StripMagicsProcessor
from holoviews.util.command import export_to_python
import tempfile
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def comment_out_magics(source):
"""
Utility used to make sure AST parser does not choke on unrecognized
magics.
"""
filtered = []
for line in source.splitlines():
if line.strip().startswith('%'):
filtered.append('# ' + line)
else:
filtered.append(line)
return '\n'.join(filtered)
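# Example (illustrative): comment_out_magics('%matplotlib inline\nx = 1')
# returns '# %matplotlib inline\nx = 1'.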
def wrap_cell_expression(source, template='{expr}'):
"""
If a cell ends in an expression that could be displaying a HoloViews
object (as determined using the AST), wrap it with a given prefix
and suffix string.
If the cell doesn't end in an expression, return the source unchanged.
"""
cell_output_types = (ast.IfExp, ast.BoolOp, ast.BinOp, ast.Call,
ast.Name, ast.Attribute)
try:
node = ast.parse(comment_out_magics(source))
except SyntaxError:
return source
filtered = source.splitlines()
if node.body != []:
last_expr = node.body[-1]
if not isinstance(last_expr, ast.Expr):
pass # Not an expression
elif isinstance(last_expr.value, cell_output_types):
# CAREFUL WITH UTF8!
expr_end_slice = filtered[last_expr.lineno-1][:last_expr.col_offset]
expr_start_slice = filtered[last_expr.lineno-1][last_expr.col_offset:]
start = '\n'.join(filtered[:last_expr.lineno-1]
+ ([expr_end_slice] if expr_end_slice else []))
ending = '\n'.join(([expr_start_slice] if expr_start_slice else [])
+ filtered[last_expr.lineno:])
if ending.strip().endswith(';'):
return source
# BUG!! Adds newline for 'foo'; <expr>
return start + '\n' + template.format(expr=ending)
return source
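# Example (illustrative): wrap_cell_expression('x = 1\nx + 1', 'display({expr})')
# returns 'x = 1\ndisplay(x + 1)'.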
def strip_specific_magics(source, magic):
    """
    Given the source of a cell, filter out specific cell and line magics.
    """
    filtered = []
    for line in source.splitlines():
        # Slice off the magic prefix rather than using str.lstrip, which
        # treats its argument as a character set; also use elif so a matching
        # line is never appended twice.
        if line.startswith(f'%%{magic}'):
            filtered.append(line[len(f'%%{magic}'):].strip(' '))
        elif line.startswith(f'%{magic}'):
            filtered.append(line[len(f'%{magic}'):].strip(' '))
        else:
            filtered.append(line)
    return '\n'.join(filtered)
class StripTimeMagicsProcessor(Preprocessor):
    """
    Preprocessor used when converting notebooks to Python source that strips
    out just the time magics while keeping the rest of the cell.
    """
def preprocess_cell(self, cell, resources, index):
if cell['cell_type'] == 'code':
cell['source'] = strip_specific_magics(cell['source'], 'time')
return cell, resources
def __call__(self, nb, resources): return self.preprocess(nb,resources)
def strip_trailing_semicolons(source, function):
"""
Give the source of a cell, filter out lines that contain a specified
function call and end in a semicolon.
"""
filtered=[]
for line in source.splitlines():
if line.endswith(f'{function}();'):
filtered.append(line[:-1])
else:
filtered.append(line)
return '\n'.join(filtered)
class StripServableSemicolonsProcessor(Preprocessor):
"""
Preprocessor to convert notebooks to Python source strips out just semicolons
that come after the servable function call.
"""
def preprocess_cell(self, cell, resources, index):
if cell['cell_type'] == 'code':
cell['source'] = strip_trailing_semicolons(cell['source'], 'servable')
return cell, resources
def __call__(self, nb, resources): return self.preprocess(nb,resources)
def thumbnail(obj, basename):
import os
if isinstance(obj, Dimensioned) and not os.path.isfile(basename+'.png'):
Store.renderers[Store.current_backend].save(obj, basename, fmt='png')
elif 'panel' in sys.modules:
from panel.viewable import Viewable
if isinstance(obj, Viewable) and not os.path.isfile(basename+'.png'):
obj.save(basename+'.png')
return obj
class ThumbnailProcessor(Preprocessor):
def __init__(self, basename, **kwargs):
self.basename = basename
super(ThumbnailProcessor, self).__init__(**kwargs)
def preprocess_cell(self, cell, resources, index):
if cell['cell_type'] == 'code':
template = 'from nbsite.gallery.thumbnailer import thumbnail;thumbnail({{expr}}, {basename!r})'
cell['source'] = wrap_cell_expression(cell['source'],
template.format(
basename=self.basename))
return cell, resources
def __call__(self, nb, resources): return self.preprocess(nb,resources)
def execute(code, cwd, env):
with tempfile.NamedTemporaryFile('wb', delete=True) as f:
f.write(code)
f.flush()
proc = subprocess.Popen(['python', f.name], cwd=cwd, env=env)
proc.wait()
return proc.returncode
def notebook_thumbnail(filename, subpath):
basename = os.path.splitext(os.path.basename(filename))[0]
dir_path = os.path.abspath(os.path.join(subpath, 'thumbnails'))
absdirpath= os.path.abspath(os.path.join('.', dir_path))
if not os.path.exists(absdirpath):
os.makedirs(absdirpath)
preprocessors = [OptsMagicProcessor(),
OutputMagicProcessor(),
StripTimeMagicsProcessor(),
StripServableSemicolonsProcessor(),
StripMagicsProcessor(),
ThumbnailProcessor(os.path.abspath(os.path.join(dir_path, basename)))]
return export_to_python(filename, preprocessors)
if __name__ == '__main__':
files = []
abspath = os.path.abspath(sys.argv[1])
split_path = abspath.split(os.path.sep)
if os.path.isdir(abspath):
if 'examples' not in split_path:
print('Can only thumbnail notebooks in examples/')
sys.exit()
subpath = os.path.sep.join(split_path[split_path.index('examples')+1:])
files = [os.path.join(abspath, f) for f in os.listdir(abspath)
if f.endswith('.ipynb')]
elif os.path.isfile(abspath):
subpath = os.path.sep.join(split_path[split_path.index('examples')+1:-1])
files=[abspath]
else:
print('Path {path} does not exist'.format(path=abspath))
for f in files:
print('Generating thumbnail for file {filename}'.format(filename=f))
code = notebook_thumbnail(f, subpath)
try:
retcode = execute(code.encode('utf8'), cwd=os.path.split(f)[0], env={})
except Exception as e:
print('Failed to generate thumbnail for {filename}'.format(filename=f))
print(str(e))
|
37770
|
import qcodes as qc
from qdev_wrappers.sweep_functions import _do_measurement, _do_measurement_single, \
_select_plottables
def measure(meas_param, do_plots=True):
"""
Function which measures the specified parameter and optionally
plots the results.
Args:
meas_param: parameter to measure
do_plots: Default True: If False no plots are produced.
Data is still saved and can be displayed with show_num.
Returns:
data (qcodes dataset)
plot: QT plot
"""
measurement = qc.Measure(meas_param)
meas_params = _select_plottables(meas_param)
plot, data = _do_measurement_single(
measurement, meas_params, do_plots=do_plots)
return data, plot
def sweep1d(meas_param, sweep_param, start, stop, step, delay=0.01,
do_plots=True):
"""
Function which does a 1 dimensional sweep and optionally plots the results.
Args:
meas_param: parameter which we want the value of at each point
        sweep_param: parameter to be swept (default on x axis)
        start: starting value for sweep_param
        stop: final value for sweep_param
        step: value to step sweep_param by
        delay (default 0.01): minimum time to spend on each point
do_plots: Default True: If False no plots are produced.
Data is still saved and can be displayed with show_num.
Returns:
data (qcodes dataset)
plot: QT plot
"""
loop = qc.Loop(sweep_param.sweep(
start, stop, step), delay).each(meas_param)
set_params = ((sweep_param, start, stop),)
meas_params = _select_plottables(meas_param)
plot, data = _do_measurement(loop, set_params, meas_params,
do_plots=do_plots)
return data, plot
def sweep2d(meas_param, sweep_param1, start1, stop1, step1,
sweep_param2, start2, stop2, step2, delay=0.01,
do_plots=True):
"""
Function which does a 2 dimensional sweep and optionally plots the results.
Args:
meas_param: parameter which we want the value of at each point
sweep_param1: parameter to be swept in outer loop (default on y axis)
start1: starting value for sweep_param1
stop1: final value for sweep_param1
step1: value to step sweep_param1 by
sweep_param2: parameter to be swept in inner loop (default on x axis)
start2: starting value for sweep_param2
stop2: final value for sweep_param2
step2: value to step sweep_param2 by
        delay (default 0.01): minimum time to spend on each point
do_plots: Default True: If False no plots are produced.
Data is still saved and can be displayed with show_num.
Returns:
data (qcodes dataset)
plot: QT plot
"""
innerloop = qc.Loop(sweep_param2.sweep(
start2, stop2, step2), delay).each(meas_param)
outerloop = qc.Loop(sweep_param1.sweep(
start1, stop1, step1), delay).each(innerloop)
set_params = ((sweep_param1, start1, stop1),
(sweep_param2, start2, stop2))
meas_params = _select_plottables(meas_param)
plot, data = _do_measurement(outerloop, set_params, meas_params,
do_plots=do_plots)
return data, plot
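# Illustrative usage sketch (the instrument parameters below are hypothetical,
# not part of this module):
# data, plot = sweep1d(dmm.voltage, gate.voltage, -1, 1, 0.05)
# data, plot = sweep2d(dmm.voltage, gate1.voltage, -1, 1, 0.05,
#                      gate2.voltage, -1, 1, 0.05)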
|
37782
|
import json
import os
import sys
import time
from os import path as osp
from pathlib import Path
from shutil import copyfile
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from tqdm import tqdm
from model_temporal import LSTMSeqNetwork, BilinearLSTMSeqNetwork, TCNSeqNetwork
from utils import load_config, MSEAverageMeter
from data_glob_speed import GlobSpeedSequence, SequenceToSequenceDataset, SenseINSSequence
from transformations import ComposeTransform, RandomHoriRotateSeq
from metric import compute_absolute_trajectory_error, compute_relative_trajectory_error
def WriteList(path, name, folders):
    with open(path + "/" + name, 'w') as f:
        # f.write, not f.writelines: we emit one line per folder, and the
        # with-block closes the file.
        for folder in folders:
            f.write(folder + "\n")
def GetFolderName(path):
names = os.listdir(path+"/")
folders=[]
for name in names:
if os.path.isdir(os.path.join(os.path.abspath(path), name)):
folders.append(name)
folders.sort()
return folders
'''
Temporal models with loss functions in global coordinate frame
Configurations
- Model types
TCN - type=tcn
LSTM_simple - type=lstm, lstm_bilinear
'''
torch.multiprocessing.set_sharing_strategy('file_system')
_nano_to_sec = 1e09
_input_channel, _output_channel = 6, 3
# _input_channel, _output_channel = 6, 2
device = 'cpu'
class GlobalPosLoss(torch.nn.Module):
def __init__(self, mode='full', history=None):
"""
Calculate position loss in global coordinate frame
Target :- Global Velocity
Prediction :- Global Velocity
"""
super(GlobalPosLoss, self).__init__()
self.mse_loss = torch.nn.MSELoss(reduction='none')
assert mode in ['full', 'part']
self.mode = mode
if self.mode == 'part':
assert history is not None
self.history = history
elif self.mode == 'full':
self.history = 1
def forward(self, pred, targ):
gt_pos = torch.cumsum(targ[:, 1:, ], 1)
pred_pos = torch.cumsum(pred[:, 1:, ], 1)
if self.mode == 'part':
gt_pos = gt_pos[:, self.history:, :] - gt_pos[:, :-self.history, :]
pred_pos = pred_pos[:, self.history:, :] - pred_pos[:, :-self.history, :]
loss = self.mse_loss(pred_pos, gt_pos)
return torch.mean(loss)
def write_config(args, **kwargs):
if args.out_dir:
with open(osp.join(args.out_dir, 'config.json'), 'w') as f:
values = vars(args)
values['file'] = "pytorch_global_position"
if kwargs:
values['kwargs'] = kwargs
json.dump(values, f, sort_keys=True)
def get_dataset(root_dir, data_list, args, **kwargs):
input_format, output_format = [0, 3, 6], [0, _output_channel]
mode = kwargs.get('mode', 'train')
random_shift, shuffle, transforms, grv_only = 0, False, [], False
if mode == 'train':
random_shift = args.step_size // 2
shuffle = True
transforms.append(RandomHoriRotateSeq(input_format, output_format))
elif mode == 'val':
shuffle = True
elif mode == 'test':
shuffle = False
grv_only = True
transforms = ComposeTransform(transforms)
if args.dataset == 'ronin':
seq_type = GlobSpeedSequence
elif args.dataset == 'ridi':
from data_ridi import RIDIGlobSpeedSequence
seq_type = RIDIGlobSpeedSequence
elif args.dataset == 'sense':
seq_type = SenseINSSequence
dataset = SequenceToSequenceDataset(seq_type, root_dir, data_list, args.cache_path, args.step_size, args.window_size,
random_shift=random_shift, transform=transforms, shuffle=shuffle,
grv_only=grv_only, args=args, **kwargs)
return dataset
def get_dataset_from_list(root_dir, list_path, args, **kwargs):
with open(list_path) as f:
data_list = [s.strip().split(',')[0] for s in f.readlines() if len(s) > 0 and s[0] != '#']
return get_dataset(root_dir, data_list, args, **kwargs)
def get_model(args, **kwargs):
config = {}
if kwargs.get('dropout'):
config['dropout'] = kwargs.get('dropout')
if args.type == 'tcn':
network = TCNSeqNetwork(_input_channel, _output_channel, args.kernel_size,
layer_channels=args.channels, **config)
print("TCN Network. Receptive field: {} ".format(network.get_receptive_field()))
elif args.type == 'lstm_bi':
print("Bilinear LSTM Network")
network = BilinearLSTMSeqNetwork(_input_channel, _output_channel, args.batch_size, device,
lstm_layers=args.layers, lstm_size=args.layer_size, **config).to(device)
else:
print("Simple LSTM Network")
network = LSTMSeqNetwork(_input_channel, _output_channel, args.batch_size, device,
lstm_layers=args.layers, lstm_size=args.layer_size, **config).to(device)
pytorch_total_params = sum(p.numel() for p in network.parameters() if p.requires_grad)
print('Network constructed. trainable parameters: {}'.format(pytorch_total_params))
return network
def get_loss_function(history, args, **kwargs):
if args.type == 'tcn':
config = {'mode': 'part',
'history': history}
else:
config = {'mode': 'full'}
criterion = GlobalPosLoss(**config)
return criterion
def format_string(*argv, sep=' '):
result = ''
for val in argv:
if isinstance(val, (tuple, list, np.ndarray)):
for v in val:
result += format_string(v, sep=sep) + sep
else:
result += str(val) + sep
return result[:-1]
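# Example (illustrative): format_string(1, [2, 3], 'x') returns '1 2 3 x'.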
def train(args, **kwargs):
# Loading data
start_t = time.time()
train_dataset = get_dataset_from_list(args.root_dir, args.train_list, args, mode='train', **kwargs)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True,
drop_last=True)
end_t = time.time()
print('Training set loaded. Time usage: {:.3f}s'.format(end_t - start_t))
val_dataset, val_loader = None, None
if args.val_list is not None:
val_dataset = get_dataset_from_list(args.validation_dir, args.val_list, args, mode='val', **kwargs)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)
print('Validation set loaded')
global device
device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
if args.out_dir:
if not osp.isdir(args.out_dir):
os.makedirs(args.out_dir)
if not osp.isdir(osp.join(args.out_dir, 'checkpoints')):
os.makedirs(osp.join(args.out_dir, 'checkpoints'))
if not osp.isdir(osp.join(args.out_dir, 'logs')):
os.makedirs(osp.join(args.out_dir, 'logs'))
write_config(args, **kwargs)
print('\nNumber of train samples: {}'.format(len(train_dataset)))
train_mini_batches = len(train_loader)
if val_dataset:
print('Number of val samples: {}'.format(len(val_dataset)))
val_mini_batches = len(val_loader)
network = get_model(args, **kwargs).to(device)
history = network.get_receptive_field() if args.type == 'tcn' else args.window_size // 2
criterion = get_loss_function(history, args, **kwargs)
optimizer = torch.optim.Adam(network.parameters(), args.lr)
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=10, factor=0.75, verbose=True, eps=1e-12)
quiet_mode = kwargs.get('quiet', False)
use_scheduler = kwargs.get('use_scheduler', False)
log_file = None
if args.out_dir:
log_file = osp.join(args.out_dir, 'logs', 'log.txt')
if osp.exists(log_file):
if args.continue_from is None:
os.remove(log_file)
else:
copyfile(log_file, osp.join(args.out_dir, 'logs', 'log_old.txt'))
start_epoch = 0
if args.continue_from is not None and osp.exists(args.continue_from):
with open(osp.join(str(Path(args.continue_from).parents[1]), 'config.json'), 'r') as f:
model_data = json.load(f)
if device.type == 'cpu':
checkpoints = torch.load(args.continue_from, map_location=lambda storage, location: storage)
else:
checkpoints = torch.load(args.continue_from, map_location={model_data['device']: args.device})
start_epoch = checkpoints.get('epoch', 0)
network.load_state_dict(checkpoints.get('model_state_dict'))
optimizer.load_state_dict(checkpoints.get('optimizer_state_dict'))
if kwargs.get('force_lr', False):
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr
step = 0
best_val_loss = np.inf
train_errs = np.zeros(args.epochs)
print("Starting from epoch {}".format(start_epoch
))
try:
for epoch in range(start_epoch, args.epochs):
log_line = ''
network.train()
train_vel = MSEAverageMeter(3, [2], _output_channel)
train_loss = 0
start_t = time.time()
for bid, batch in tqdm(enumerate(train_loader)):
feat, targ, _, _ = batch
feat, targ = feat.to(device), targ.to(device)
optimizer.zero_grad()
predicted = network(feat)
train_vel.add(predicted.cpu().detach().numpy(), targ.cpu().detach().numpy())
loss = criterion(predicted, targ)
train_loss += loss.cpu().detach().numpy()
loss.backward()
optimizer.step()
step += 1
train_errs[epoch] = train_loss / train_mini_batches
end_t = time.time()
if not quiet_mode:
print('-' * 25)
print('Epoch {}, time usage: {:.3f}s, loss: {}, val_loss {}/{:.6f}'.format(
epoch, end_t - start_t, train_errs[epoch], train_vel.get_channel_avg(), train_vel.get_total_avg()))
print('Learning rate: {}'.format(optimizer.param_groups[0]['lr']))
log_line = format_string(log_line, epoch, optimizer.param_groups[0]['lr'], train_errs[epoch],
*train_vel.get_channel_avg())
saved_model = False
if val_loader:
network.eval()
val_vel = MSEAverageMeter(3, [2], _output_channel)
val_loss = 0
for bid, batch in tqdm(enumerate(val_loader)):
feat, targ, _, _ = batch
feat, targ = feat.to(device), targ.to(device)
optimizer.zero_grad()
pred = network(feat)
val_vel.add(pred.cpu().detach().numpy(), targ.cpu().detach().numpy())
val_loss += criterion(pred, targ).cpu().detach().numpy()
val_loss = val_loss / val_mini_batches
log_line = format_string(log_line, val_loss, *val_vel.get_channel_avg())
if not quiet_mode:
print('Validation loss: {} val_loss: {}/{:.6f}'.format(val_loss, val_vel.get_channel_avg(),
val_vel.get_total_avg()))
if val_loss < best_val_loss:
best_val_loss = val_loss
saved_model = True
if args.out_dir:
model_path = osp.join(args.out_dir, 'checkpoints', 'checkpoint_%d.pt' % epoch)
torch.save({'model_state_dict': network.state_dict(),
'epoch': epoch,
'loss': train_errs[epoch],
'optimizer_state_dict': optimizer.state_dict()}, model_path)
print('Best Validation Model saved to ', model_path)
scheduler.step(val_loss)
if args.out_dir and not saved_model and (epoch + 1) % args.save_interval == 0: # save even with validation
model_path = osp.join(args.out_dir, 'checkpoints', 'icheckpoint_%d.pt' % epoch)
torch.save({'model_state_dict': network.state_dict(),
'epoch': epoch,
'loss': train_errs[epoch],
'optimizer_state_dict': optimizer.state_dict()}, model_path)
print('Model saved to ', model_path)
if log_file:
log_line += '\n'
with open(log_file, 'a') as f:
f.write(log_line)
if np.isnan(train_loss):
print("Invalid value. Stopping training.")
break
except KeyboardInterrupt:
print('-' * 60)
print('Early terminate')
print('Training completed')
if args.out_dir:
model_path = osp.join(args.out_dir, 'checkpoints', 'checkpoint_latest.pt')
torch.save({'model_state_dict': network.state_dict(),
'epoch': epoch,
'optimizer_state_dict': optimizer.state_dict()}, model_path)
def recon_traj_with_preds_global(dataset, preds, ind=None, seq_id=0, type='preds', **kwargs):
    ind = ind if ind is not None else np.array([i[1] for i in dataset.index_map if i[0] == seq_id], dtype=int)
if type == 'gt':
# pos = dataset.gt_pos[seq_id][:, :2]
pos = dataset.gt_pos[seq_id][:, :3]
else:
ts = dataset.ts[seq_id]
# Compute the global velocity from local velocity.
dts = np.mean(ts[ind[1:]] - ts[ind[:-1]])
pos = preds * dts
# pos[0, :] = dataset.gt_pos[seq_id][0, :2]
pos[0, :] = dataset.gt_pos[seq_id][0, :3]
pos = np.cumsum(pos, axis=0)
veloc = preds
ori = dataset.orientations[seq_id]
return pos, veloc, ori
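# Note (illustrative): the reconstruction above scales the predicted
# velocities by the mean sample interval, anchors the first sample at the
# ground-truth start position, and integrates with a cumulative sum.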
def test(args, **kwargs):
global device, _output_channel
import matplotlib.pyplot as plt
device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
if args.test_path is not None:
if args.test_path[-1] == '/':
args.test_path = args.test_path[:-1]
root_dir = osp.split(args.test_path)[0]
test_data_list = [osp.split(args.test_path)[1]]
elif args.test_list is not None:
root_dir = args.root_dir if args.root_dir else osp.split(args.test_list)[0]
with open(args.test_list) as f:
test_data_list = [s.strip().split(',')[0] for s in f.readlines() if len(s) > 0 and s[0] != '#']
else:
raise ValueError('Either test_path or test_list must be specified.')
# Load the first sequence to update the input and output size
_ = get_dataset(root_dir, [test_data_list[0]], args, mode='test')
if args.out_dir and not osp.exists(args.out_dir):
os.makedirs(args.out_dir)
with open(osp.join(str(Path(args.model_path).parents[1]), 'config.json'), 'r') as f:
model_data = json.load(f)
if device.type == 'cpu':
checkpoint = torch.load(args.model_path, map_location=lambda storage, location: storage)
else:
checkpoint = torch.load(args.model_path, map_location={model_data['device']: args.device})
network = get_model(args, **kwargs)
network.load_state_dict(checkpoint.get('model_state_dict'))
network.eval().to(device)
print('Model {} loaded to device {}.'.format(args.model_path, device))
log_file = None
if args.test_list and args.out_dir:
log_file = osp.join(args.out_dir, osp.split(args.test_list)[-1].split('.')[0] + '_log.txt')
with open(log_file, 'w') as f:
f.write(args.model_path + '\n')
f.write('Seq traj_len velocity ate rte\n')
losses_vel = MSEAverageMeter(2, [1], _output_channel)
ate_all, rte_all = [], []
pred_per_min = 200 * 60
seq_dataset = get_dataset(root_dir, test_data_list, args, mode='test', **kwargs)
for idx, data in enumerate(test_data_list):
assert data == osp.split(seq_dataset.data_path[idx])[1]
feat, vel = seq_dataset.get_test_seq(idx)
feat = torch.Tensor(feat).to(device)
preds = np.squeeze(network(feat).cpu().detach().numpy())[-vel.shape[0]:, :_output_channel]
ind = np.arange(vel.shape[0])
val_losses = np.mean((vel - preds) ** 2, axis=0)
losses_vel.add(vel, preds)
print('Reconstructing trajectory')
pos_pred, gv_pred, _ = recon_traj_with_preds_global(seq_dataset, preds, ind=ind, type='pred', seq_id=idx)
pos_gt, gv_gt, _ = recon_traj_with_preds_global(seq_dataset, vel, ind=ind, type='gt', seq_id=idx)
if args.out_dir is not None and osp.isdir(args.out_dir):
np.save(osp.join(args.out_dir, '{}_{}.npy'.format(data, args.type)),
np.concatenate([pos_pred, pos_gt], axis=1))
ate = compute_absolute_trajectory_error(pos_pred, pos_gt)
if pos_pred.shape[0] < pred_per_min:
ratio = pred_per_min / pos_pred.shape[0]
rte = compute_relative_trajectory_error(pos_pred, pos_gt, delta=pos_pred.shape[0] - 1) * ratio
else:
rte = compute_relative_trajectory_error(pos_pred, pos_gt, delta=pred_per_min)
pos_cum_error = np.linalg.norm(pos_pred - pos_gt, axis=1)
ate_all.append(ate)
rte_all.append(rte)
print('Sequence {}, Velocity loss {} / {}, ATE: {}, RTE:{}'.format(data, val_losses, np.mean(val_losses), ate,
rte))
log_line = format_string(data, np.mean(val_losses), ate, rte)
if not args.fast_test:
kp = preds.shape[1]
if kp == 2:
targ_names = ['vx', 'vy']
elif kp == 3:
targ_names = ['vx', 'vy', 'vz']
plt.figure('{}'.format(data), figsize=(16, 9))
plt.subplot2grid((kp, 2), (0, 0), rowspan=kp - 1)
plt.plot(pos_pred[:, 0], pos_pred[:, 1])
plt.plot(pos_gt[:, 0], pos_gt[:, 1])
plt.title(data)
plt.axis('equal')
plt.legend(['Predicted', 'Ground truth'])
plt.subplot2grid((kp, 2), (kp - 1, 0))
plt.plot(pos_cum_error)
plt.legend(['ATE:{:.3f}, RTE:{:.3f}'.format(ate_all[-1], rte_all[-1])])
for i in range(kp):
plt.subplot2grid((kp, 2), (i, 1))
plt.plot(ind, preds[:, i])
plt.plot(ind, vel[:, i])
plt.legend(['Predicted', 'Ground truth'])
plt.title('{}, error: {:.6f}'.format(targ_names[i], val_losses[i]))
plt.tight_layout()
if args.show_plot:
plt.show()
if args.out_dir is not None and osp.isdir(args.out_dir):
plt.savefig(osp.join(args.out_dir, '{}_{}.png'.format(data, args.type)))
if log_file is not None:
with open(log_file, 'a') as f:
log_line += '\n'
f.write(log_line)
plt.close('all')
ate_all = np.array(ate_all)
rte_all = np.array(rte_all)
measure = format_string('ATE', 'RTE', sep='\t')
values = format_string(np.mean(ate_all), np.mean(rte_all), sep='\t')
print(measure, '\n', values)
if log_file is not None:
with open(log_file, 'a') as f:
f.write(measure + '\n')
f.write(values)
if __name__ == '__main__':
"""
Run file with individual arguments or/and config file. If argument appears in both config file and args,
args is given precedence.
"""
default_config_file = osp.abspath(osp.join(osp.abspath(__file__), '../../config/temporal_model_defaults.json'))
import argparse
parser = argparse.ArgumentParser(description="Run seq2seq model in train/test mode [required]. Optional "
"configurations can be specified as --key [value..] pairs",
add_help=True)
parser.add_argument('--config', type=str, help='Configuration file [Default: {}]'.format(default_config_file),
default=default_config_file)
# common
parser.add_argument('--type', type=str, choices=['tcn', 'lstm', 'lstm_bi'], help='Model type', default='lstm')
parser.add_argument('--root_dir', type=str, default="/data/INSData/ins_data_test/IDOL_SenseINS/building1/train_debug", help='Path to data directory')
parser.add_argument('--validation_dir', type=str, default="/data/INSData/ins_data_test/IDOL_SenseINS/building1/train_debug")
# parser.add_argument('--root_dir', type=str,
# default="/home/SENSETIME/xurunsen/project/ronin/RONIN/train_debug",
# help='Path to data directory')
# parser.add_argument('--validation_dir', type=str,
# default="/home/SENSETIME/xurunsen/project/ronin/RONIN/train_debug")
parser.add_argument('--cache_path', type=str, default=None)
parser.add_argument('--feature_sigma', type=float, help='Gaussian for smoothing features')
parser.add_argument('--target_sigma', type=float, help='Gaussian for smoothing target')
parser.add_argument('--window_size', type=int)
parser.add_argument('--step_size', type=int)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--num_workers', type=int)
parser.add_argument('--out_dir', type=str, default='../output/ronin_lstm/idol/2021.05.14/train_debug')
parser.add_argument('--device', type=str, help='Cuda device (e.g:- cuda:0) or cpu')
parser.add_argument('--dataset', type=str, choices=['ronin', 'ridi', 'sense'], default='sense')
parser.add_argument('--imu_freq', type=int, default=200)
# tcn
tcn_cmd = parser.add_argument_group('tcn', 'configuration for TCN')
tcn_cmd.add_argument('--kernel_size', type=int)
tcn_cmd.add_argument('--channels', type=str, help='Channel sizes for TCN layers (comma separated)')
# lstm
lstm_cmd = parser.add_argument_group('lstm', 'configuration for LSTM')
lstm_cmd.add_argument('--layers', type=int)
lstm_cmd.add_argument('--layer_size', type=int)
mode = parser.add_subparsers(title='mode', dest='mode', help='Operation: [train] train model, [test] evaluate model')
mode.required = False
# train
train_cmd = mode.add_parser('train')
train_cmd.add_argument('--train_list', type=str)
train_cmd.add_argument('--val_list', type=str)
train_cmd.add_argument('--continue_from', type=str, default=None)
train_cmd.add_argument('--epochs', type=int)
train_cmd.add_argument('--save_interval', type=int)
train_cmd.add_argument('--lr', '--learning_rate', type=float)
# test
test_cmd = mode.add_parser('test')
test_cmd.add_argument('--test_path', type=str, default=None)
test_cmd.add_argument('--test_list', type=str, default=None)
test_cmd.add_argument('--model_path', type=str, default='/home/SENSETIME/xurunsen/project/ronin/output/ronin_lstm/idol/2021.05.14/train_debug/checkpoints/checkpoint_714.pt')
test_cmd.add_argument('--fast_test', action='store_true')
test_cmd.add_argument('--show_plot', action='store_true')
'''
Extra arguments
Set True: use_scheduler,
              quiet (no output on stdout),
force_lr (force lr when a model is loaded from continue_from)
float: dropout,
max_ori_error (err. threshold for priority grv in degrees)
max_velocity_norm (filter outliers in training)
'''
args, unknown_args = parser.parse_known_args()
np.set_printoptions(formatter={'all': lambda x: '{:.6f}'.format(x)})
args, kwargs = load_config(default_config_file, args, unknown_args)
print(args, kwargs)
# add by runsen
# write list
if args.mode == "train":
if args.train_list is None:
WriteList(args.root_dir, "train_list.txt", GetFolderName(args.root_dir))
args.train_list = args.root_dir + "/train_list.txt"
if args.validation_dir is not None:
WriteList(args.validation_dir, "validation_list.txt", GetFolderName(args.validation_dir))
args.val_list = args.validation_dir + "/validation_list.txt"
elif args.mode == "test":
if args.test_list is None:
WriteList(args.root_dir, "test_list.txt", GetFolderName(args.root_dir))
args.test_list = args.root_dir + "/test_list.txt"
if args.mode == 'train':
train(args, **kwargs)
elif args.mode == 'test':
if not args.model_path:
raise ValueError("Model path required")
args.batch_size = 1
test(args, **kwargs)
|
37821
|
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import char_class_utils
import d20_action_utils
###################################################
def GetConditionName():
return "Duelist"
print "Registering " + GetConditionName()
classEnum = stat_level_duelist
preciseStrikeEnum = 2400
###################################################
#### standard callbacks - BAB and Save values
def OnGetToHitBonusBase(attachee, args, evt_obj):
classLvl = attachee.stat_level_get(classEnum)
babvalue = game.get_bab_for_class(classEnum, classLvl)
evt_obj.bonus_list.add(babvalue, 0, 137) # untyped, description: "Class"
return 0
def OnGetSaveThrowFort(attachee, args, evt_obj):
value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Fortitude)
evt_obj.bonus_list.add(value, 0, 137)
return 0
def OnGetSaveThrowReflex(attachee, args, evt_obj):
value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Reflex)
evt_obj.bonus_list.add(value, 0, 137)
return 0
def OnGetSaveThrowWill(attachee, args, evt_obj):
value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Will)
evt_obj.bonus_list.add(value, 0, 137)
return 0
def IsArmorless( obj ):
armor = obj.item_worn_at(5)
if armor != OBJ_HANDLE_NULL:
armorFlags = armor.obj_get_int(obj_f_armor_flags)
if armorFlags != ARMOR_TYPE_NONE:
return 0
shield = obj.item_worn_at(11)
if shield != OBJ_HANDLE_NULL:
return 0
return 1
def IsRangedWeapon( weap ):
weapFlags = weap.obj_get_int(obj_f_weapon_flags)
if (weapFlags & OWF_RANGED_WEAPON) == 0:
return 0
return 1
def CannyDefenseAcBonus(attachee, args, evt_obj):
if not IsArmorless(attachee):
return 0
weap = attachee.item_worn_at(3)
if weap == OBJ_HANDLE_NULL or IsRangedWeapon(weap):
weap = attachee.item_worn_at(4)
if weap == OBJ_HANDLE_NULL or IsRangedWeapon(weap):
return 0
duelistLvl = attachee.stat_level_get(classEnum)
intScore = attachee.stat_level_get(stat_intelligence)
intBonus = (intScore - 10)/2
if intBonus <= 0:
return
if duelistLvl < intBonus:
intBonus = duelistLvl
evt_obj.bonus_list.modify(intBonus , 3, 104) # Dexterity bonus, ~Class~[TAG_LEVEL_BONUSES]
return 0
def ImprovedReactionInitBonus(attachee, args, evt_obj):
duelistLvl = attachee.stat_level_get(classEnum)
if duelistLvl < 2:
return 0
bonVal = 2
if duelistLvl >= 8:
bonVal = 4
evt_obj.bonus_list.add(bonVal, 0, 137 ) # adds untyped bonus to initiative
return 0
def EnhancedMobility(attachee, args, evt_obj):
duelistLvl = attachee.stat_level_get(classEnum)
if duelistLvl < 3:
return 0
if not IsArmorless(attachee):
return 0
if evt_obj.attack_packet.get_flags() & D20CAF_AOO_MOVEMENT:
evt_obj.bonus_list.add(4, 8, 137 ) # adds +4 dodge bonus
return 0
def GraceReflexBonus(attachee, args, evt_obj):
duelistLvl = attachee.stat_level_get(classEnum)
if duelistLvl < 4:
return 0
if not IsArmorless(attachee):
return 0
evt_obj.bonus_list.add(2, 34, 137) # Competence bonus
return 0
# def PreciseStrikeRadial(attachee, args, evt_obj):
# duelistLvl = attachee.stat_level_get(classEnum)
# if (duelistLvl < 5):
# return 0
## add radial menu action Precise Strike
# radialAction = tpdp.RadialMenuEntryPythonAction(-1, D20A_PYTHON_ACTION, preciseStrikeEnum, 0, "TAG_INTERFACE_HELP")
# radialParentId = radialAction.add_child_to_standard(attachee, tpdp.RadialMenuStandardNode.Class)
# return 0
# def OnPreciseStrikeCheck(attachee, args, evt_obj):
# if (not IsUsingLightOrOneHandedPiercing(attachee)):
# evt_obj.return_val = AEC_WRONG_WEAPON_TYPE
# return 0
# tgt = evt_obj.d20a.target
# stdChk = ActionCheckTargetStdAtk(attachee, tgt)
# if (stdChk != AEC_OK):
# evt_obj.return_val = stdChk
# return 0
# def OnPreciseStrikePerform(attachee, args, evt_obj):
# print "I performed!"
# return 0
preciseStrikeString = "Precise Strike"
def PreciseStrikeDamageBonus(attachee, args, evt_obj):
duelistLvl = attachee.stat_level_get(classEnum)
if duelistLvl < 5:
return 0
# check if attacking with one weapon and without a shield
if (attachee.item_worn_at(4) != OBJ_HANDLE_NULL and attachee.item_worn_at(3) != OBJ_HANDLE_NULL) or attachee.item_worn_at(11) != OBJ_HANDLE_NULL:
return 0
# check if light or one handed piercing
if not IsUsingLightOrOneHandedPiercing(attachee):
return 0
tgt = evt_obj.attack_packet.target
if tgt == OBJ_HANDLE_NULL: # shouldn't happen but better be safe
return 0
if tgt.d20_query(Q_Critter_Is_Immune_Critical_Hits):
return 0
damage_dice = dice_new('1d6')
if duelistLvl >= 10:
damage_dice.number = 2
evt_obj.damage_packet.add_dice(damage_dice, -1, 127 )
return 0
def ElaborateParry(attachee, args, evt_obj):
duelistLvl = attachee.stat_level_get(classEnum)
if duelistLvl < 7:
return 0
if not attachee.d20_query(Q_FightingDefensively): # this also covers Total Defense
return 0
evt_obj.bonus_list.add(duelistLvl , 8, 137) # Dodge bonus, ~Class~[TAG_LEVEL_BONUSES]
return 0
def IsUsingLightOrOneHandedPiercing( obj ):
weap = obj.item_worn_at(3)
offhand = obj.item_worn_at(4)
if weap == OBJ_HANDLE_NULL and offhand == OBJ_HANDLE_NULL:
return 0
if weap == OBJ_HANDLE_NULL:
weap = offhand
offhand = OBJ_HANDLE_NULL
if IsWeaponLightOrOneHandedPiercing(obj, weap):
return 1
# check the offhand
if offhand != OBJ_HANDLE_NULL:
if IsWeaponLightOrOneHandedPiercing(obj, offhand):
return 1
return 0
def IsWeaponLightOrOneHandedPiercing( obj, weap):
# truth table
# nor. | enlarged | return
# 0 x 1 assume un-enlarged state
# 1 0 1 shouldn't be possible... unless it's actually reduce person (I don't really care about that)
# 1 1 is_piercing
# 1 2 is_piercing
# 2 x 0
# 3 x 0
normalWieldType = obj.get_wield_type(weap, 1) # "normal" means weapon is not enlarged
if normalWieldType >= 2: # two handed or unwieldable
return 0
if normalWieldType == 0:
return 1
# otherwise if the weapon is also enlarged;
wieldType = obj.get_wield_type(weap, 0)
if wieldType == 0:
return 1
# weapon is not light, but is one handed - check if piercing
attackType = weap.obj_get_int(obj_f_weapon_attacktype)
if attackType == D20DT_PIERCING: # should be strictly piercing from what I understand (supposed to be rapier-like)
return 1
return 0
def DuelistDeflectArrows(attachee, args, evt_obj):
duelistLvl = attachee.stat_level_get(classEnum)
if duelistLvl < 9:
return 0
offendingWeapon = evt_obj.attack_packet.get_weapon_used()
if offendingWeapon == OBJ_HANDLE_NULL:
return 0
if not (evt_obj.attack_packet.get_flags() & D20CAF_RANGED):
return 0
# check if attacker visible
attacker = evt_obj.attack_packet.attacker
if attacker == OBJ_HANDLE_NULL:
return 0
if attacker.d20_query(Q_Critter_Is_Invisible) and not attachee.d20_query(Q_Critter_Can_See_Invisible):
return 0
if attachee.d20_query(Q_Critter_Is_Blinded):
return 0
# check flatfooted
if attachee.d20_query(Q_Flatfooted):
return 0
# check light weapon or one handed piercing
if not IsUsingLightOrOneHandedPiercing(attachee):
return 0
atkflags = evt_obj.attack_packet.get_flags()
atkflags |= D20CAF_DEFLECT_ARROWS
atkflags &= ~(D20CAF_HIT | D20CAF_CRITICAL)
evt_obj.attack_packet.set_flags(atkflags)
return 0
classSpecObj = PythonModifier(GetConditionName(), 0)
classSpecObj.AddHook(ET_OnToHitBonusBase, EK_NONE, OnGetToHitBonusBase, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_FORTITUDE, OnGetSaveThrowFort, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, OnGetSaveThrowReflex, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_WILL, OnGetSaveThrowWill, ())
classSpecObj.AddHook(ET_OnGetAC, EK_NONE, CannyDefenseAcBonus, ())
classSpecObj.AddHook(ET_OnGetAC, EK_NONE, EnhancedMobility, ())
classSpecObj.AddHook(ET_OnGetAC, EK_NONE, ElaborateParry, ())
classSpecObj.AddHook(ET_OnGetInitiativeMod, EK_NONE, ImprovedReactionInitBonus, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, GraceReflexBonus, ())
classSpecObj.AddHook(ET_OnDealingDamage, EK_NONE, PreciseStrikeDamageBonus, ())
classSpecObj.AddHook(ET_OnDeflectArrows, EK_NONE, DuelistDeflectArrows, ())
|
37839
|
import math
import os
import tempfile
from contextlib import contextmanager
from soap import logger
from soap.common.cache import cached
from soap.expression import operators, OutputVariableTuple
from soap.semantics.error import IntegerInterval, ErrorSemantics
flopoco_command_map = {
'IntAdder': ('{wi}', ),
'IntMultiplier': ('{wi}', '{wi}', '{wi}', '1', '1', '0'),
'FPAdder': ('{we}', '{wf}'),
'FPMultiplier': ('{we}', '{wf}', '{wf}'),
'FPSquarer': ('{we}', '{wf}', '{wf}'),
'FPDiv': ('{we}', '{wf}'),
'FPPow': ('{we}', '{wf}'),
'FPExp': ('{we}', '{wf}'),
'FPLog': ('{we}', '{wf}', '0'),
}
flopoco_operators = tuple(flopoco_command_map)
operators_map = {
operators.ADD_OP: ['FPAdder', 'IntAdder'],
operators.SUBTRACT_OP: ['FPAdder', 'IntAdder'],
operators.MULTIPLY_OP: ['FPMultiplier', 'IntMultiplier'],
operators.DIVIDE_OP: 'FPDiv',
operators.LESS_OP: ['FPAdder', 'IntAdder'],
operators.LESS_EQUAL_OP: ['FPAdder', 'IntAdder'],
operators.GREATER_OP: ['FPAdder', 'IntAdder'],
operators.GREATER_EQUAL_OP: ['FPAdder', 'IntAdder'],
operators.EQUAL_OP: ['FPAdder', 'IntAdder'],
operators.NOT_EQUAL_OP: ['FPAdder', 'IntAdder'],
operators.TERNARY_SELECT_OP: 'Multiplexer',
operators.FIXPOINT_OP: 'Null',
operators.UNARY_SUBTRACT_OP: 'OneLUT',
}
we_min, we_max = 5, 15
we_range = list(range(we_min, we_max + 1))
wf_min, wf_max = 10, 112
wf_range = list(range(wf_min, wf_max + 1))
wi_min, wi_max = 1, 100
wi_range = list(range(wi_min, wi_max + 1))
directory = os.path.dirname(__file__)
default_file = os.path.join(directory, 'luts.pkl')
template_file = os.path.join(directory, 'template.vhdl')
device_name = 'Virtex6'
device_model = 'xc6vlx760'
@contextmanager
def cd(d):
import sh
p = os.path.abspath(os.curdir)
if d:
sh.mkdir('-p', d)
sh.cd(d)
try:
yield
except Exception:
raise
finally:
sh.cd(p)
def flopoco_key(fop, we=-1, wf=-1, wi=-1):
try:
format_tuple = flopoco_command_map[fop]
except KeyError:
raise ValueError('Unrecognised operator {}'.format(fop))
args = [fop]
args += [a.format(we=we, wf=wf, wi=wi) for a in format_tuple]
return tuple(args)
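# Example (illustrative): flopoco_key('FPAdder', we=8, wf=23)
# returns ('FPAdder', '8', '23').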
def flopoco(key, file_name=None, dir_name=None):
import sh
file_name = file_name or tempfile.mktemp(suffix='.vhdl', dir='')
cmd = ('-target=' + device_name, '-outputfile=' + file_name) + key
logger.debug('flopoco: {!r}'.format(cmd))
dir_name = dir_name or tempfile.mktemp(suffix='/')
with cd(dir_name):
sh.flopoco(*cmd)
try:
with open(file_name) as fh:
if not fh.read():
raise IOError()
except (IOError, FileNotFoundError):
logger.error('Flopoco failed to generate file ' + file_name)
raise
return file_name, dir_name
def get_luts(file_name):
from bs4 import BeautifulSoup
with open(file_name, 'r') as f:
f = BeautifulSoup(f.read())
app = f.document.application
util = app.find('section', stringid='XST_DEVICE_UTILIZATION_SUMMARY')
luts = util.find('item', stringid='XST_NUMBER_OF_SLICE_LUTS')
if luts:
return int(luts.get('value'))
logger.warning('{} requires no LUTs'.format(file_name))
return 0
def xilinx(file_name, dir_name=None):
import sh
file_base = os.path.split(file_name)[1]
file_base = os.path.splitext(file_base)[0]
synth_name = file_base + '.ngc'
cmd = ['run', '-p', device_model]
cmd += ['-ifn', file_name, '-ifmt', 'VHDL']
cmd += ['-ofn', synth_name, '-ofmt', 'NGC']
logger.debug('xst: {!r}'.format(cmd))
dir_name = dir_name or tempfile.mktemp(suffix='/')
with cd(dir_name):
out_file_name = file_base + '.out.log'
err_file_name = file_base + '.err.log'
sh.xst(sh.echo(*cmd), _out=out_file_name, _err=err_file_name)
return get_luts(file_base + '.ngc_xst.xrpt')
_FILTER_OPERATORS = operators.TRADITIONAL_OPERATORS + [
operators.TERNARY_SELECT_OP
]
@cached
def _datatype_exponent(op, label):
if isinstance(label, OutputVariableTuple):
exponent = 0
for l in label:
label_datatype, label_exponent = _datatype_exponent(op, l)
exponent += label_exponent
return None, exponent
if op == operators.FIXPOINT_OP:
return None, 0
if op not in _FILTER_OPERATORS:
return None, None
bound = label.bound
datatype = type(bound)
if datatype is IntegerInterval:
if bound.is_top():
            # wi_max etc. are module-level constants; the name `flopoco` here
            # refers to the helper function above, which has no such attributes.
            return datatype, wi_max
if bound.is_bottom():
            return datatype, wi_min
bound_max = max(abs(bound.min), abs(bound.max), 1)
width_max = int(math.ceil(math.log(bound_max + 1, 2)) + 1)
return datatype, width_max
if datatype is ErrorSemantics:
bound = bound.v
if bound.is_top():
            return datatype, we_max
if bound.is_bottom():
            return datatype, we_min
bound_max = max(abs(bound.min), abs(bound.max), 1)
try:
exp_max = math.floor(math.log(bound_max, 2))
except OverflowError:
            return datatype, we_max
try:
exponent = int(math.ceil(math.log(exp_max + 1, 2) + 1))
            return datatype, max(exponent, we_min)
except ValueError:
            return datatype, we_min
raise TypeError('Unrecognized type of bound {!r}'.format(bound))
|
37928
|
import os.path as osp
import os
import pylab as plt
import gc
import argparse
from utils import read_image
parser = argparse.ArgumentParser(description='Plot rank-5 results of S-ReID, SP-ReID and SSP-ReID')
parser.add_argument('-d', '--dataset', type=str, default='market1501')
# Architecture
parser.add_argument('-a', '--arch', type=str, default='resnet50')
parser.add_argument('--save-dir', type=str, default='log/tmp')
args = parser.parse_args()
def plot(images, save_name):
num_figs = len(images)
fig = plt.figure(figsize = (30, 20))
for i, img in enumerate(images):
a = fig.add_subplot(num_figs, 1, i + 1)
plt.imshow(img)
plt.axis('off')
fig.savefig(save_name, bbox_inches='tight')
fig.clf()
plt.close()
del a
gc.collect()
def combine_fig(file_name, salience_dir, parsing_dir, salience_parsing_dir, save_dir):
salience_file = osp.join(salience_dir, file_name)
parsing_file = osp.join(parsing_dir, file_name)
salience_parsing_file = osp.join(salience_parsing_dir, file_name)
save_file = osp.join(save_dir, file_name)
images = [read_image(salience_file), read_image(parsing_file), read_image(salience_parsing_file)]
plot(images, save_file)
def main():
dataset = args.dataset
model = args.arch
salience_dir = osp.join('log/', '{}-salience-{}/-1'.format(model, dataset))
parsing_dir = osp.join('log/', '{}-parsing-{}/-1'.format(model, dataset))
salience_parsing_dir = osp.join('log/', '{}-salience-parsing-{}/-1'.format(model, dataset))
save_dir = osp.join(args.save_dir, '{}-improvement-{}'.format(model, dataset))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
list_figs = os.listdir(salience_dir)
for img_name in list_figs:
combine_fig(img_name, salience_dir, parsing_dir, salience_parsing_dir, save_dir)
if __name__ == '__main__':
main()
|
37933
|
import unittest
from unittest.mock import MagicMock
from datetime import timedelta
from osgar.bus import Bus
from osgar.node import Node
class NodeTest(unittest.TestCase):
def test_usage(self):
empty_config = {}
bus = Bus(logger=MagicMock())
node = Node(config=empty_config, bus=bus.handle('mynode'))
node.start()
node.request_stop()
node.join()
def test_update(self):
empty_config = {}
bus = Bus(logger=MagicMock())
node = Node(config=empty_config, bus=bus.handle('mynode'))
tester = bus.handle('tester')
tester.register('vel')
bus.connect('tester.vel', 'mynode.vel')
dt = tester.publish('vel', 3)
node.update()
self.assertEqual(node.time, dt)
self.assertEqual(node.vel, 3)
node2 = Node(config=empty_config, bus=bus.handle('mynode2'))
self.assertNotIn('vel', dir(node2))
# vim: expandtab sw=4 ts=4
|
37946
|
import os
import json
import pickle
import collections
import numpy as np
from s2and.consts import CONFIG
DATA_DIR = CONFIG["main_data_dir"]
OUTPUT_DIR = os.path.join(DATA_DIR, "s2and_mini")
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
# excluding MEDLINE because it has no clusters
DATASETS = [
"aminer",
"arnetminer",
"inspire",
"kisti",
"pubmed",
"qian",
"zbmath",
]
BIG_BLOCK_CUTOFF = 500
TOP_BLOCKS_TO_KEEP = 1000
# load all of the artifacts of each dataset
clusters_all = []
signatures_all = []
X_all = []
keys_all = []
papers_all = []
for dataset in DATASETS:
print()
print(f"Loading data from {dataset}...")
for file_name in os.listdir(os.path.join(DATA_DIR, dataset)):
file_name = os.path.join(DATA_DIR, dataset, file_name)
if "specter" in file_name:
with open(file_name, "rb") as _pickle_file:
X, keys = pickle.load(_pickle_file)
X_all.append(X)
keys_all.append(keys)
elif "cluster" in file_name:
with open(file_name) as _json_file:
clusters = json.load(_json_file)
new_clusters = {}
for cluster_id, v in clusters.items():
new_cluster_id = f"{dataset}_{cluster_id}"
new_v = {
"cluster_id": new_cluster_id,
"signature_ids": [f"{dataset}_{i}" for i in v["signature_ids"]],
"model_version": v["model_version"],
}
new_clusters[new_cluster_id] = new_v
clusters_all.append(new_clusters)
elif "paper" in file_name:
with open(file_name) as _json_file:
papers = json.load(_json_file)
papers_all.append(papers)
elif "signature" in file_name:
with open(file_name) as _json_file:
signatures = json.load(_json_file)
new_signatures = {}
for signature_id, v in signatures.items():
new_signature_id = f"{dataset}_{signature_id}"
new_v = {
"author_id": v["author_id"], # maybe this needs to be prepended by dataset?
"paper_id": v["paper_id"],
"signature_id": new_signature_id,
"author_info": v["author_info"],
}
new_signatures[new_signature_id] = new_v
signatures_all.append(new_signatures)
else:
print(f"WARNING: Ignoring {file_name} in {dataset}")
print("Finished loading data. Filtering...")
# the goal is speed so we'll remove the largest blocks
# also only keep top 1000 blocks max
# aminer has 32k, inspire has 15k, and kisti has 7k blocks
for dataset, s, c, p, X, k in zip(DATASETS, signatures_all, clusters_all, papers_all, X_all, keys_all):
blocks = []
for v in s.values():
blocks.append(v["author_info"]["block"])
vc = collections.Counter(blocks)
    # note: sorted() orders blocks by name, so this keeps the first
    # TOP_BLOCKS_TO_KEEP small-enough blocks alphabetically
    blocks_to_keep = set([k for k, v in sorted(vc.items()) if v <= BIG_BLOCK_CUTOFF][:TOP_BLOCKS_TO_KEEP])
s_filtered = {k: v for k, v in s.items() if v["author_info"]["block"] in blocks_to_keep}
# filter the clusters too
c_filtered = {k: v for k, v in c.items() if np.all([i in s_filtered for i in v["signature_ids"]])}
# go back through the clusters and find the signatures we'll actually need
# need to do this because sometimes the block name is just... corrupted
# e.g. "g miller" for most signatures but "g mller" for one...
signature_keys_to_keep = set()
for v in c_filtered.values():
signature_keys_to_keep.update(v["signature_ids"])
s_filtered = {k: v for k, v in s.items() if k in signature_keys_to_keep}
# we don't need all the papers anymore. just the ones in signatures
# also the references of those
paper_ids = set([v["paper_id"] for v in s_filtered.values()])
ref_paper_ids = set()
for v in p.values():
if v["references"] is not None:
ref_paper_ids.update(v["references"])
p_filtered = {k: v for k, v in p.items() if int(k) in paper_ids or int(k) in ref_paper_ids}
# filter down the specters to those in papers only since we don't use specters for references
keys_filtered_flag = np.array([i in paper_ids for i in k.astype(int)])
k_filtered = k[keys_filtered_flag]
X_filtered = X[keys_filtered_flag, :]
# save all of the data
data_output_dir = os.path.join(DATA_DIR, "s2and_mini", dataset)
if not os.path.exists(data_output_dir):
os.mkdir(data_output_dir)
with open(os.path.join(data_output_dir, f"{dataset}_clusters.json"), "w") as _json_file:
json.dump(c_filtered, _json_file)
with open(os.path.join(data_output_dir, f"{dataset}_signatures.json"), "w") as _json_file:
json.dump(s_filtered, _json_file)
with open(os.path.join(data_output_dir, f"{dataset}_papers.json"), "w") as _json_file:
json.dump(p_filtered, _json_file)
with open(os.path.join(data_output_dir, f"{dataset}_specter.pickle"), "wb") as _pickle_file:
pickle.dump((X_filtered, k_filtered), _pickle_file)
|
37983
|
from .eval_card import EvaluationCard
from .evaluator import Evaluator
from .lookup import LookupTable
|
37985
|
import re

from email_validator import validate_email, EmailSyntaxError

from virtool.users.utils import PERMISSIONS

RE_HEX_COLOR = re.compile("^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$")


def strip(value: str) -> str:
    """
    Strip flanking whitespace from the passed string. Used to coerce values in Cerberus validators.

    :param value: the string to strip
    :return: the stripped string
    """
    return value.strip()


def is_permission_dict(field: str, value: dict, error: callable):
    """
    Check that all keys in the permissions dictionary are valid permissions.

    If an invalid key is found, the error message is set to "keys must be valid permissions".

    :param field: the permissions field to check
    :param value: the permissions dictionary value
    :param error: points to the calling validator's _error method
    """
    if any(key not in PERMISSIONS for key in value):
        error(field, "keys must be valid permissions")


def has_unique_segment_names(field: str, value: list, error: callable):
    """
    Check that no duplicate segment names are used in the list.

    If duplicate names are found, the error message is set to "list contains duplicate names".

    :param field: the field to check
    :param value: the list value
    :param error: points to the calling validator's _error method
    """
    if len({seg["name"] for seg in value}) != len(value):
        error(field, "list contains duplicate names")


def is_valid_hex_color(field: str, value: str, error: callable):
    """
    Check that the value is a valid hexadecimal color, using a regex comparison.

    If the color is invalid, the error message is set to "This is not a valid Hexadecimal color".

    :param field: the color field to check
    :param value: the color string value
    :param error: points to the calling validator's _error method
    """
    if not RE_HEX_COLOR.match(value):
        error(field, "This is not a valid Hexadecimal color")


def is_valid_email(field: str, value: str, error: callable):
    """
    Check that the email address is valid according to email_validator.validate_email.

    If the email is invalid, the error message is set to "Not a valid email".

    :param field: the email field to check
    :param value: the email string value
    :param error: points to the calling validator's _error method
    """
    try:
        validate_email(value)
    except EmailSyntaxError:
        error(field, "Not a valid email")
|
37995
|
import os
import subprocess

from pretty_print import Print_C


class Runner:
    run_kases = 3

    def __init__(self, scheme, testcases):
        self.scheme = scheme
        self.testcases = testcases
        self.bin_file_template = f"build/test_results/{{testcase}}/bin/{scheme}"
        self.myout_template = f"build/output/{{testcase}}/{scheme}.out"
        self.runner_log = f"build/log/run_log/{{testcase}}/{scheme}_{{kase}}.out"
        for testcase in testcases:
            self.__generate_path(testcase)

    def __generate_path(self, testcase):
        myout_path = f"build/output/{testcase}/"
        runner_log_path = f"build/log/run_log/{testcase}/"
        os.makedirs(myout_path, exist_ok=True)
        os.makedirs(runner_log_path, exist_ok=True)

    def run_single_test(self, testcase, kase):
        bin_path = self.bin_file_template.format(testcase=testcase)
        stdin_path = f"testcases/{testcase}.in"
        myout = self.myout_template.format(testcase=testcase)
        log = self.runner_log.format(testcase=testcase, kase=kase)
        Print_C.print_procedure(f"Running {self.scheme}_{testcase} [kase: {kase}]")
        # Only the first kase records stdout and the return code; later kases
        # discard stdout so repeated runs don't grow the output file.
        with open(myout, "a+") as myout_file, open(log, "a+") as log_file, open(os.devnull, "w") as null_file:
            stdin_file = open(stdin_path, "r") if os.path.exists(stdin_path) else None
            try:
                stdout_target = myout_file if kase == 0 else null_file
                p = subprocess.run([bin_path], stdin=stdin_file, stdout=stdout_target, stderr=log_file)
                if kase == 0:
                    # blank line followed by the return code, one per line
                    myout_file.write(f"\n{p.returncode}\n")
            finally:
                if stdin_file is not None:
                    stdin_file.close()

    def run_all_tests(self):
        for kase in range(Runner.run_kases):
            Print_C.print_subheader(f"[Running KASE {kase}]")
            for testcase in self.testcases:
                self.run_single_test(testcase=testcase, kase=kase)
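
# --- A hypothetical driver for the Runner above ---
# The scheme and testcase names are placeholders, not taken from the original
# project; they assume matching build artifacts and testcases/<name>.in files.
if __name__ == "__main__":
    runner = Runner(scheme="baseline", testcases=["case_1", "case_2"])
    runner.run_all_tests()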
|
38022
|
import datetime

from django.db import models
from django.contrib.auth.models import User


class Guest(models.Model):
    """
    A temporary user.

    Fields:

    ``user`` - The temporary user.
    ``last_used`` - The last time we noted this user doing something.

    All users with a record in this model are temporary and should be
    deleted after GUEST_DELETE_TIME.
    """

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    last_used = models.DateTimeField()

    @classmethod
    def create_guest(cls, user):
        # returns an unsaved instance; the caller is responsible for save()
        guest = cls(user=user, last_used=datetime.datetime.now())
        return guest
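
# --- A minimal cleanup sketch suggested by the docstring above ---
# GUEST_DELETE_TIME is assumed to be a datetime.timedelta defined in Django
# settings; no such job ships with this model.
from django.conf import settings

def delete_expired_guests():
    cutoff = datetime.datetime.now() - settings.GUEST_DELETE_TIME
    # deleting the User cascades to its Guest row via on_delete=CASCADE
    User.objects.filter(guest__last_used__lt=cutoff).delete()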
|