filename (string, lengths 13-19) | text (string, lengths 134-1.04M)
---|---|
the-stack_0_24662
|
# Copyright (c) 2016, 2017, 2018, 2019 Chris Cummins.
#
# clgen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# clgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with clgen. If not, see <https://www.gnu.org/licenses/>.
"""Unit tests for //deeplearning/clgen/sampler.py."""
import typing
import numpy as np
import pytest
from deeplearning.clgen import errors
from deeplearning.clgen import samplers
from deeplearning.clgen.proto import sampler_pb2
from labm8.py import app
from labm8.py import test
FLAGS = app.FLAGS
class AtomizerMock(object):
"""Mock class for atomizer."""
@staticmethod
def AtomizeString(string) -> np.ndarray:
"""Mock for string atomizer"""
del string
return np.array([1])
@staticmethod
def TokenizeString(string) -> typing.List[str]:
"""Mock for string tokenizer."""
del string
return ["a"]
# AssertConfigIsValid() tests.
def test_AssertConfigIsValid_no_start_text(clgen_cache_dir, abc_sampler_config):
"""Test that an error is thrown if start_text field is not set."""
del clgen_cache_dir
# Field not set.
abc_sampler_config.ClearField("start_text")
with test.Raises(errors.UserError) as e_info:
samplers.Sampler(abc_sampler_config)
assert "Sampler.start_text must be a string" == str(e_info.value)
# Value is an empty string.
abc_sampler_config.start_text = ""
with test.Raises(errors.UserError) as e_info:
samplers.Sampler(abc_sampler_config)
assert "Sampler.start_text must be a string" == str(e_info.value)
def test_AssertConfigIsValid_invalid_batch_size(abc_sampler_config):
"""Test that an error is thrown if batch_size is < 1."""
# Field not set.
abc_sampler_config.ClearField("batch_size")
with test.Raises(errors.UserError) as e_info:
samplers.Sampler(abc_sampler_config)
assert "Sampler.batch_size must be > 0" == str(e_info.value)
# Value is zero.
abc_sampler_config.batch_size = 0
with test.Raises(errors.UserError) as e_info:
samplers.Sampler(abc_sampler_config)
assert "Sampler.batch_size must be > 0" == str(e_info.value)
# Value is negative.
abc_sampler_config.batch_size = -1
with test.Raises(errors.UserError) as e_info:
samplers.Sampler(abc_sampler_config)
assert "Sampler.batch_size must be > 0" == str(e_info.value)
def test_AssertConfigIsValid_invalid_temperature_micros(abc_sampler_config):
"""Test that an error is thrown if temperature_micros is < 0."""
# Field not set.
abc_sampler_config.ClearField("temperature_micros")
with test.Raises(errors.UserError) as e_info:
samplers.Sampler(abc_sampler_config)
assert "Sampler.temperature_micros must be > 0" == str(e_info.value)
# Value is negative.
abc_sampler_config.temperature_micros = -1
with test.Raises(errors.UserError) as e_info:
samplers.Sampler(abc_sampler_config)
assert "Sampler.temperature_micros must be > 0" == str(e_info.value)
# MaxlenTerminationCriterion tests.
def test_MaxlenTerminationCriterion_invalid_maximum_tokens_in_sample():
"""Test that error is raised if maximum_tokens_in_sample is invalid."""
config = sampler_pb2.MaxTokenLength()
# Field is missing.
with test.Raises(errors.UserError) as e_info:
samplers.MaxlenTerminationCriterion(config)
assert "MaxTokenLength.maximum_tokens_in_sample must be > 0" == str(
e_info.value
)
# Value is zero.
config.maximum_tokens_in_sample = 0
with test.Raises(errors.UserError) as e_info:
samplers.MaxlenTerminationCriterion(config)
assert "MaxTokenLength.maximum_tokens_in_sample must be > 0" == str(
e_info.value
)
def test_MaxlenTerminationCriterion_SampleIsComplete():
"""Test SampleIsComplete() returns expected values."""
t = samplers.MaxlenTerminationCriterion(
sampler_pb2.MaxTokenLength(maximum_tokens_in_sample=3)
)
assert not t.SampleIsComplete([])
assert not t.SampleIsComplete(["a"])
assert not t.SampleIsComplete(["a", "b"])
assert t.SampleIsComplete(["a", "b", "c"])
assert t.SampleIsComplete(["a", "b", "c", "d"])
assert t.SampleIsComplete(["a", "b", "c", "d", "e"])
# SymmetricalTokenDepthCriterion tests.
def test_SymmetricalTokenDepthCriterion_depth_increase_token():
"""Test that error is raised if depth_increase_token is invalid."""
config = sampler_pb2.SymmetricalTokenDepth(depth_decrease_token="a")
# Field is missing.
with test.Raises(errors.UserError) as e_info:
samplers.SymmetricalTokenDepthCriterion(config)
assert "SymmetricalTokenDepth.depth_increase_token must be a string" == str(
e_info.value
)
# Value is empty.
config.depth_increase_token = ""
with test.Raises(errors.UserError) as e_info:
samplers.SymmetricalTokenDepthCriterion(config)
assert "SymmetricalTokenDepth.depth_increase_token must be a string" == str(
e_info.value
)
def test_SymmetricalTokenDepthCriterion_depth_decrease_token():
"""Test that error is raised if depth_decrease_token is invalid."""
config = sampler_pb2.SymmetricalTokenDepth(depth_increase_token="a")
# Field is missing.
with test.Raises(errors.UserError) as e_info:
samplers.SymmetricalTokenDepthCriterion(config)
assert "SymmetricalTokenDepth.depth_decrease_token must be a string" == str(
e_info.value
)
# Value is empty.
config.depth_decrease_token = ""
with test.Raises(errors.UserError) as e_info:
samplers.SymmetricalTokenDepthCriterion(config)
assert "SymmetricalTokenDepth.depth_decrease_token must be a string" == str(
e_info.value
)
def test_SymmetricalTokenDepthCriterion_same_tokens():
"""test that error is raised if depth tokens are the same."""
config = sampler_pb2.SymmetricalTokenDepth(
depth_increase_token="a", depth_decrease_token="a"
)
with test.Raises(errors.UserError) as e_info:
samplers.SymmetricalTokenDepthCriterion(config)
assert "SymmetricalTokenDepth tokens must be different" == str(e_info.value)
def test_SymmetricalTokenDepthCriterion_SampleIsComplete():
"""Test SampleIsComplete() returns expected values."""
t = samplers.SymmetricalTokenDepthCriterion(
sampler_pb2.SymmetricalTokenDepth(
depth_increase_token="+", depth_decrease_token="-"
)
)
# Depth 0, incomplete.
assert not t.SampleIsComplete([])
# Depth 1, incomplete.
assert not t.SampleIsComplete(["+"])
# Depth -1, complete.
assert t.SampleIsComplete(["-"])
# Depth 0, complete.
assert t.SampleIsComplete(["+", "-"])
# Depth 1, incomplete.
assert not t.SampleIsComplete(["a", "+", "b", "c"])
# Depth 1, incomplete.
assert not t.SampleIsComplete(["a", "+", "+", "b", "c", "-"])
# Depth 0, complete.
assert t.SampleIsComplete(["a", "+", "-", "+", "b", "c", "-"])
def test_SymmetricalTokenDepthCriterion_SampleIsComplete_reverse_order():
"""Test that sample is not complete if right token appears before left."""
t = samplers.SymmetricalTokenDepthCriterion(
sampler_pb2.SymmetricalTokenDepth(
depth_increase_token="+", depth_decrease_token="-"
)
)
assert not t.SampleIsComplete(["-", "+"])
assert not t.SampleIsComplete(["-", "a", "b", "c", "+"])
assert t.SampleIsComplete(["-", "a", "b", "c", "+", "+", "-"])
# Sampler tests.
def test_Sampler_config_type_error():
"""Test that a TypeError is raised if config is not a Sampler proto."""
with test.Raises(TypeError) as e_info:
samplers.Sampler(1)
assert "Config must be a Sampler proto. Received: 'int'" == str(e_info.value)
def test_Sampler_start_text(abc_sampler_config: sampler_pb2.Sampler):
"""Test that start_text is set from Sampler proto."""
s = samplers.Sampler(abc_sampler_config)
assert s.start_text == abc_sampler_config.start_text
def test_Sampler_temperature(abc_sampler_config: sampler_pb2.Sampler):
"""Test that temperature is set from Sampler proto."""
abc_sampler_config.temperature_micros = 1000000
s = samplers.Sampler(abc_sampler_config)
assert pytest.approx(1.0) == s.temperature
def test_Sampler_batch_size(abc_sampler_config: sampler_pb2.Sampler):
"""Test that batch_size is set from Sampler proto."""
abc_sampler_config.batch_size = 99
s = samplers.Sampler(abc_sampler_config)
assert 99 == s.batch_size
# Sampler.Specialize() tests.
def test_Sampler_Specialize_invalid_depth_tokens(
abc_sampler_config: sampler_pb2.Sampler,
):
"""Test that InvalidSymtokTokens raised if depth tokens cannot be encoded."""
t = abc_sampler_config.termination_criteria.add()
t.symtok.depth_increase_token = "{"
t.symtok.depth_decrease_token = "}"
s = samplers.Sampler(abc_sampler_config)
def MockAtomizeString(string):
"""AtomizeString() with a vocab error on depth tokens."""
if string == "{" or string == "}":
raise errors.VocabError()
else:
return np.array([1])
mock = AtomizerMock()
mock.AtomizeString = MockAtomizeString
with test.Raises(errors.InvalidSymtokTokens) as e_info:
s.Specialize(mock)
assert (
"Sampler symmetrical depth tokens cannot be encoded using the "
"corpus vocabulary"
) == str(e_info.value)
def test_Sampler_Specialize_multiple_tokens_per(
abc_sampler_config: sampler_pb2.Sampler,
):
"""Test that InvalidSymtokTokens raised if depth tokens encode to mult."""
t = abc_sampler_config.termination_criteria.add()
t.symtok.depth_increase_token = "abc"
t.symtok.depth_decrease_token = "cba"
s = samplers.Sampler(abc_sampler_config)
def MockAtomizeString(string):
"""AtomizeString() with a multi-token output."""
del string
return np.array([1, 2, 3])
mock = AtomizerMock()
mock.AtomizeString = MockAtomizeString
with test.Raises(errors.InvalidSymtokTokens) as e_info:
s.Specialize(mock)
assert (
"Sampler symmetrical depth tokens do not encode to a single "
"token using the corpus vocabulary"
) == str(e_info.value)
def test_Sampler_Specialize_encoded_start_text(
abc_sampler_config: sampler_pb2.Sampler,
):
s = samplers.Sampler(abc_sampler_config)
assert s.encoded_start_text is None
s.Specialize(AtomizerMock())
np.testing.assert_array_equal(np.array([1]), s.encoded_start_text)
if __name__ == "__main__":
test.Main()
|
the-stack_0_24664
|
'''
name: E#01
author: Andrey Plugin
email: [email protected]
link: https://www.youtube.com/channel/UCNN3bpPlWWUkUMB7gjcUFlw
MIT License https://github.com/repen/E-parsers/blob/master/License
'''
import requests
from bs4 import BeautifulSoup
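# Scrapes the "top 10 biggest stars in the universe" article, collects the bold
# (<strong>) headings from its paragraphs and writes them to data.txt.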
url = "http://light-science.ru/kosmos/vselennaya/top-10-samyh-bolshih-zvezd-vo-vselennoj.html"
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; \
x64; rv:47.0) Gecko/20100101 Firefox/48.0'}
response = requests.get(url, headers=header)
html = response.text
soup = BeautifulSoup(html, "html.parser")
container = soup.find("div", {"class":"td-post-content"})
elements = container.find_all("p")
string = "Top of the biggest stars: \n"
for element in elements:
if element.find("strong"):
string += "\t" + element.strong.text + "\n"
with open("data.txt", "w", encoding="utf8") as f:
f.write(string)
|
the-stack_0_24666
|
# -*- coding: utf-8 -*-
import jwt
import logging
from calendar import timegm
import ckan.plugins as plugins
import ckan.model as model
from ckan.common import config
from ckan.logic.schema import default_create_api_token_schema
from ckan.exceptions import CkanConfigurationException
log = logging.getLogger(__name__)
_config_encode_secret = u"api_token.jwt.encode.secret"
_config_decode_secret = u"api_token.jwt.decode.secret"
_config_secret_fallback = u"beaker.session.secret"
_config_algorithm = u"api_token.jwt.algorithm"
def _get_plugins():
return plugins.PluginImplementations(plugins.IApiToken)
def _get_algorithm():
return config.get(_config_algorithm, u"HS256")
def _get_secret(encode):
config_key = _config_encode_secret if encode else _config_decode_secret
secret = config.get(config_key)
if not secret:
secret = u"string:" + config.get(_config_secret_fallback, u"")
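# Secrets are given as "<type>:<value>": "file:<path>" loads the key from disk,
# anything else (the default "string:" prefix) is used verbatim.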
type_, value = secret.split(u":", 1)
if type_ == u"file":
with open(value, u"rb") as key_file:
value = key_file.read()
if not value:
raise CkanConfigurationException(
(
u"Neither `{key}` nor `{fallback}` specified. "
u"Missing secret key is a critical security issue."
).format(
key=config_key, fallback=_config_secret_fallback,
)
)
return value
def into_seconds(dt):
return timegm(dt.timetuple())
def get_schema():
schema = default_create_api_token_schema()
for plugin in _get_plugins():
schema = plugin.create_api_token_schema(schema)
return schema
def postprocess(data, jti, data_dict):
for plugin in _get_plugins():
data = plugin.postprocess_api_token(data, jti, data_dict)
return data
def decode(encoded, **kwargs):
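# IApiToken plugins get the first chance to decode the token; if none of them
# returns data, fall back to plain JWT decoding with the configured secret.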
for plugin in _get_plugins():
data = plugin.decode_api_token(encoded, **kwargs)
if data:
break
else:
try:
data = jwt.decode(
encoded,
_get_secret(encode=False),
algorithms=_get_algorithm(),
**kwargs
)
except jwt.InvalidTokenError as e:
# TODO: add signal for performing extra work, like removing
# expired tokens
log.error(u"Cannot decode JWT token: %s", e)
data = None
return data
def encode(data, **kwargs):
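# Mirror of decode(): a plugin may produce the token itself, otherwise the
# payload is signed with the configured secret and algorithm.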
for plugin in _get_plugins():
token = plugin.encode_api_token(data, **kwargs)
if token:
break
else:
token = jwt.encode(
data,
_get_secret(encode=True),
algorithm=_get_algorithm(),
**kwargs
)
return token
def add_extra(result):
for plugin in _get_plugins():
result = plugin.add_extra_fields(result)
return result
def get_user_from_token(token, update_access_time=True):
data = decode(token)
if not data:
return
# do preprocessing in reverse order, allowing onion-like
# "unwrapping" of the data, added during postprocessing, when
# token was created
for plugin in reversed(list(_get_plugins())):
data = plugin.preprocess_api_token(data)
if not data or u"jti" not in data:
return
token_obj = model.ApiToken.get(data[u"jti"])
if not token_obj:
return
if update_access_time:
token_obj.touch(True)
return token_obj.owner
|
the-stack_0_24668
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\rabbit_hole.py
# Compiled at: 2020-08-19 01:09:42
# Size of source mod 2**32: 10547 bytes
import services
from event_testing.tests import TunableTestSet
from interactions.interaction_finisher import FinishingType
from interactions import ParticipantType
from objects import HiddenReasonFlag, ALL_HIDDEN_REASONS
from sims.daycare import DaycareLiability
import placement, sims4
from sims4.tuning.tunable import Tunable, TunableReference, TunableMapping
logger = sims4.log.Logger('HideSimLiability')
HIDE_SIM_LIABILTIY = 'HideSimLiability'
class HideSimLiability(DaycareLiability):
LIABILITY_TOKEN = HIDE_SIM_LIABILTIY
ROUTING_SLAVE_ENTRY_STATE = TunableMapping(description='\n Possible states to set on the routing slave on entry. The state is set if\n its tuned tests pass. The first state with tests that pass will be set.\n ',
key_type=TunableReference(description='\n The state that the routing slave will be put into when their owner is hidden.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.OBJECT_STATE)),
class_restrictions=('ObjectStateValue', ),
pack_safe=True),
key_name='Routing Slave Entry State',
value_type=TunableTestSet(description='\n The result of the tests determines if this state is set.\n '))
ROUTING_SLAVE_EXIT_STATE = TunableMapping(description='\n Possible states to set on the routing slave on entry. The state is set if\n its tuned tests pass. The first state with tests that pass will be set.\n ',
key_type=TunableReference(description='\n The state that the routing slave will be put into when their owner is unhidden.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.OBJECT_STATE)),
class_restrictions=('ObjectStateValue', ),
pack_safe=True),
key_name='Routing Slave Exit State',
value_type=TunableTestSet(description='\n The result of the tests determines if this state is set.\n '))
FACTORY_TUNABLES = {'should_transfer_liabilities': Tunable(description='\n True if the liability should transfer to continuations, False otherwise.\n ',
tunable_type=bool,
default=False)}
def __init__(self, *args, should_transfer_liabilities=False, **kwargs):
super().__init__(*args, should_transfer_liabilities=should_transfer_liabilities, **kwargs)
self._interaction = None
self._has_hidden = False
def should_transfer(self, continuation):
return self.should_transfer_liabilities
def transfer(self, new_interaction):
super().transfer(new_interaction)
old_routing_slave_participants_set = self._interaction.get_participants(ParticipantType.RoutingSlaves)
new_routing_slave_participants_set = new_interaction.get_participants(ParticipantType.RoutingSlaves)
if old_routing_slave_participants_set != new_routing_slave_participants_set:
logger.error("Mismatch between interaction: {}'s routing slave participants and interaction: {}'s routing slave participants.", self._interaction, new_interaction)
self._interaction = new_interaction
def on_add(self, interaction):
super().on_add(interaction)
self._interaction = interaction
for sim_info in self._sim_infos:
sim = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
if sim is not None:
sim.ignore_blocking_near_destination = True
familiar_tracker = sim_info.familiar_tracker
if familiar_tracker is not None:
familiar = familiar_tracker.get_active_familiar()
if familiar is not None:
familiar.ignore_blocking_near_destination = familiar.is_sim or True
def get_sims(self, sim):
if not self._carried_sim_infos:
self._update_carried_participants()
carried_sims = tuple((carried_sim_info.get_sim_instance(allow_hidden_flags=(HiddenReasonFlag.RABBIT_HOLE)) for carried_sim_info in self._carried_sim_infos.get(sim.sim_info, ())))
return tuple((carried_sim for carried_sim in carried_sims if carried_sim is not None)) + (sim,)
def on_run(self):
for sim_info in self._sim_infos:
sim = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
if sim is None:
return
sims_to_hide = self.get_sims(sim)
for sim in sims_to_hide:
sim.fade_out()
sim.hide(HiddenReasonFlag.RABBIT_HOLE)
sim.client.selectable_sims.notify_dirty()
valid_sims = (
self._interaction.sim, self._interaction.target) + sims_to_hide
for interaction in tuple(sim.interaction_refs):
if interaction not in sim.interaction_refs:
continue
if interaction.sim in valid_sims:
continue
interaction.cancel((FinishingType.OBJECT_CHANGED), cancel_reason_msg='Target Sim was hidden by the HideSimLiability')
for sim in sims_to_hide:
sim.remove_location_from_quadtree(placement.ItemType.SIM_POSITION)
sim.remove_location_from_quadtree(placement.ItemType.SIM_INTENDED_POSITION)
for routing_slave in self._interaction.get_participants(ParticipantType.RoutingSlaves):
for state_value, tests in self.ROUTING_SLAVE_ENTRY_STATE.items():
if state_value is not None and tests.run_tests(resolver=(self._interaction.get_resolver())):
routing_slave.set_state(state_value.state, state_value)
break
routing_slave.fade_out()
routing_slave.hide(HiddenReasonFlag.RABBIT_HOLE)
routing_slave.remove_location_from_quadtree(placement.ItemType.SIM_POSITION)
routing_slave.remove_location_from_quadtree(placement.ItemType.SIM_INTENDED_POSITION)
self._has_hidden = True
super().on_run()
def release(self):
if not self._has_hidden:
return
for sim_info in self._sim_infos:
sim = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
if sim is None:
return
for sim in self.get_sims(sim):
sim.show(HiddenReasonFlag.RABBIT_HOLE)
sim.client.selectable_sims.notify_dirty()
sim.add_location_to_quadtree(placement.ItemType.SIM_POSITION)
sim.fade_in()
for routing_slave in self._interaction.get_participants(ParticipantType.RoutingSlaves):
routing_slave.show(HiddenReasonFlag.RABBIT_HOLE)
routing_slave.add_location_to_quadtree(placement.ItemType.SIM_POSITION)
routing_slave.ignore_blocking_near_destination = False
for state_value, tests in self.ROUTING_SLAVE_EXIT_STATE.items():
if state_value is not None and tests.run_tests(resolver=(self._interaction.get_resolver())):
routing_slave.set_state(state_value.state, state_value)
break
routing_slave.fade_in()
self._has_hidden = False
sim.ignore_blocking_near_destination = False
super().release()
|
the-stack_0_24669
|
from typing import Optional, Tuple
import tensorflow as tf
from imagemodel.common.utils.tf_images import decode_png
from imagemodel.experimental.reference_tracking.dataset_providers.rt_drafter import RTDrafterP, RTDrafterT
class RTCellTrackingDrafterT(RTDrafterT):
"""
Examples
--------
>>> import os
>>>
>>> base_folder = "/data/tracking_training"
>>> main_image_folder: str = os.path.join(base_folder, "framed_image", "zero")
>>> main_label_folder: str = os.path.join(base_folder, "framed_label", "zero")
>>> main_bw_label_folder: str = os.path.join(base_folder, "framed_bw_label", "zero")
>>> ref_image_folder: str = os.path.join(base_folder, "framed_image", "p1")
>>> ref_label_folder: str = os.path.join(base_folder, "framed_label", "p1")
>>> ref_bw_label_folder: str = os.path.join(base_folder, "framed_bw_label", "p1")
>>> folders = (
... main_image_folder,
... ref_image_folder,
... main_label_folder,
... ref_label_folder,
... main_bw_label_folder,
... ref_bw_label_folder)
...
>>> from imagemodel.experimental.reference_tracking.dataset_providers.cell_tracking_dataset.\
... rt_cell_tracking_drafter import RTCellTrackingDrafterT
>>> dt = RTCellTrackingDrafterT(folders, shuffle_for_trainer=True, shuffle=False, random_seed=42)
>>> for d in dt.out_dataset.take(1):
... print(d)
...
"""
def __init__(
self,
folders: Tuple[str, str, str, str, str, str],
shuffle_for_trainer: bool,
shuffle: bool,
random_seed: Optional[int]):
self.filename_base_folder: str = folders[0]
self.folders: Tuple[str, str, str, str, str, str] = folders
self.shuffle_for_trainer: bool = shuffle_for_trainer
self.shuffle: bool = shuffle
self.random_seed: Optional[int] = random_seed
def get_filename_dataset(self) -> tf.data.Dataset:
def get_filename_from_fullpath(name):
return tf.strings.split(name, sep="/")[-1]
file_folder_dataset = tf.data.Dataset.list_files(
self.filename_base_folder + "/*",
shuffle=self.shuffle,
seed=self.random_seed)
filename_dataset = file_folder_dataset.map(
get_filename_from_fullpath,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return filename_dataset
def __to_file_folder_dataset(self, filename: tf.Tensor) -> \
Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
def __combine_folder_file(a, b):
return a + "/" + b
return (
__combine_folder_file(self.folders[0], filename),
__combine_folder_file(self.folders[1], filename),
__combine_folder_file(self.folders[2], filename),
__combine_folder_file(self.folders[3], filename),
__combine_folder_file(self.folders[4], filename),
__combine_folder_file(self.folders[5], filename))
def __load_image(
self,
main_image_file_folder: str,
ref_image_file_folder: str,
main_label_folder: str,
ref_label_folder: str,
main_bw_label_folder: str,
ref_bw_label_folder: str):
return (
decode_png(main_image_file_folder),
decode_png(ref_image_file_folder),
decode_png(main_label_folder, 3),
decode_png(ref_label_folder, 3),
decode_png(main_bw_label_folder),
decode_png(ref_bw_label_folder))
@property
def out_dataset(self) -> tf.data.Dataset:
filename_dataset = self.get_filename_dataset()
# TODO: Resolve an issue where memory usage increases without limit when using shuffle.
if self.shuffle_for_trainer:
# filename_dataset = filename_dataset.shuffle(len(filename_dataset))
filename_dataset = filename_dataset.shuffle(512)
file_folder_dataset = filename_dataset.map(
self.__to_file_folder_dataset,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
image_dataset = file_folder_dataset.map(self.__load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return image_dataset
class RTCellTrackingDrafterP(RTDrafterP):
"""
Examples
--------
>>> import os
>>>
>>> base_folder = "/data/tracking_training"
>>> main_image_folder: str = os.path.join(base_folder, "framed_image", "zero")
>>> ref_image_folder: str = os.path.join(base_folder, "framed_image", "p1")
>>> ref_label_folder: str = os.path.join(base_folder, "framed_label", "p1")
>>> filename_folder: str = "/data/tracking_test2/framed_sample"
>>> folders = (
... main_image_folder,
... ref_image_folder,
... ref_label_folder)
...
>>> from imagemodel.experimental.reference_tracking.dataset_providers.cell_tracking_dataset.\
... rt_cell_tracking_drafter import RTCellTrackingDrafterP
>>> dt = RTCellTrackingDrafterP(filename_folder, folders, False, 42)
>>> for d in dt.out_dataset.take(1):
... print(d)
...
"""
def __init__(
self,
filename_folder: Optional[str],
folders: Tuple[str, str, str],
shuffle: bool,
random_seed: Optional[int]):
self.filename_base_folder = filename_folder or folders[0]
self.folders = folders
self.shuffle = shuffle
self.random_seed = random_seed
def get_base_file_folder_dataset(self) -> tf.data.Dataset:
return tf.data.Dataset.list_files(
self.filename_base_folder + "/*",
shuffle=self.shuffle,
seed=self.random_seed)
def __to_filename_dataset(self, file_folder: str) -> tf.data.Dataset:
def get_filename_from_fullpath(name):
return tf.strings.split(name, sep="/")[-1]
return get_filename_from_fullpath(file_folder)
def __to_file_folder_dataset(self, filename: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
def __combine_folder_file(a, b):
return a + "/" + b
return (
filename,
__combine_folder_file(self.folders[0], filename),
__combine_folder_file(self.folders[1], filename),
__combine_folder_file(self.folders[2], filename))
def __load_image(
self,
filename: tf.Tensor,
main_image_file_folder: str,
ref_image_file_folder: str,
ref_label_folder: str):
return (
filename,
decode_png(main_image_file_folder),
decode_png(ref_image_file_folder),
decode_png(ref_label_folder, 3))
@property
def out_dataset(self) -> tf.data.Dataset:
base_file_folder_dataset = self.get_base_file_folder_dataset()
filename_dataset = base_file_folder_dataset.map(
self.__to_filename_dataset,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
file_folder_dataset = filename_dataset.map(
self.__to_file_folder_dataset,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
image_dataset = file_folder_dataset.map(self.__load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return image_dataset
|
the-stack_0_24670
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import pytest
import numpy as np
import pyflann
VALID_INT_TYPES = (
np.sctypeDict['int64'],
np.sctypeDict['int32'],
np.sctypeDict['uint8'],
np.dtype('int32'),
np.dtype('uint8'),
np.dtype('int64'),
)
def is_int_type(dtype):
return dtype in VALID_INT_TYPES
def rand_vecs(num, dim, rng=np.random, dtype=np.uint8):
if is_int_type(dtype):
return (rng.rand(num, dim) * 255).astype(dtype)
else:
return (rng.rand(num, dim)).astype(dtype)
class Test_PyFlann_add_remove(unittest.TestCase):
def setUp(self):
pass
def test_add_loop(self):
"""
Test add_points using a loop when the added data goes out of scope.
"""
data_dim = 128
num_qpts = 100
num_dpts = 1000
random_seed = 42
rng = np.random.RandomState(0)
dataset = rand_vecs(num_dpts, data_dim, rng)
testset = rand_vecs(num_qpts, data_dim, rng)
# Build deterministic flann object
flann = pyflann.FLANN()
params = flann.build_index(
dataset, algorithm='kdtree', trees=4, random_seed=random_seed
)
# Add points in a loop where new_pts goes out of scope
num_iters = 100
for count in range(num_iters):
new_pts = rand_vecs(200, data_dim, rng)
flann.add_points(new_pts, 2)
# query to ensure that at least some of the new points are in the results
num_extra = 200
num_neighbs = num_dpts + num_extra
result1, _ = flann.nn_index(testset, num_neighbs, checks=params['checks'])
self.assertTrue(
(result1 > num_dpts).sum() >= num_qpts * num_extra,
'at least some of the returned points should be from the added set',
)
@pytest.mark.skip(reason='unused feature')
def test_add(self):
"""
Test simple case of add_points
"""
data_dim = 128
num_dpts = 1000
num_qpts = 100
num_neighbs = 5
random_seed = 42
rng = np.random.RandomState(0)
dataset = rand_vecs(num_dpts, data_dim, rng)
testset = rand_vecs(num_qpts, data_dim, rng)
# Build deterministic flann object
flann = pyflann.FLANN()
params = flann.build_index(
dataset, algorithm='kdtree', trees=4, random_seed=random_seed
)
# check nearest neighbor search before add, should be all over the place
result1, _ = flann.nn_index(testset, num_neighbs, checks=params['checks'])
# Add points
flann.add_points(testset, 2)
# check nearest neighbor search after add
result2, _ = flann.nn_index(testset, num_neighbs, checks=params['checks'])
# print('Neighbor results should be between %d and %d' % (num_dpts, num_dpts + num_qpts))
self.assertTrue(
np.all(result2.T[0] >= num_dpts), 'new points should be found first'
)
self.assertTrue(
(result2.T[1] == result1.T[0]).sum() > result1.shape[0] / 2,
'most old points should be found next',
)
@pytest.mark.skip(reason='unused feature')
def test_remove(self):
"""
Test simple case of remove points
"""
data_dim = 128
num_dpts = 1000
num_neighbs = 5
random_seed = 42
rng = np.random.RandomState(0)
dataset = rand_vecs(num_dpts, data_dim, rng)
rng = np.random.RandomState(0)
# Build deterministic flann object
flann = pyflann.FLANN()
params = flann.build_index(
dataset, algorithm='kdtree', trees=4, random_seed=random_seed
)
# check nearest neighbor search before add, should be all over the place
result1, _ = flann.nn_index(dataset, num_neighbs, checks=params['checks'])
data_ids = np.arange(0, dataset.shape[0])
check1 = result1.T[0] == data_ids
self.assertTrue(np.all(check1), 'self query should result in consecutive results')
# Remove half of the data points
for id_ in range(0, num_dpts, 2):
flann.remove_point(id_)
result2, _ = flann.nn_index(dataset, num_neighbs, checks=params['checks'])
check2_odd = result2.T[0][1::2] == data_ids[1::2]
check2_even = result2.T[0][0::2] == data_ids[0::2]
self.assertTrue(
np.all(check2_odd),
'unremoved points should have unchanged neighbors',
)
self.assertTrue(
not np.any(check2_even),
'removed points should have different neighbors',
)
def test_used_memory(self):
"""
Simple test to make sure the used_memory binding works
"""
data_dim = 128
num_dpts = 1000
num_qpts = 100
num_neighbs = 5
random_seed = 42
rng = np.random.RandomState(0)
dataset = rand_vecs(num_dpts, data_dim, rng)
testset = rand_vecs(num_qpts, data_dim, rng)
# Build deterministic flann object
flann = pyflann.FLANN()
params = flann.build_index(
dataset, algorithm='kdtree', trees=4, random_seed=random_seed
)
# check nearest neighbor search before add, should be all over the place
result1, _ = flann.nn_index(testset, num_neighbs, checks=params['checks'])
prev_index_memory = flann.used_memory()
prev_data_memory = flann.used_memory_dataset()
# Add points
flann.add_points(testset, 2)
# check memory after add points
post_index_memory = flann.used_memory()
post_data_memory = flann.used_memory_dataset()
index_memory_diff = post_index_memory - prev_index_memory
data_memory_diff = post_data_memory - prev_data_memory
self.assertTrue(index_memory_diff > 0, 'add points should increase memory usage')
self.assertTrue(data_memory_diff > 0, 'add points should increase memory usage')
if __name__ == '__main__':
"""
CommandLine:
python test/test_add_remove.py
"""
unittest.main()
|
the-stack_0_24673
|
import requests
import gzip
import constant
import xml.etree.ElementTree as Et
import debian.deb822 as deb822
from database import Database
from deb import Deb
def get_debian_sources(codename):
url = 'http://ftp.debian.org/debian/dists/' + codename + '/main/source/Sources.gz'
r = requests.get(url)
return gzip.decompress(r.content)
def add_elbe_xml(project_id):
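# Parses the project's uploaded ELBE XML, maps every binary package listed in
# <fullpkgs> to its Debian source package via the distribution's Sources index,
# records new packages in debian_package and runs an initial CVE check on each.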
db = Database()
c = db.connect()
d = Deb()
sql = "SELECT uploaded_file " \
"FROM project " \
"WHERE id = '%s'" % project_id
file_name = db.read_single_record(c, sql)
sql = "SELECT users_id " \
"FROM project " \
"WHERE id = '%s'" % project_id
user_id = db.read_single_record(c, sql)
result = get_project_name(project_id)
f = constant.UPLOAD_PATH + str(user_id["users_id"]) + "/" + result["project_name"] + "/" + str(
file_name["uploaded_file"])
tree = Et.parse(f)
root = tree.getroot()
distribution = Et.ElementTree(file=f).find('.//project/suite').text
sources = get_debian_sources(distribution)
for pkg in root.findall("./fullpkgs/pkg"):
# convert binary Package name to source Package name
for content in deb822.Sources.iter_paragraphs(sources):
binary_packages = content['Binary']
package_list = binary_packages.split(", ")
if pkg.text in package_list:
source_package_name = content['Package']
# check if we already have an entry in the db
sql = "SELECT * " \
"FROM debian_package " \
"WHERE debian_package_name = '%s' " \
"AND version = '%s' " \
"AND distribution = '%s' " \
"AND project_id = '%s'" \
% (source_package_name, pkg.attrib["version"], distribution, project_id)
result = db.read_all_records(c, sql)
if not result:
sql = "INSERT INTO debian_package " \
"(debian_package_name, version, distribution, project_id, is_vulnerable) " \
"VALUES ('%s', '%s', '%s', '%s', '%s')" \
% (source_package_name, pkg.attrib["version"], distribution, project_id, "false")
db.create_record(c, sql)
# do a check for each package when initially adding packages from an elbe xml
d.check_package(source_package_name, pkg.attrib["version"], distribution, project_id)
break
def get_project_id(project_name):
db = Database()
c = db.connect()
sql = "SELECT id " \
"FROM project " \
"WHERE project_name = '%s'" % project_name
return db.read_single_record(c, sql)
def get_project_name(project_id):
db = Database()
c = db.connect()
sql = "SELECT project_name " \
"FROM project " \
"WHERE id = '%s'" % project_id
return db.read_single_record(c, sql)
def get_all_projects():
db = Database()
c = db.connect()
sql = "SELECT id, operating_system " \
"FROM project"
return db.read_all_records(c, sql)
def get_cve_id(cve):
db = Database()
c = db.connect()
sql = "SELECT id " \
"FROM debian_cve " \
"WHERE cve_id = '%s'" % cve
return db.read_single_record(c, sql)
def setStatus(project_id, value):
db = Database()
c = db.connect()
sql = "UPDATE project " \
"SET status_cve_check = '%s' " \
"WHERE id = '%s'" % (value, project_id)
db.write_single_record(c, sql)
def getRecipient(project_id):
db = Database()
c = db.connect()
sql = "SELECT email " \
"FROM users " \
"WHERE id = (SELECT users_id FROM project WHERE id = '%s')" % project_id
return db.read_single_record(c, sql)
def setVulnerable(os, project_id, package_name, version):
db = Database()
c = db.connect()
if os == "Debian":
sql = "UPDATE debian_package " \
"SET is_vulnerable = '%s' " \
"WHERE project_id = '%s' " \
"AND debian_package_name = '%s' " \
"AND version = '%s' " \
% ("true", project_id, package_name, version)
db.write_single_record(c, sql)
elif os == "Yocto":
sql = "UPDATE yocto_package " \
"SET is_vulnerable = '%s' " \
"WHERE project_id = '%s' " \
"AND yocto_package_name = '%s' " \
"AND version = '%s' " \
% ("true", project_id, package_name, version)
db.write_single_record(c, sql)
|
the-stack_0_24674
|
# Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
from zenml.integrations.constants import PYTORCH
from zenml.pipelines import pipeline
from zenml.steps import Output, step
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Define model
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28 * 28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10),
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
@step
def importer_mnist() -> Output(
train_dataloader=DataLoader,
test_dataloader=DataLoader,
):
"""Download the Fashion MNIST dataset."""
# Download training data from open datasets.
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor(),
)
# Download test data from open datasets.
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor(),
)
batch_size = 64
# Create dataloaders.
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
return train_dataloader, test_dataloader
@step
def trainer(train_dataloader: DataLoader) -> nn.Module:
"""Trains on the train dataloader"""
model = NeuralNetwork().to(device)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
size = len(train_dataloader.dataset)
model.train()
for batch, (X, y) in enumerate(train_dataloader):
X, y = X.to(device), y.to(device)
# Compute prediction error
pred = model(X)
loss = loss_fn(pred, y)
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
return model
@step
def evaluator(test_dataloader: DataLoader, model: nn.Module) -> float:
"""Evaluates on the model."""
loss_fn = nn.CrossEntropyLoss()
size = len(test_dataloader.dataset)
num_batches = len(test_dataloader)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in test_dataloader:
X, y = X.to(device), y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= num_batches
correct /= size
print(
f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n"
)
return 100 * correct
# Define the pipeline
@pipeline(required_integrations=[PYTORCH])
def fashion_mnist_pipeline(
importer,
trainer,
evaluator,
):
"""Link all the steps and artifacts together"""
train_dataloader, test_dataloader = importer()
model = trainer(train_dataloader)
evaluator(test_dataloader=test_dataloader, model=model)
if __name__ == "__main__":
# Initialize a pipeline run
p = fashion_mnist_pipeline(
importer=importer_mnist(),
trainer=trainer(),
evaluator=evaluator(),
)
# Run the pipeline
p.run()
|
the-stack_0_24676
|
from threading import Thread
import socket
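# Minimal TCP chat client: one thread forwards stdin lines to the server until
# "/exit" is typed, a second thread prints everything received from the socket.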
def send():
data = input()
while data != '/exit':
sock.send(bytes(data, "utf-8"))
data = input()
sock.close()
def receive():
while True:
try:
data = sock.recv(1024)
print(data.decode('utf-8'))
except OSError:
exit()
if __name__ == "__main__":
sock = socket.socket()
sock.connect(('localhost', 44444))
th_send, th_receive = Thread(target=send), Thread(target=receive)
th_send.start(), th_receive.start()
|
the-stack_0_24677
|
#!/usr/bin/env vpython
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def AddBenchmarkCommandLineArgs(parser):
parser.add_option('--user-agent', action='store', type='string',
default=None, help='Options are mobile and desktop.')
parser.add_option('--archive-data-file',
action='store',
type='string',
default=None,
help='The location of the WPR JSON archive file.')
parser.add_option('--urls-list', action='store', type='string',
default=None,
help='This is a comma separated list of urls. '
'Eg: http://www.google.com,http://www.gmail.com')
def ValidateCommandLineArgs(parser, args):
if not args.user_agent:
parser.error('Please specify --user-agent.')
if not args.archive_data_file and not args.use_live_sites:
parser.error('Please specify --archive-data-file.')
if not args.urls_list:
parser.error('Please specify --urls-list.')
|
the-stack_0_24678
|
from absl import app
from absl import flags
import erdos
import pylot.flags
import pylot.operator_creator
import pylot.utils
from pylot.control.messages import ControlMessage
FLAGS = flags.FLAGS
flags.DEFINE_bool('compute_detection_decay', False,
'True to enable ground truth object detection evaluation.')
flags.DEFINE_bool('compute_segmentation_decay', False,
'True to enable ground truth segmentation evaluation')
flags.DEFINE_integer('decay_max_latency', 400,
'Max latency to evaluate in ground truth experiments')
# The location of the center camera relative to the ego-vehicle.
CENTER_CAMERA_LOCATION = pylot.utils.Location(1.0, 0.0, 1.8)
class SynchronizerOperator(erdos.Operator):
def __init__(self, wait_stream, control_stream, flags):
erdos.add_watermark_callback([wait_stream], [control_stream],
self.on_watermark)
self._flags = flags
@staticmethod
def connect(wait_stream):
# Set no watermark on the output stream so that we do not
# close the watermark loop with the simulator bridge operator.
control_stream = erdos.WriteStream()
return [control_stream]
def on_watermark(self, timestamp, control_stream):
# The control message is ignored by the bridge operator because
# data gathering is conducted using auto pilot. Send default control
# message.
control_msg = ControlMessage(0, 0, 0, False, False, timestamp)
control_stream.send(control_msg)
def main(argv):
""" Computes ground obstacle detection and segmentation decay."""
transform = pylot.utils.Transform(CENTER_CAMERA_LOCATION,
pylot.utils.Rotation())
control_loop_stream = erdos.LoopStream()
release_sensor_stream = erdos.IngestStream()
(
pose_stream,
pose_stream_for_control,
ground_traffic_lights_stream,
ground_obstacles_stream,
ground_speed_limit_signs_stream,
ground_stop_signs_stream,
vehicle_id_stream,
open_drive_stream,
global_trajectory_stream,
) = pylot.operator_creator.add_simulator_bridge(control_loop_stream,
release_sensor_stream)
# Add camera sensors.
(center_camera_stream, notify_rgb_stream,
rgb_camera_setup) = pylot.operator_creator.add_rgb_camera(
transform, vehicle_id_stream, release_sensor_stream)
(depth_camera_stream, notify_depth_stream,
depth_camera_setup) = pylot.operator_creator.add_depth_camera(
transform, vehicle_id_stream, release_sensor_stream)
(segmented_stream, _,
_) = pylot.operator_creator.add_segmented_camera(transform,
vehicle_id_stream,
release_sensor_stream)
map_stream = None
if FLAGS.compute_detection_decay:
obstacles_stream = pylot.operator_creator.add_perfect_detector(
depth_camera_stream, center_camera_stream, segmented_stream,
pose_stream, ground_obstacles_stream,
ground_speed_limit_signs_stream, ground_stop_signs_stream)
map_stream = pylot.operator_creator.add_detection_decay(
obstacles_stream)
iou_stream = None
if FLAGS.compute_segmentation_decay:
iou_stream = pylot.operator_creator.add_segmentation_decay(
segmented_stream)
# TODO: Hack! We synchronize on a single stream, based on a guesstimate
# of which stream is slowest. Instead, we should synchronize on all output
# streams, and we should ensure that even the operators without output
# streams complete.
if FLAGS.control == 'simulator_auto_pilot':
stream_to_sync_on = iou_stream
if map_stream is not None:
stream_to_sync_on = map_stream
op_config = erdos.OperatorConfig(name='synchronizer_operator',
flow_watermarks=False)
(control_stream, ) = erdos.connect(SynchronizerOperator, op_config,
[stream_to_sync_on], FLAGS)
control_loop_stream.set(control_stream)
else:
raise ValueError(
"Must be in auto pilot mode. Pass --control=simulator_auto_pilot")
erdos.run_async()
# Ask all sensors to release their data.
release_sensor_stream.send(
erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
if __name__ == '__main__':
app.run(main)
|
the-stack_0_24680
|
import cadquery as cq
from paramak import Shape
class SweepCircleShape(Shape):
"""Sweeps a 2D circle of a defined radius along a defined spline path to
create a 3D CadQuery solid. Note, some variation in the cross-section of
the solid may occur.
Args:
radius (float): Radius of 2D circle to be swept.
path_points (list of tuples each containing X (float), Z (float)): A
list of XY, YZ or XZ coordinates connected by spline connections
which define the path along which the 2D shape is swept.
workplane (str, optional): Workplane in which the circle to be swept
is defined. Defaults to "XY".
path_workplane (str, optional): Workplane in which the spline path is
defined. Defaults to "XZ".
stp_filename (str, optional): Defaults to "SweepCircleShape.stp".
stl_filename (str, optional): Defaults to "SweepCircleShape.stl".
force_cross_section (bool, optional): If True, cross-section of solid
is forced to be shape defined by points in workplane at each
path_point. Defaults to False.
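Example:
A minimal usage sketch; the radius and path points below are arbitrary
illustrative values rather than values from any particular model.
>>> shape = SweepCircleShape(
...     radius=10,
...     path_points=[(50, 0), (30, 50), (50, 100)],
... )
>>> solid = shape.create_solid()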
"""
def __init__(
self,
radius,
path_points,
workplane="XY",
path_workplane="XZ",
stp_filename="SweepCircleShape.stp",
stl_filename="SweepCircleShape.stl",
force_cross_section=False,
**kwargs
):
super().__init__(
workplane=workplane,
stp_filename=stp_filename,
stl_filename=stl_filename,
**kwargs
)
self.radius = radius
self.path_points = path_points
self.path_workplane = path_workplane
self.force_cross_section = force_cross_section
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, value):
self._radius = value
@property
def path_points(self):
return self._path_points
@path_points.setter
def path_points(self, value):
self._points = value
self._path_points = value
@property
def path_workplane(self):
return self._path_workplane
@path_workplane.setter
def path_workplane(self, value):
if value[0] != self.workplane[0]:
raise ValueError(
"workplane and path_workplane must start with the same letter"
)
elif value == self.workplane:
raise ValueError(
"workplane and path_workplane must be different"
)
else:
self._path_workplane = value
def create_solid(self):
"""Creates a swept 3D solid from a 2D circle.
Returns:
A CadQuery solid: A 3D solid volume
"""
path = cq.Workplane(self.path_workplane).spline(self.path_points)
factor = 1
if self.workplane in ["XZ", "YX", "ZY"]:
factor *= -1
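# With force_cross_section the circle is re-stamped at every intermediate path
# point, so the swept cross-section stays circular along the whole spline
# instead of only being constrained at the two end profiles.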
if self.force_cross_section:
solid = cq.Workplane(self.workplane).moveTo(0, 0)
for point in self.path_points[:-1]:
solid = solid.workplane(offset=point[1] * factor).\
moveTo(point[0], 0).\
circle(self.radius).\
moveTo(0, 0).\
workplane(offset=-point[1] * factor)
solid = solid.workplane(offset=self.path_points[-1][1] * factor).moveTo(
self.path_points[-1][0], 0).circle(self.radius).sweep(path, multisection=True)
if not self.force_cross_section:
solid = (
cq.Workplane(self.workplane)
.workplane(offset=self.path_points[0][1] * factor)
.moveTo(self.path_points[0][0], 0)
.workplane()
.circle(self.radius)
.moveTo(-self.path_points[0][0], 0)
.workplane(offset=-self.path_points[0][1] * factor)
.workplane(offset=self.path_points[-1][1] * factor)
.moveTo(self.path_points[-1][0], 0)
.workplane()
.circle(self.radius)
.sweep(path, multisection=True)
)
solid = self.rotate_solid(solid)
solid = self.perform_boolean_operations(solid)
self.solid = solid
return solid
|
the-stack_0_24681
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy import cosmology
from astropy.cosmology import Cosmology, Planck18, realizations
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Parameter
from astropy.cosmology.io.table import from_table, to_table
from astropy.cosmology.parameters import available
from astropy.table import QTable, Table, vstack
from .base import IOTestMixinBase, IOFormatTestBase
cosmo_instances = [getattr(realizations, name) for name in available]
cosmo_instances.append("TestToFromTable.setup.<locals>.CosmologyWithKwargs")
###############################################################################
class ToFromTableTestMixin(IOTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="astropy.table"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_table_bad_index(self, from_format, to_format):
"""Test if argument ``index`` is incorrect"""
tbl = to_format("astropy.table")
# single-row table and has a non-0/None index
with pytest.raises(IndexError, match="index 2 out of range"):
from_format(tbl, index=2, format="astropy.table")
# string index where doesn't match
with pytest.raises(KeyError, match="No matches found for key"):
from_format(tbl, index="row 0", format="astropy.table")
# -----------------------
def test_to_table_failed_cls(self, to_format):
"""Test failed table type."""
with pytest.raises(TypeError, match="'cls' must be"):
to_format('astropy.table', cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
def test_to_table_cls(self, to_format, tbl_cls):
tbl = to_format('astropy.table', cls=tbl_cls)
assert isinstance(tbl, tbl_cls) # test type
# -----------------------
@pytest.mark.parametrize("in_meta", [True, False])
def test_to_table_in_meta(self, cosmo_cls, to_format, in_meta):
"""Test where the cosmology class is placed."""
tbl = to_format('astropy.table', cosmology_in_meta=in_meta)
# if it's in metadata, it's not a column. And vice versa.
if in_meta:
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.colnames # not also a column
else:
assert tbl["cosmology"][0] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.meta
# -----------------------
def test_tofrom_table_instance(self, cosmo_cls, cosmo, from_format, to_format):
"""Test cosmology -> astropy.table -> cosmology."""
# ------------
# To Table
tbl = to_format("astropy.table")
assert isinstance(tbl, QTable)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl["name"] == cosmo.name
assert tbl.indices # indexed!
# ------------
# From Table
tbl["mismatching"] = "will error"
# tests are different if the last argument is a **kwarg
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = from_format(tbl, format="astropy.table")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(tbl, format="astropy.table")
# unless mismatched are moved to meta
got = from_format(tbl, format="astropy.table", move_to_meta=True)
assert got == cosmo
assert got.meta["mismatching"] == "will error"
# it won't error if everything matches up
tbl.remove_column("mismatching")
got = from_format(tbl, format="astropy.table")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``to_format``.
tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
got = from_format(tbl, format="astropy.table")
assert got == cosmo
# also it auto-identifies 'format'
got = from_format(tbl)
assert got == cosmo
def test_fromformat_table_subclass_partial_info(self, cosmo_cls, cosmo,
from_format, to_format):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
# test to_format
tbl = to_format("astropy.table")
assert isinstance(tbl, QTable)
# partial information
tbl.meta.pop("cosmology", None)
del tbl["Tcmb0"]
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.from_format(tbl, format="astropy.table")
got2 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls)
got3 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
@pytest.mark.parametrize("add_index", [True, False])
def test_tofrom_table_multirow(self, cosmo_cls, cosmo, to_format, from_format, add_index):
"""Test if table has multiple rows."""
# ------------
# To Table
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
tbl = vstack([c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts='silent')
assert isinstance(tbl, QTable)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl[1]["name"] == cosmo.name
# whether to add an index. `from_format` can work with or without.
if add_index:
tbl.add_index("name", unique=True)
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
from_format(tbl, format="astropy.table")
# unless the index argument is provided
got = from_format(tbl, index=1, format="astropy.table")
assert got == cosmo
# the index can be a string
got = from_format(tbl, index=cosmo.name, format="astropy.table")
assert got == cosmo
# when there's more than one cosmology found
tbls = vstack([tbl, tbl], metadata_conflicts="silent")
with pytest.raises(ValueError, match="more than one"):
from_format(tbls, index=cosmo.name, format="astropy.table")
class TestToFromTable(IOFormatTestBase, ToFromTableTestMixin):
"""Directly test ``to/from_table``."""
def setup_class(self):
self.functions = {"to": to_table, "from": from_table}
|
the-stack_0_24682
|
import requests
def download_file_from_google_drive(id, destination):
'''
file_id_map = {
"train_identity.csv.zip":"12PbACvaeU7htbS5jjdBM_gB0z_S7Kjnb",
"train_transaction.csv.zip":"1OdGYi4Z4JKhx2aMjcB2ugQx8OiXp-ZoI",
"test_transaction.csv.zip":"1LtM_49Y2QEjSBdpmODMvJmbZhX4-MOQc",
"test_identity.csv.zip":"1QxyUrDv_ZPlkLw7Ohqech1CaYKaOKuER",
"sample_submission.csv.zip":"1kh7clvggqpIKtKWAJn5d9xe1RCG1A3nE",
}
for destination ,file_id in file_id_map.items():
download_file_from_google_drive(file_id, destination)
'''
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
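# Large files trigger Google Drive's "can't scan for viruses" warning page; the
# confirm token taken from the warning cookie lets a second request proceed.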
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
|
the-stack_0_24685
|
from datetime import datetime
from pathlib import Path
from utils import safe_open
class EnvFile:
def __init__(self, header, path):
self.header = header
self.path = Path(path)
self.variables = {}
if self.is_file():
self.variables = self.read()
def is_file(self):
return self.path.is_file()
def read(self):
with safe_open(self.path, "r") as f:
return parse_env_file(f.read())
def save(self):
lines = []
lines.append("# %s" % self.header)
lines.append("# %s\n" % datetime.now())
for key in sorted(self.variables.keys()):
value = self.variables[key]
lines.append("%s=%s" % (key, value))
with safe_open(self.path, "w") as f:
f.write("\n".join(lines))
def __setitem__(self, key, item):
self.variables[key] = item
def __getitem__(self, key):
return self.variables[key]
def get(self, name, default=None):
return self.variables.get(name, default)
def parse_env_file(file_contents):
variables = {}
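# Accepts simple KEY=VALUE lines; blank lines and "#" comments are skipped and
# surrounding double quotes on the value are stripped.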
for line in file_contents.splitlines():
stripped_line = line.strip()
if not stripped_line:
continue
if stripped_line.startswith("#"):
continue
if "=" not in stripped_line:
continue
assignment_position = stripped_line.find("=")
key = stripped_line[:assignment_position]
value = stripped_line[assignment_position + 1 :].strip('"')
variables[key] = value
return variables
|
the-stack_0_24687
|
import torch
import argparse
import time
from sklearn.metrics import confusion_matrix
from rubiksnet.dataset import RubiksDataset, return_dataset
from rubiksnet.models import RubiksNet
from rubiksnet.transforms import *
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
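# topk returns the indices of the maxk highest-scoring classes per sample;
# transposing and comparing against the broadcast targets gives a
# (maxk, batch) hit matrix from which each precision@k is summed.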
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
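# Illustrative sanity check for accuracy() (added for clarity, not part of the original script):
# with logits for 2 samples over 3 classes, sample 0 is classified correctly and sample 1 is not,
# so Prec@1 is 50%:
#   logits = torch.tensor([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])
#   labels = torch.tensor([1, 2])
#   accuracy(logits, labels, topk=(1,))  # -> [tensor(50.)]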
def main():
assert torch.cuda.is_available(), "CUDA must be available to run this example"
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(
description="RubiksNet testing on the full validation set"
)
parser.add_argument("dataset", type=str)
parser.add_argument(
"-p", "--pretrained", type=str, required=True, help="pretrained checkpoint path"
)
parser.add_argument(
"--root-path",
type=str,
default="./",
help="we assume the dataset to be located at <root_path>/<dataset_name>",
)
parser.add_argument(
"--frames",
type=int,
default=8,
help="number of video frames to be passed to the network as a single clip",
)
parser.add_argument(
"--two-clips",
action="store_true",
help='enable "two clip evaluation" protocol.',
)
parser.add_argument("--batch-size", type=int, default=80)
parser.add_argument(
"-j",
"--workers",
default=8,
type=int,
metavar="N",
help="number of data loading workers (default: 8)",
)
parser.add_argument("--gpus", nargs="+", type=int, default=None)
args = parser.parse_args()
(num_classes, args.train_list, val_list, root_path, prefix,) = return_dataset(
args.dataset, args.root_path
)
print(f"=> dataset: {args.dataset}")
print(f"=> root_path: {args.root_path}")
print(f"=> num_classes: {num_classes}")
net = RubiksNet.load_pretrained(args.pretrained)
print(f"=> tier: {net.tier}")
print(f"=> variant: {net.variant}")
if args.two_clips:
twice_sample = True
test_crops = 3
else:
twice_sample = False
test_crops = 1
print(f"=> eval mode: {'2-clip' if args.two_clips else '1-clip'}")
if test_crops == 1:
cropping = torchvision.transforms.Compose(
[GroupScale(net.scale_size), GroupCenterCrop(net.input_size),]
)
    elif test_crops == 3:  # do not flip, 3 full-resolution crops
cropping = torchvision.transforms.Compose(
[GroupFullResSample(net.input_size, net.scale_size, flip=False)]
)
elif test_crops == 5: # do not flip, so only 5 crops
cropping = torchvision.transforms.Compose(
[GroupOverSample(net.input_size, net.scale_size, flip=False)]
)
elif test_crops == 10:
cropping = torchvision.transforms.Compose(
[GroupOverSample(net.input_size, net.scale_size)]
)
else:
raise ValueError(
f"Only 1, 5, 10 crops are supported while we got {test_crops}."
)
data_loader = torch.utils.data.DataLoader(
RubiksDataset(
root_path,
val_list,
num_segments=args.frames,
new_length=1,
image_tmpl=prefix,
test_mode=True,
remove_missing=True,
transform=torchvision.transforms.Compose(
[
cropping,
Stack(roll=False),
ToTorchFormatTensor(div=True),
GroupNormalize(net.input_mean, net.input_std),
]
),
dense_sample=False,
twice_sample=twice_sample,
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
if args.gpus is None:
args.gpus = list(range(torch.cuda.device_count()))
net = torch.nn.DataParallel(net.cuda(args.gpus[0]), device_ids=args.gpus)
net.eval()
output = []
proc_start_time = time.time()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
for i, (data, label) in enumerate(data_loader):
batch_size = label.numel()
num_crop = test_crops
if twice_sample:
num_crop *= 2
data_in = data.view(-1, 3, data.size(2), data.size(3))
data_in = data_in.view(
batch_size * num_crop, args.frames, 3, data_in.size(2), data_in.size(3),
)
rst = net(data_in)
rst = rst.reshape(batch_size, num_crop, -1).mean(1)
rst = rst.data.cpu().numpy().copy()
rst = rst.reshape(batch_size, num_classes)
for p, g in zip(rst, label.cpu().numpy()):
output.append([p[None, ...], g])
cnt_time = time.time() - proc_start_time
prec1, prec5 = accuracy(torch.from_numpy(rst), label, topk=(1, 5))
top1.update(prec1.item(), label.numel())
top5.update(prec5.item(), label.numel())
if i % 20 == 0:
print(
f"video {i * args.batch_size} done, total {i * args.batch_size}/{len(data_loader.dataset)}, "
f"average {float(cnt_time) / (i+1) / args.batch_size:.3f} sec/video, "
f"moving Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}"
)
video_pred = [np.argmax(x[0]) for x in output]
video_pred_top5 = [
np.argsort(np.mean(x[0], axis=0).reshape(-1))[::-1][:5] for x in output
]
video_labels = [x[1] for x in output]
cf = confusion_matrix(video_labels, video_pred).astype(float)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
cls_acc = cls_hit / cls_cnt
print("\n====================== Evaluation Complete ======================")
print("Class confusion matrix:")
print(cls_acc)
print(f"\nAccuracy: top 1: {top1.avg:.02f}%\ttop 5: {top5.avg:.02f}%")
if __name__ == "__main__":
main()
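# Illustrative invocation sketch (the script, dataset and checkpoint names below are assumptions):
#   python test_models.py somethingv2 -p pretrained/rubiksnet.pth.tar --frames 8 --two-clips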
|
the-stack_0_24690
|
import csv
import json
import os
from functools import lru_cache
import jsonpointer
import requests
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.roles import set_classes
from myst_parser.main import to_docutils
from ocdsextensionregistry import ExtensionRegistry
from sphinx.errors import SphinxError
live_branch = os.getenv('TRAVIS_BRANCH', os.getenv('GITHUB_REF', '').rsplit('/', 1)[-1]) in {'1.0', '1.1', 'latest'}
extensions_url = 'https://raw.githubusercontent.com/open-contracting/extension_registry/main/extensions.csv'
extension_versions_url = 'https://raw.githubusercontent.com/open-contracting/extension_registry/main/extension_versions.csv' # noqa: E501
extension_explorer_template = 'https://extensions.open-contracting.org/{}/extensions/{}/{}/'
WORKEDEXAMPLE_ENV_ATTRIBUTE = 'workedexample_all_worked_examples'
@lru_cache()
def get_extension_explorer_extensions_json():
return requests.get('https://extensions.open-contracting.org/extensions.json').json()
class Error(SphinxError):
category = 'sphinxcontrib-opencontracting error'
class FieldDescription(Directive):
required_arguments = 2
def run(self):
filename = self.arguments[0]
pointer = self.arguments[1]
env = self.state.document.settings.env
path = os.path.join(os.path.dirname(env.doc2path(env.docname)), filename)
env.note_dependency(path)
try:
with open(path, encoding='utf-8') as f:
schema = json.load(f)
description = jsonpointer.resolve_pointer(schema, f'{pointer}/description')
except FileNotFoundError:
raise self.error(f'JSON Schema file not found: {path}')
except PermissionError:
raise self.error(f'JSON Schema file not readable: {path}')
except json.decoder.JSONDecodeError:
raise self.error(f'JSON Schema file not valid: {path}')
except jsonpointer.JsonPointerException:
raise self.error(f"Pointer '{pointer}/description' not found: {path}")
block_quote = nodes.block_quote('', *to_docutils(description).children,
classes=['directive--field-description'])
return [block_quote]
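# Illustrative reST usage of the directive registered below as 'field-description'
# (the schema filename and JSON pointer are assumptions, not taken from this file):
#   .. field-description:: release-schema.json /definitions/Tender/properties/title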
class CodeDescription(Directive):
required_arguments = 2
def run(self):
config = self.state.document.settings.env.config
language = config.overrides.get('language', 'en')
try:
headers = config.codelist_headers[language]
except KeyError:
raise self.error(f"codelist_headers in conf.py is missing a '{language}' key")
filename = self.arguments[0]
code = self.arguments[1]
env = self.state.document.settings.env
path = os.path.join(os.path.dirname(env.doc2path(env.docname)), filename)
env.note_dependency(path)
try:
with open(path, encoding='utf-8') as f:
reader = csv.DictReader(f)
description = next(row[headers['description']] for row in reader if row[headers['code']] == code)
except FileNotFoundError:
raise self.error(f'CSV codelist file not found: {path}')
except PermissionError:
raise self.error(f'CSV codelist file not readable: {path}')
except KeyError as e:
raise self.error(f"Column {e} not found ({', '.join(reader.fieldnames)}): {path}")
except StopIteration:
raise self.error(f"Value '{code}' not found in column '{headers['code']}': {path}")
block_quote = nodes.block_quote('', *to_docutils(description).children,
classes=['directive--code-description'])
return [block_quote]
class ExtensionExplorerLinkList(Directive):
def run(self):
config = self.state.document.settings.env.config
extension_versions = config.extension_versions
language = config.overrides.get('language', 'en')
items = []
extensions = get_extension_explorer_extensions_json()
for identifier, version in extension_versions.items():
name = extensions[identifier]['versions'][version]['metadata']['name']
if language not in name:
language = 'en'
url = extension_explorer_template.format(language, identifier, version)
text = name[language]
if version != 'master':
text += f' ({version})'
reference = nodes.reference('', text, refuri=url)
paragraph = nodes.paragraph('', '', reference)
item = nodes.list_item('', paragraph)
items.append(item)
return [nodes.bullet_list('', *items)]
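# Illustrative reST usage of the directive registered below as 'extensionexplorerlinklist';
# it takes no arguments and renders one bullet per entry in conf.py's extension_versions:
#   .. extensionexplorerlinklist::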
class ExtensionList(Directive):
required_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged,
'list': directives.unchanged}
def run(self):
config = self.state.document.settings.env.config
extension_versions = config.extension_versions
language = config.overrides.get('language', 'en')
extension_list_name = self.options.pop('list', '')
set_classes(self.options)
admonition_node = nodes.admonition('', **self.options)
self.add_name(admonition_node)
title_text = self.arguments[0]
textnodes, _ = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *textnodes)
title.line = 0
title.source = 'extension_list_' + extension_list_name
admonition_node += title
if 'classes' not in self.options:
admonition_node['classes'] += ['admonition', 'note']
admonition_node['classes'] += ['extension_list']
admonition_node['ids'] += ['extensionlist-' + extension_list_name]
definition_list = nodes.definition_list()
definition_list.line = 0
# Only list core extensions whose version matches the version specified in `conf.py` and whose category matches
# the category specified by the directive's `list` option.
registry = ExtensionRegistry(extension_versions_url, extensions_url)
num = 0
for identifier, version in extension_versions.items():
extension = registry.get(id=identifier, core=True, version=version)
if extension_list_name and extension.category != extension_list_name:
continue
# Avoid "403 Client Error: rate limit exceeded for url" on development branches.
try:
metadata = extension.metadata
except requests.exceptions.HTTPError:
if live_branch:
raise
metadata = {'name': {'en': identifier}, 'description': {'en': identifier}}
name = metadata['name']['en']
description = metadata['description']['en']
some_term, _ = self.state.inline_text(name, self.lineno)
some_def, _ = self.state.inline_text(description, self.lineno)
link = nodes.reference(name, '', *some_term)
link['refuri'] = extension_explorer_template.format(language, identifier, version)
link['translatable'] = True
link.source = 'extension_list_' + extension_list_name
link.line = num + 1
term = nodes.term(name, '', link)
definition_list += term
text = nodes.paragraph(description, '', *some_def)
text.source = 'extension_list_' + extension_list_name
text.line = num + 1
definition_list += nodes.definition(description, text)
if extension_list_name and not registry.filter(category=extension_list_name):
raise self.warning(f'No extensions have category {extension_list_name} in extensionlist directive')
admonition_node += definition_list
community = "The following are community extensions and are not maintained by Open Contracting Partnership."
community_text, _ = self.state.inline_text(community, self.lineno)
community_paragraph = nodes.paragraph(community, *community_text)
community_paragraph['classes'] += ['hide']
community_paragraph.source = 'extension_list_' + extension_list_name
community_paragraph.line = num + 2
admonition_node += community_paragraph
return [admonition_node]
class worked_example_list(nodes.General, nodes.Element):
pass
class worked_example(nodes.General, nodes.Element):
pass
def visit_worked_example(self, node):
self.visit_paragraph(node)
def depart_worked_example(self, node):
self.depart_paragraph(node)
class WorkedExampleList(Directive):
required_arguments = 1
final_argument_whitespace = True
option_spec = {'tag': directives.unchanged}
def run(self):
title = self.arguments[0]
tag = self.options.pop('tag', '')
return [worked_example_list(tag=tag, title=title)]
class WorkedExample(Directive):
required_arguments = 1
final_argument_whitespace = True
option_spec = {'tag': directives.unchanged}
def run(self):
env = self.state.document.settings.env
title = self.arguments[0]
tag = self.options.pop('tag', '')
target_id = f'worked-example-{env.new_serialno("worked-example")}'
target_node = nodes.target('', '', ids=[target_id])
node = worked_example()
if not hasattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE):
setattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE, [])
getattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE).append({
'docname': env.docname,
'lineno': self.lineno,
'target': target_node,
'title': title,
'tag': tag,
})
return [target_node, node]
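# Illustrative reST usage of the two directives above (names as registered in setup() below;
# the tag value is an assumption):
#   .. workedexample:: Framework agreement example
#      :tag: frameworks
#
#   .. workedexamplelist:: Worked examples about frameworks
#      :tag: frameworks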
def purge_worked_examples(app, env, docname):
if not hasattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE):
return
setattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE, [
example for example in getattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE) if example['docname'] != docname
])
def merge_worked_examples(app, env, docnames, other):
if not hasattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE):
setattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE, [])
if hasattr(other, WORKEDEXAMPLE_ENV_ATTRIBUTE):
getattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE).extend(getattr(other, WORKEDEXAMPLE_ENV_ATTRIBUTE))
def process_worked_example_nodes(app, doctree, fromdocname):
env = app.builder.env
if not hasattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE):
setattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE, [])
for node in doctree.traverse(worked_example_list):
title = node['title']
tag = node['tag']
title_node = nodes.title('', title)
admonition_node = nodes.admonition('')
admonition_node['classes'] += ['note']
admonition_node += title_node
items = []
for example in getattr(env, WORKEDEXAMPLE_ENV_ATTRIBUTE):
if tag != example['tag']:
continue
uri = f"{app.builder.get_relative_uri(fromdocname, example['docname'])}#{example['target']['refid']}"
reference = nodes.reference('', example['title'], refuri=uri)
reference['translatable'] = True
paragraph = nodes.paragraph('', '', reference)
item = nodes.list_item('', paragraph)
items.append(item)
if not items:
raise Error(f'No worked examples are tagged with {tag}')
admonition_node += nodes.bullet_list('', *items)
node.replace_self(admonition_node)
def setup(app):
app.add_directive('field-description', FieldDescription)
app.add_directive('code-description', CodeDescription)
app.add_directive('extensionexplorerlinklist', ExtensionExplorerLinkList)
app.add_directive('extensionlist', ExtensionList)
app.add_directive('workedexample', WorkedExample)
app.add_directive('workedexamplelist', WorkedExampleList)
app.add_node(worked_example_list)
app.add_node(worked_example,
html=(visit_worked_example, depart_worked_example),
latex=(visit_worked_example, depart_worked_example),
text=(visit_worked_example, depart_worked_example))
app.connect('doctree-resolved', process_worked_example_nodes)
app.connect('env-purge-doc', purge_worked_examples)
app.connect('env-merge-info', merge_worked_examples)
app.add_config_value('extension_versions', {}, True)
app.add_config_value('codelist_headers', {
'en': {'code': 'Code', 'description': 'Description'},
'es': {'code': 'Código', 'description': 'Descripción'},
'fr': {'code': 'Code', 'description': 'Description'},
'it': {'code': 'Codice', 'description': 'Descrizione'},
}, True)
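# Illustrative conf.py sketch for a project using this extension (the extension module name,
# extension ids and versions below are assumptions, not taken from this file):
#   extensions = ['sphinxcontrib.opencontracting']
#   extension_versions = {'bids': 'v1.1.5', 'lots': 'v1.1.5'}
#   codelist_headers = {'en': {'code': 'Code', 'description': 'Description'}}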
|
the-stack_0_24696
|
# TODO
# - Array conversion improvements:
# - return custom iterable object instead of Blob when converting to array
# - check array dim on conversion
# - On print extensions, print the reflected internal representation of the object (worth the extra ROM?)
# - Verify that when mp_obj is given it is indeed the right type (mp_lv_obj_t). Report error if not. can be added to mp_to_lv.
# - Implement inheritance instead of embed base methods (how? seems it's not supported, see https://github.com/micropython/micropython/issues/1159)
# - Prevent writing to const fields, but allow reading
# - When converting mp to ptr (and vice versa), verify that types are compatible. Now all pointers are casted to void*.
from __future__ import print_function
import collections
import sys
import struct
import copy
from itertools import chain
from functools import lru_cache
import json
def memoize(func):
@lru_cache(maxsize=1000000)
def memoized(*args, **kwargs):
return func(*args, **kwargs)
return memoized
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# from pudb.remote import set_trace
# set_trace(term_size=(180, 50))
from sys import argv
from argparse import ArgumentParser
import subprocess, re
from os.path import dirname, abspath
from os.path import commonprefix
script_path = dirname(abspath(__file__))
sys.path.insert(0, '%s/../pycparser' % script_path)
from pycparser import c_parser, c_ast, c_generator
#
# Argument parsing
#
argParser = ArgumentParser()
argParser.add_argument('-I', '--include', dest='include', help='Preprocessor include path', metavar='<Include Path>', action='append')
argParser.add_argument('-D', '--define', dest='define', help='Define preprocessor macro', metavar='<Macro Name>', action='append')
argParser.add_argument('-E', '--external-preprocessing', dest='ep', help='Prevent preprocessing. Assume input file is already preprocessed', metavar='<Preprocessed File>', action='store')
argParser.add_argument('-M', '--module_name', dest='module_name', help='Module name', metavar='<Module name string>', action='store')
argParser.add_argument('-MP', '--module_prefix', dest='module_prefix', help='Module prefix that starts every function name', metavar='<Prefix string>', action='store')
argParser.add_argument('-MD', '--metadata', dest='metadata', help='Optional file to emit metadata (introspection)', metavar='<MetaData File Name>', action='store')
argParser.add_argument('input', nargs='+')
argParser.set_defaults(include=[], define=[], ep=None, input=[])
args = argParser.parse_args()
module_name = args.module_name
module_prefix = args.module_prefix if args.module_prefix else args.module_name
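# Illustrative invocation sketch (file names below are assumptions, not taken from this script;
# the generated C source is written to stdout):
#   python gen_mpy.py -M lvgl -MP lv -MD lvgl.json -E lvgl.pp lvgl/lvgl.h > lv_mpy.c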
#
# C preprocessing, if needed, or just read the input files.
#
if not args.ep:
pp_cmd = 'gcc -E -std=c99 -DPYCPARSER {macros} {include} {input} {first_input}'.format(
input=' '.join('-include %s' % inp for inp in args.input),
first_input= '%s' % args.input[0],
macros = ' '.join('-D%s' % define for define in args.define),
include=' '.join('-I %s' % inc for inc in args.include))
s = subprocess.check_output(pp_cmd.split()).decode()
else:
pp_cmd = 'Preprocessing was disabled.'
s = ''
with open(args.ep, 'r') as f:
s += f.read()
#
# AST parsing helper functions
#
@memoize
def remove_declname(ast):
if hasattr(ast, 'declname'):
ast.declname = None
if isinstance(ast, tuple):
remove_declname(ast[1])
return
for i, c1 in enumerate(ast.children()):
child = ast.children()[i]
remove_declname(child)
@memoize
def add_default_declname(ast, name):
if hasattr(ast, 'declname'):
if (ast.declname == None):
ast.declname = name
if isinstance(ast, tuple):
add_default_declname(ast[1], name)
return
for i, c1 in enumerate(ast.children()):
child = ast.children()[i]
add_default_declname(child, name)
@memoize
def convert_array_to_ptr(ast):
if hasattr(ast, 'type') and isinstance(ast.type, c_ast.ArrayDecl):
ast.type = c_ast.PtrDecl(ast.type.quals if hasattr(ast.type, 'quals') else [], ast.type.type)
if isinstance(ast, tuple):
return convert_array_to_ptr(ast[1])
for i, c1 in enumerate(ast.children()):
child = ast.children()[i]
convert_array_to_ptr(child)
@memoize
def remove_quals(ast):
if hasattr(ast,'quals'):
ast.quals = []
if hasattr(ast,'dim_quals'):
ast.dim_quals = []
if isinstance(ast, tuple):
return remove_quals(ast[1])
for i, c1 in enumerate(ast.children()):
child = ast.children()[i]
        if not isinstance(child, c_ast.FuncDecl): # Don't remove quals which change function prototype
remove_quals(child)
@memoize
def remove_explicit_struct(ast):
if isinstance(ast, c_ast.TypeDecl) and isinstance(ast.type, c_ast.Struct):
explicit_struct_name = ast.type.name
# eprint('--> replace %s by %s in:\n%s' % (explicit_struct_name, explicit_structs[explicit_struct_name] if explicit_struct_name in explicit_structs else '???', ast))
if explicit_struct_name:
if explicit_struct_name in explicit_structs:
ast.type = c_ast.IdentifierType([explicit_structs[explicit_struct_name]])
elif explicit_struct_name in structs:
ast.type = c_ast.IdentifierType([explicit_struct_name])
if isinstance(ast, tuple):
return remove_explicit_struct(ast[1])
for i, c1 in enumerate(ast.children()):
child = ast.children()[i]
remove_explicit_struct(child)
@memoize
def get_type(arg, **kwargs):
if isinstance(arg, str):
return arg
remove_quals_arg = 'remove_quals' in kwargs and kwargs['remove_quals']
arg_ast = copy.deepcopy(arg)
remove_explicit_struct(arg_ast)
if remove_quals_arg: remove_quals(arg_ast)
return gen.visit(arg_ast)
@memoize
def get_name(type):
if isinstance(type, c_ast.Decl):
return type.name
if isinstance(type, c_ast.Struct) and type.name and type.name in explicit_structs:
return explicit_structs[type.name]
if isinstance(type, c_ast.Struct):
return type.name
if isinstance(type, c_ast.TypeDecl):
return type.declname
if isinstance(type, c_ast.IdentifierType):
return type.names[0]
if isinstance(type, c_ast.FuncDecl):
return type.type.declname
# if isinstance(type, (c_ast.PtrDecl, c_ast.ArrayDecl)) and hasattr(type.type, 'declname'):
# return type.type.declname
if isinstance(type, (c_ast.PtrDecl, c_ast.ArrayDecl)):
return get_type(type, remove_quals=True)
else:
return gen.visit(type)
@memoize
def remove_arg_names(ast):
if isinstance(ast, c_ast.TypeDecl):
ast.declname = None
remove_arg_names(ast.type)
elif isinstance(ast, c_ast.Decl): remove_arg_names(ast.type)
elif isinstance(ast, c_ast.FuncDecl): remove_arg_names(ast.args)
elif isinstance(ast, c_ast.ParamList):
for param in ast.params: remove_arg_names(param)
# Create a function prototype AST from a function AST
@memoize
def function_prototype(func):
bare_func = copy.deepcopy(func)
remove_declname(bare_func)
ptr_decl = c_ast.PtrDecl(
quals=[],
type=bare_func.type)
func_proto = c_ast.Typename(
name=None,
quals=[],
type=ptr_decl)
return func_proto
#
# module specific text patterns
# IGNORECASE and "lower" are used to match both function and enum names
#
base_obj_name = 'obj'
base_obj_type = '%s_%s_t' % (module_prefix, base_obj_name)
lv_ext_pattern = re.compile('^{prefix}_([^_]+)_ext_t'.format(prefix=module_prefix))
lv_obj_pattern = re.compile('^{prefix}_([^_]+)'.format(prefix=module_prefix), re.IGNORECASE)
lv_func_pattern = re.compile('^{prefix}_(.+)'.format(prefix=module_prefix), re.IGNORECASE)
create_obj_pattern = re.compile('^{prefix}_([^_]+)_create'.format(prefix=module_prefix))
lv_method_pattern = re.compile('^{prefix}_[^_]+_(.+)'.format(prefix=module_prefix), re.IGNORECASE)
lv_base_obj_pattern = re.compile('^(struct _){{0,1}}{prefix}_{base_name}_t( [*]){{0,1}}'.format(prefix=module_prefix, base_name = base_obj_name))
lv_str_enum_pattern = re.compile('^_{prefix}_STR_(.+)'.format(prefix=module_prefix.upper()))
lv_callback_type_pattern = re.compile('({prefix}_){{0,1}}(.+)_cb(_t){{0,1}}'.format(prefix=module_prefix))
lv_global_callback_pattern = re.compile('.*g_cb_t')
lv_func_returns_array = re.compile('.*_array$')
lv_enum_name_pattern = re.compile('^(ENUM_){{0,1}}({prefix}_){{0,1}}(.*)'.format(prefix=module_prefix.upper()))
# Prevent identifier names which are Python reserved words (add an underscore in such cases)
def sanitize(id, kwlist =
['False', 'None', 'True', 'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else',
'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'nonlocal', 'not', 'or',
'pass', 'raise', 'return', 'try', 'while', 'with', 'yield']):
if id in kwlist:
result = "_%s" % id
else:
result = id
result = result.strip()
result = result.replace(' ','_')
return result
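# Illustrative behaviour of sanitize() (examples added for clarity, not from the original):
#   sanitize('class')    -> '_class'
#   sanitize('long int') -> 'long_int'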
@memoize
def simplify_identifier(id):
match_result = lv_func_pattern.match(id)
return match_result.group(1) if match_result else id
def obj_name_from_ext_name(ext_name):
return lv_ext_pattern.match(ext_name).group(1)
def obj_name_from_func_name(func_name):
return lv_obj_pattern.match(func_name).group(1)
def ctor_name_from_obj_name(obj_name):
return '{prefix}_{obj}_create'.format(prefix=module_prefix, obj=obj_name)
def is_method_of(func_name, obj_name):
return func_name.lower().startswith('{prefix}_{obj}_'.format(prefix=module_prefix, obj=obj_name).lower())
def method_name_from_func_name(func_name):
res = lv_method_pattern.match(func_name).group(1)
return res if res != "del" else "delete" # del is a resrved name, don't use it
def get_enum_name(enum):
match_result = lv_enum_name_pattern.match(enum)
return match_result.group(3) if match_result else enum
def str_enum_to_str(str_enum):
res = lv_str_enum_pattern.match(str_enum).group(1)
return ('%s_' % module_prefix.upper()) + res
def user_data_from_callback_func(callback_func_name):
return 'user_data'
# res = lv_callback_type_pattern.match(callback_func_name)
# return res.group(2) + '_user_data' if res and res.group(2) else None
def is_obj_ctor(func):
# ctor name must match pattern
if not create_obj_pattern.match(func.name): return False
# ctor must return a base_obj type
if not lv_base_obj_pattern.match(get_type(func.type.type, remove_quals=True)): return False
# ctor must receive (at least) two base obj parameters
args = func.type.args.params
if len(args) < 2: return False
if not lv_base_obj_pattern.match(get_type(args[0].type, remove_quals=True)): return False
if not lv_base_obj_pattern.match(get_type(args[1].type, remove_quals=True)): return False
return True
def is_global_callback(arg_type):
arg_type_str = get_name(arg_type.type)
# print('/* --> is_global_callback %s: %s */' % (lv_global_callback_pattern.match(arg_type_str), arg_type_str))
result = lv_global_callback_pattern.match(arg_type_str)
return result
#
# Initialization, data structures, helper functions
#
# We consider union as a struct, for simplicity
def is_struct(type):
return isinstance(type, c_ast.Struct) or isinstance(type, c_ast.Union)
obj_metadata = collections.OrderedDict()
func_metadata = collections.OrderedDict()
callback_metadata = collections.OrderedDict()
func_prototypes = {}
parser = c_parser.CParser()
gen = c_generator.CGenerator()
ast = parser.parse(s, filename='<none>')
func_defs = [x.decl for x in ast.ext if isinstance(x, c_ast.FuncDef)]
func_decls = [x for x in ast.ext if isinstance(x, c_ast.Decl) and isinstance(x.type, c_ast.FuncDecl)]
all_funcs = func_defs + func_decls
funcs = [f for f in all_funcs if not f.name.startswith('_')] # functions that start with underscore are usually internal
# eprint('... %s' % ',\n'.join(sorted('%s' % func.name for func in funcs)))
obj_ctors = [func for func in funcs if is_obj_ctor(func)]
for obj_ctor in obj_ctors:
funcs.remove(obj_ctor)
obj_names = [create_obj_pattern.match(ctor.name).group(1) for ctor in obj_ctors]
typedefs = [x.type for x in ast.ext if isinstance(x, c_ast.Typedef)] # and not (hasattr(x.type, 'declname') and lv_base_obj_pattern.match(x.type.declname))]
# eprint('... %s' % str(typedefs))
struct_typedefs = [typedef for typedef in typedefs if is_struct(typedef.type)]
structs = collections.OrderedDict((typedef.declname, typedef.type) for typedef in struct_typedefs if typedef.declname and typedef.type.decls) # and not lv_base_obj_pattern.match(typedef.declname))
structs_without_typedef = collections.OrderedDict((decl.type.name, decl.type) for decl in ast.ext if hasattr(decl, 'type') and is_struct(decl.type))
structs.update(structs_without_typedef) # This is for struct without typedef
explicit_structs = collections.OrderedDict((typedef.type.name, typedef.declname) for typedef in struct_typedefs if typedef.type.name) # and not lv_base_obj_pattern.match(typedef.type.name))
# print('/* --> structs:\n%s */' % ',\n'.join(sorted(str(structs[struct_name]) for struct_name in structs if struct_name)))
# print('/* --> structs_without_typedef:\n%s */' % ',\n'.join(sorted(str(structs_without_typedef[struct_name]) for struct_name in structs_without_typedef if struct_name)))
# print('/* --> explicit_structs:\n%s */' % ',\n'.join(sorted(str(explicit_structs[struct_name]) for struct_name in explicit_structs if struct_name)))
# eprint('/* --> structs without typedef:\n%s */' % ',\n'.join(sorted(str(structs[struct_name]) for struct_name in structs_without_typedef)))
def has_ctor(obj_name):
return ctor_name_from_obj_name(obj_name) in [ctor.name for ctor in obj_ctors]
def get_ctor(obj_name):
global obj_ctors
return next(ctor for ctor in obj_ctors if ctor.name == ctor_name_from_obj_name(obj_name))
def get_methods(obj_name):
global funcs
return [func for func in funcs \
if is_method_of(func.name,obj_name) and \
(not func.name == ctor_name_from_obj_name(obj_name))]
@memoize
def noncommon_part(member_name, stem_name):
common_part = commonprefix([member_name, stem_name])
n = len(common_part) - 1
while n > 0 and member_name[n] != '_': n-=1
return member_name[n+1:]
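# Illustrative behaviour of noncommon_part() (example added for clarity, not from the original):
#   noncommon_part('lv_obj_set_x', 'lv_obj_t') -> 'set_x'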
@memoize
def get_first_arg_type(func):
if not func.type.args:
return None
if not len(func.type.args.params) >= 1:
return None
if not func.type.args.params[0].type.type:
return None
return get_type(func.type.args.params[0].type.type, remove_quals = True)
# "struct function" starts with struct name (without _t), and their first argument is a pointer to the struct
# Need also to take into account struct functions of aliases of current struct.
@memoize
def get_struct_functions(struct_name):
global funcs
if not struct_name:
return []
base_struct_name = struct_name[:-2] if struct_name.endswith('_t') else struct_name
# eprint("get_struct_functions %s: %s" % (struct_name, [get_type(func.type.args.params[0].type.type, remove_quals = True) for func in funcs if func.name.startswith(base_struct_name)]))
# eprint("get_struct_functions %s: %s" % (struct_name, struct_aliases[struct_name] if struct_name in struct_aliases else ""))
# for func in funcs:
# print("/* get_struct_functions: func=%s, struct=%s, noncommon part=%s */" % (simplify_identifier(func.name), simplify_identifier(struct_name),
# noncommon_part(simplify_identifier(func.name), simplify_identifier(struct_name))))
reverse_aliases = [alias for alias in struct_aliases if struct_aliases[alias] == struct_name]
return ([func for func in funcs \
if noncommon_part(simplify_identifier(func.name), simplify_identifier(struct_name)) != simplify_identifier(func.name) \
and get_first_arg_type(func) == struct_name] if (struct_name in structs or len(reverse_aliases) > 0) else []) + \
(get_struct_functions(struct_aliases[struct_name]) if struct_name in struct_aliases else [])
@memoize
def is_struct_function(func):
return func in get_struct_functions(get_first_arg_type(func))
# is_static_member returns true if the function does not receive the obj as the first argument
# and the function is not a struct function
@memoize
def is_static_member(func, obj_type=base_obj_type):
if is_struct_function(func):
return False
first_arg_type = get_first_arg_type(func)
return (first_arg_type == None) or (first_arg_type != obj_type)
# All objects should inherit directly from base_obj, and not according to lv_ext, as discussed on https://github.com/littlevgl/lv_binding_micropython/issues/19
parent_obj_names = {child_name: base_obj_name for child_name in obj_names if child_name != base_obj_name}
parent_obj_names[base_obj_name] = None
# Populate inheritance hierarchy according to lv_ext structures
# exts = {obj_name_from_ext_name(ext.name): ext for ext in ast.ext if hasattr(ext, 'name') and ext.name is not None and lv_ext_pattern.match(ext.name)}
# for obj_name, ext in exts.items():
# try:
# parent_ext_name = ext.type.type.decls[0].type.type.names[0]
# if lv_ext_pattern.match(parent_ext_name):
# parent_obj_names[obj_name] = obj_name_from_ext_name(parent_ext_name)
# except AttributeError:
# pass
# Parse Enums
enum_defs = [x for x in ast.ext if hasattr(x,'type') and isinstance(x.type, c_ast.Enum)]
enum_defs += [x.type for x in ast.ext if hasattr(x, 'type') and hasattr(x.type, 'type') and isinstance(x.type, c_ast.TypeDecl) and isinstance(x.type.type, c_ast.Enum)]
# Enum member access functions.
def get_enum_members(obj_name):
global enums
if not obj_name in enums:
return []
return [enum_member_name for enum_member_name, value in enums[obj_name].items()]
def get_enum_member_name(enum_member):
if enum_member[0].isdigit():
enum_member = '_' + enum_member # needs to be a valid attribute name
return enum_member
def get_enum_value(obj_name, enum_member):
return enums[obj_name][enum_member]
# eprint(enums)
# parse function pointers
func_typedefs = collections.OrderedDict((t.name, t) for t in ast.ext if isinstance(t, c_ast.Typedef) and isinstance(t.type, c_ast.PtrDecl) and isinstance(t.type.type, c_ast.FuncDecl))
# Global blobs
blobs = collections.OrderedDict((decl.name, decl.type.type) for decl in ast.ext \
if isinstance(decl, c_ast.Decl) \
and 'extern' in decl.storage \
and hasattr(decl, 'type') \
and isinstance(decl.type, c_ast.TypeDecl))
int_constants = []
#
# Type convertors
#
class MissingConversionException(ValueError):
pass
mp_to_lv = {
'mp_obj_t' : '(mp_obj_t)',
'va_list' : None,
'void *' : 'mp_to_ptr',
'const uint8_t *' : 'mp_to_ptr',
'const void *' : 'mp_to_ptr',
'bool' : 'mp_obj_is_true',
'char *' : '(char*)convert_from_str',
'const char *' : 'convert_from_str',
'const unsigned char *' : 'convert_from_str',
'char **' : 'mp_to_ptr',
'const char **' : 'mp_to_ptr',
'%s_obj_t *'% module_prefix : 'mp_to_lv',
'uint8_t' : '(uint8_t)mp_obj_get_int',
'uint16_t' : '(uint16_t)mp_obj_get_int',
'uint32_t' : '(uint32_t)mp_obj_get_int',
'uint64_t' : '(uint64_t)mp_obj_get_ull',
'unsigned' : '(unsigned)mp_obj_get_int',
'unsigned int' : '(unsigned int)mp_obj_get_int',
'unsigned char' : '(unsigned char)mp_obj_get_int',
'unsigned short' : '(unsigned short)mp_obj_get_int',
'unsigned long' : '(unsigned long)mp_obj_get_int',
'unsigned long int' : '(unsigned long int)mp_obj_get_int',
'unsigned long long' : '(unsigned long long)mp_obj_get_ull',
'unsigned long long int' : '(unsigned long long int)mp_obj_get_ull',
'int8_t' : '(int8_t)mp_obj_get_int',
'int16_t' : '(int16_t)mp_obj_get_int',
'int32_t' : '(int32_t)mp_obj_get_int',
'int64_t' : '(int64_t)mp_obj_get_ull',
'size_t' : '(size_t)mp_obj_get_int',
'int' : '(int)mp_obj_get_int',
'char' : '(char)mp_obj_get_int',
'short' : '(short)mp_obj_get_int',
'long' : '(long)mp_obj_get_int',
'long int' : '(long int)mp_obj_get_int',
'long long' : '(long long)mp_obj_get_ull',
'long long int' : '(long long int)mp_obj_get_ull',
'float' : 'mp_obj_get_float',
}
lv_to_mp = {
'mp_obj_t' : '(mp_obj_t)',
'va_list' : None,
'void *' : 'ptr_to_mp',
'const uint8_t *' : 'ptr_to_mp',
'const void *' : 'ptr_to_mp',
'bool' : 'convert_to_bool',
'char *' : 'convert_to_str',
'const char *' : 'convert_to_str',
'const unsigned char *' : 'convert_to_str',
'char **' : 'ptr_to_mp',
'const char **' : 'ptr_to_mp',
'%s_obj_t *'% module_prefix : 'lv_to_mp',
'uint8_t' : 'mp_obj_new_int_from_uint',
'uint16_t' : 'mp_obj_new_int_from_uint',
'uint32_t' : 'mp_obj_new_int_from_uint',
'uint64_t' : 'mp_obj_new_int_from_ull',
'unsigned' : 'mp_obj_new_int_from_uint',
'unsigned int' : 'mp_obj_new_int_from_uint',
'unsigned char' : 'mp_obj_new_int_from_uint',
'unsigned short' : 'mp_obj_new_int_from_uint',
'unsigned long' : 'mp_obj_new_int_from_uint',
'unsigned long int' : 'mp_obj_new_int_from_uint',
'unsigned long long' : 'mp_obj_new_int_from_ull',
'unsigned long long int' : 'mp_obj_new_int_from_ull',
'int8_t' : 'mp_obj_new_int',
'int16_t' : 'mp_obj_new_int',
'int32_t' : 'mp_obj_new_int',
'int64_t' : 'mp_obj_new_int_from_ll',
'size_t' : 'mp_obj_new_int_from_uint',
'int' : 'mp_obj_new_int',
'char' : 'mp_obj_new_int',
'short' : 'mp_obj_new_int',
'long' : 'mp_obj_new_int',
'long int' : 'mp_obj_new_int',
'long long' : 'mp_obj_new_int_from_ll',
'long long int' : 'mp_obj_new_int_from_ll',
'float' : 'mp_obj_new_float',
}
lv_mp_type = {
'mp_obj_t' : 'object',
'va_list' : None,
'void *' : 'pointer',
'const uint8_t *' : 'pointer',
'const void *' : 'pointer',
'bool' : 'bool',
'char *' : 'str',
'const char *' : 'str',
'const unsigned char *' : 'str',
'char **' : 'pointer',
'const char **' : 'pointer',
'%s_obj_t *'% module_prefix : 'object',
'uint8_t' : 'int',
'uint16_t' : 'int',
'uint32_t' : 'int',
'uint64_t' : 'int',
'unsigned' : 'int',
'unsigned int' : 'int',
'unsigned char' : 'int',
'unsigned short' : 'int',
'unsigned long' : 'int',
'unsigned long int' : 'int',
'unsigned long long' : 'int',
'unsigned long long int' : 'int',
'int8_t' : 'int',
'int16_t' : 'int',
'int32_t' : 'int',
'int64_t' : 'int',
'size_t' : 'int',
'int' : 'int',
'char' : 'int',
'short' : 'int',
'long' : 'int',
'long int' : 'int',
'long long' : 'int',
'long long int' : 'int',
'void' : 'NoneType',
'float' : 'float',
}
lv_to_mp_byref = {}
lv_to_mp_funcptr = {}
#
# Emit Header
#
print ("""
/*
* Auto-Generated file, DO NOT EDIT!
*
* Command line:
* {cmd_line}
*
* Preprocessing command:
* {pp_cmd}
*
* Generating Objects: {objs}
*/
/*
* Mpy includes
*/
#include <stdlib.h>
#include <string.h>
#include "py/obj.h"
#include "py/objint.h"
#include "py/objstr.h"
#include "py/runtime.h"
#include "py/binary.h"
#include "py/objarray.h"
/*
* {module_name} includes
*/
{lv_headers}
""".format(
module_name = module_name,
cmd_line=' '.join(argv),
pp_cmd=pp_cmd,
objs=", ".join(['%s(%s)' % (objname, parent_obj_names[objname]) for objname in obj_names]),
lv_headers='\n'.join('#include "%s"' % header for header in args.input)))
#
# Enable objects, if supported
#
if len(obj_names) > 0:
print("""
#define LV_OBJ_T {obj_type}
STATIC const mp_obj_type_t mp_{base_obj}_type;
STATIC inline const mp_obj_type_t *get_BaseObj_type()
{{
return &mp_{base_obj}_type;
}}
""".format(
obj_type = base_obj_type,
base_obj = base_obj_name
))
#
# Emit Mpy helper functions
#
print("""
/*
* Helper functions
*/
#ifndef GENMPY_UNUSED
#ifdef __GNUC__
#define GENMPY_UNUSED __attribute__ ((unused))
#else
#define GENMPY_UNUSED
#endif // __GNUC__
#endif // GENMPY_UNUSED
// Custom function mp object
typedef mp_obj_t (*mp_fun_ptr_var_t)(size_t n, const mp_obj_t *, void *ptr);
typedef struct mp_lv_obj_fun_builtin_var_t {
mp_obj_base_t base;
mp_uint_t n_args;
mp_fun_ptr_var_t mp_fun;
void *lv_fun;
} mp_lv_obj_fun_builtin_var_t;
STATIC mp_obj_t lv_fun_builtin_var_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
STATIC mp_int_t mp_func_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags);
GENMPY_UNUSED STATIC const mp_obj_type_t mp_lv_type_fun_builtin_var = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
.name = MP_QSTR_function,
.call = lv_fun_builtin_var_call,
.unary_op = mp_generic_unary_op,
.buffer_p = { .get_buffer = mp_func_get_buffer }
};
GENMPY_UNUSED STATIC const mp_obj_type_t mp_lv_type_fun_builtin_static_var = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BUILTIN_FUN,
.name = MP_QSTR_function,
.call = lv_fun_builtin_var_call,
.unary_op = mp_generic_unary_op,
.buffer_p = { .get_buffer = mp_func_get_buffer }
};
STATIC mp_obj_t lv_fun_builtin_var_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
assert(MP_OBJ_IS_TYPE(self_in, &mp_lv_type_fun_builtin_var) ||
MP_OBJ_IS_TYPE(self_in, &mp_lv_type_fun_builtin_static_var));
mp_lv_obj_fun_builtin_var_t *self = MP_OBJ_TO_PTR(self_in);
mp_arg_check_num(n_args, n_kw, self->n_args, self->n_args, false);
return self->mp_fun(n_args, args, self->lv_fun);
}
STATIC mp_int_t mp_func_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
(void)flags;
assert(MP_OBJ_IS_TYPE(self_in, &mp_lv_type_fun_builtin_var) ||
MP_OBJ_IS_TYPE(self_in, &mp_lv_type_fun_builtin_static_var));
mp_lv_obj_fun_builtin_var_t *self = MP_OBJ_TO_PTR(self_in);
bufinfo->buf = &self->lv_fun;
bufinfo->len = sizeof(self->lv_fun);
bufinfo->typecode = BYTEARRAY_TYPECODE;
return 0;
}
#define MP_DEFINE_CONST_LV_FUN_OBJ_VAR(obj_name, n_args, mp_fun, lv_fun) \\
const mp_lv_obj_fun_builtin_var_t obj_name = \\
{{&mp_lv_type_fun_builtin_var}, n_args, mp_fun, lv_fun}
#define MP_DEFINE_CONST_LV_FUN_OBJ_STATIC_VAR(obj_name, n_args, mp_fun, lv_fun) \\
const mp_lv_obj_fun_builtin_var_t obj_name = \\
{{&mp_lv_type_fun_builtin_static_var}, n_args, mp_fun, lv_fun}
// Casting
typedef struct mp_lv_struct_t
{
mp_obj_base_t base;
void *data;
} mp_lv_struct_t;
STATIC const mp_lv_struct_t mp_lv_null_obj;
#ifdef LV_OBJ_T
STATIC mp_int_t mp_lv_obj_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags);
#else
STATIC mp_int_t mp_lv_obj_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags){ return 0; }
#endif
STATIC mp_obj_t get_native_obj(mp_obj_t *mp_obj)
{
if (!MP_OBJ_IS_OBJ(mp_obj)) return mp_obj;
const mp_obj_type_t *native_type = ((mp_obj_base_t*)mp_obj)->type;
if (native_type->parent == NULL ||
(native_type->buffer_p.get_buffer == mp_lv_obj_get_buffer)) return mp_obj;
while (native_type->parent) native_type = native_type->parent;
return mp_obj_cast_to_native_base(mp_obj, MP_OBJ_FROM_PTR(native_type));
}
STATIC mp_obj_t dict_to_struct(mp_obj_t dict, const mp_obj_type_t *type);
STATIC mp_obj_t make_new_lv_struct(
const mp_obj_type_t *type,
size_t n_args,
size_t n_kw,
const mp_obj_t *args);
STATIC mp_obj_t *cast(mp_obj_t *mp_obj, const mp_obj_type_t *mp_type)
{
mp_obj_t *res = NULL;
if (mp_obj == mp_const_none && mp_type->make_new == &make_new_lv_struct) {
res = MP_OBJ_FROM_PTR(&mp_lv_null_obj);
} else if (MP_OBJ_IS_OBJ(mp_obj)) {
res = get_native_obj(mp_obj);
if (res){
const mp_obj_type_t *res_type = ((mp_obj_base_t*)res)->type;
if (res_type != mp_type){
if (res_type == &mp_type_dict &&
mp_type->make_new == &make_new_lv_struct)
res = dict_to_struct(res, mp_type);
else res = NULL;
}
}
}
if (res == NULL) nlr_raise(
mp_obj_new_exception_msg_varg(
&mp_type_SyntaxError, MP_ERROR_TEXT("Can't convert %s to %s!"), mp_obj_get_type_str(mp_obj), qstr_str(mp_type->name)));
return res;
}
// object handling
// This section is enabled only when objects are supported
#ifdef LV_OBJ_T
typedef LV_OBJ_T* (*lv_create)(LV_OBJ_T * par, const LV_OBJ_T * copy);
typedef struct mp_lv_obj_t {
mp_obj_base_t base;
LV_OBJ_T *lv_obj;
LV_OBJ_T *callbacks;
} mp_lv_obj_t;
STATIC inline LV_OBJ_T *mp_to_lv(mp_obj_t *mp_obj)
{
if (mp_obj == NULL || mp_obj == mp_const_none) return NULL;
mp_lv_obj_t *mp_lv_obj = MP_OBJ_TO_PTR(get_native_obj(mp_obj));
return mp_lv_obj->lv_obj;
}
STATIC inline LV_OBJ_T *mp_get_callbacks(mp_obj_t mp_obj)
{
if (mp_obj == NULL || mp_obj == mp_const_none) return NULL;
mp_lv_obj_t *mp_lv_obj = MP_OBJ_TO_PTR(get_native_obj(mp_obj));
if (!mp_lv_obj->callbacks) mp_lv_obj->callbacks = mp_obj_new_dict(0);
return mp_lv_obj->callbacks;
}
STATIC inline const mp_obj_type_t *get_BaseObj_type();
STATIC inline mp_obj_t *lv_to_mp(LV_OBJ_T *lv_obj)
{
if (lv_obj == NULL) return mp_const_none;
mp_lv_obj_t *self = (mp_lv_obj_t*)lv_obj->user_data;
if (!self)
{
self = m_new_obj(mp_lv_obj_t);
*self = (mp_lv_obj_t){
.base = {get_BaseObj_type()},
.lv_obj = lv_obj,
.callbacks = NULL,
};
lv_obj->user_data = self;
}
return MP_OBJ_FROM_PTR(self);
}
STATIC mp_obj_t make_new(
lv_create create,
const mp_obj_type_t *type,
size_t n_args,
size_t n_kw,
const mp_obj_t *args)
{
mp_arg_check_num(n_args, n_kw, 0, 2, false);
mp_lv_obj_t *self = m_new_obj(mp_lv_obj_t);
LV_OBJ_T *parent = n_args > 0? mp_to_lv(args[0]): NULL;
LV_OBJ_T *copy = n_args > 1? mp_to_lv(args[1]): NULL;
*self = (mp_lv_obj_t){
.base = {type},
.lv_obj = create(parent, copy),
.callbacks = NULL,
};
if (!self->lv_obj) return mp_const_none;
self->lv_obj->user_data = self;
return MP_OBJ_FROM_PTR(self);
}
STATIC void* mp_to_ptr(mp_obj_t self_in);
STATIC mp_obj_t cast_obj(mp_obj_t type_obj, mp_obj_t obj)
{
mp_lv_obj_t *self = m_new_obj(mp_lv_obj_t);
*self = (mp_lv_obj_t){
.base = {(const mp_obj_type_t*)type_obj},
.lv_obj = mp_to_ptr(obj),
.callbacks = NULL,
};
if (!self->lv_obj) return mp_const_none;
return MP_OBJ_FROM_PTR(self);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(cast_obj_obj, cast_obj);
STATIC MP_DEFINE_CONST_CLASSMETHOD_OBJ(cast_obj_class_method, MP_ROM_PTR(&cast_obj_obj));
STATIC mp_int_t mp_lv_obj_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
(void)flags;
mp_lv_obj_t *self = MP_OBJ_TO_PTR(self_in);
bufinfo->buf = &self->lv_obj;
bufinfo->len = sizeof(self->lv_obj);
bufinfo->typecode = BYTEARRAY_TYPECODE;
return 0;
}
#endif
STATIC inline mp_obj_t convert_to_bool(bool b)
{
return b? mp_const_true: mp_const_false;
}
STATIC inline mp_obj_t convert_to_str(const char *str)
{
return str? mp_obj_new_str(str, strlen(str)): mp_const_none;
}
STATIC inline const char *convert_from_str(mp_obj_t str)
{
if (str == NULL || str == mp_const_none)
return NULL;
if (MP_OBJ_IS_TYPE(str, &mp_type_bytearray) ||
MP_OBJ_IS_TYPE(str, &mp_type_memoryview)) {
mp_buffer_info_t buffer_info;
if (mp_get_buffer(str, &buffer_info, MP_BUFFER_READ)) {
return buffer_info.buf;
}
}
return mp_obj_str_get_str(str);
}
// struct handling
STATIC inline mp_lv_struct_t *mp_to_lv_struct(mp_obj_t mp_obj)
{
if (mp_obj == NULL || mp_obj == mp_const_none) return NULL;
if (!MP_OBJ_IS_OBJ(mp_obj)) nlr_raise(
mp_obj_new_exception_msg(
&mp_type_SyntaxError, MP_ERROR_TEXT("Struct argument is not an object!")));
mp_lv_struct_t *mp_lv_struct = MP_OBJ_TO_PTR(get_native_obj(mp_obj));
return mp_lv_struct;
}
STATIC inline size_t get_lv_struct_size(const mp_obj_type_t *type)
{
mp_obj_t size_obj = mp_obj_dict_get(type->locals_dict, MP_OBJ_NEW_QSTR(MP_QSTR_SIZE));
return (size_t)mp_obj_get_int(size_obj);
}
STATIC mp_obj_t make_new_lv_struct(
const mp_obj_type_t *type,
size_t n_args,
size_t n_kw,
const mp_obj_t *args)
{
if ((!MP_OBJ_IS_TYPE(type, &mp_type_type)) || type->make_new != &make_new_lv_struct)
nlr_raise(
mp_obj_new_exception_msg(
&mp_type_SyntaxError, MP_ERROR_TEXT("Argument is not a struct type!")));
size_t size = get_lv_struct_size(type);
mp_arg_check_num(n_args, n_kw, 0, 1, false);
mp_lv_struct_t *self = m_new_obj(mp_lv_struct_t);
*self = (mp_lv_struct_t){
.base = {type},
.data = m_malloc(size)
};
mp_lv_struct_t *other = n_args > 0? mp_to_lv_struct(cast(args[0], type)): NULL;
if (other) {
memcpy(self->data, other->data, size);
} else {
memset(self->data, 0, size);
}
return MP_OBJ_FROM_PTR(self);
}
STATIC void *copy_buffer(const void *buffer, size_t size)
{
void *new_buffer = m_malloc(size);
memcpy(new_buffer, buffer, size);
return new_buffer;
}
// Reference an existing lv struct (or part of it)
STATIC mp_obj_t lv_to_mp_struct(const mp_obj_type_t *type, void *lv_struct)
{
if (lv_struct == NULL) return mp_const_none;
mp_lv_struct_t *self = m_new_obj(mp_lv_struct_t);
*self = (mp_lv_struct_t){
.base = {type},
.data = lv_struct
};
return MP_OBJ_FROM_PTR(self);
}
STATIC void call_parent_methods(mp_obj_t obj, qstr attr, mp_obj_t *dest)
{
const mp_obj_type_t *type = mp_obj_get_type(obj);
while (type->locals_dict != NULL) {
// generic method lookup
// this is a lookup in the object (ie not class or type)
assert(type->locals_dict->base.type == &mp_type_dict); // MicroPython restriction, for now
mp_map_t *locals_map = &type->locals_dict->map;
mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
if (elem != NULL) {
mp_convert_member_lookup(obj, type, elem->value, dest);
break;
}
if (type->parent == NULL) {
break;
}
// search parents
type = type->parent;
}
}
// Convert dict to struct
STATIC mp_obj_t dict_to_struct(mp_obj_t dict, const mp_obj_type_t *type)
{
mp_obj_t mp_struct = make_new_lv_struct(type, 0, 0, NULL);
mp_obj_t *native_dict = cast(dict, &mp_type_dict);
mp_map_t *map = mp_obj_dict_get_map(native_dict);
if (map == NULL) return mp_const_none;
for (uint i = 0; i < map->alloc; i++) {
mp_obj_t key = map->table[i].key;
mp_obj_t value = map->table[i].value;
if (key != MP_OBJ_NULL) {
mp_obj_t dest[] = {MP_OBJ_SENTINEL, value};
type->attr(mp_struct, mp_obj_str_get_qstr(key), dest);
if (dest[0]) nlr_raise(
mp_obj_new_exception_msg_varg(
&mp_type_SyntaxError, MP_ERROR_TEXT("Cannot set field %s on struct %s!"), qstr_str(mp_obj_str_get_qstr(key)), qstr_str(type->name)));
}
}
return mp_struct;
}
// Convert mp object to ptr
STATIC void* mp_to_ptr(mp_obj_t self_in)
{
mp_buffer_info_t buffer_info;
if (self_in == mp_const_none)
return NULL;
// if (MP_OBJ_IS_INT(self_in))
// return (void*)mp_obj_get_int(self_in);
if (!mp_get_buffer(self_in, &buffer_info, MP_BUFFER_READ)) {
// No buffer protocol - this is not a Struct or a Blob, it's some other mp object.
        // We only allow passing a dict directly, since it's useful for setting user_data for passing data to C.
        // In other cases throw an exception, to avoid a crash later
if (MP_OBJ_IS_TYPE(self_in, &mp_type_dict))
return MP_OBJ_TO_PTR(self_in);
else nlr_raise(
mp_obj_new_exception_msg_varg(
&mp_type_SyntaxError, MP_ERROR_TEXT("Cannot convert '%s' to pointer!"), mp_obj_get_type_str(self_in)));
}
if (MP_OBJ_IS_STR_OR_BYTES(self_in) ||
MP_OBJ_IS_TYPE(self_in, &mp_type_bytearray) ||
MP_OBJ_IS_TYPE(self_in, &mp_type_memoryview))
return buffer_info.buf;
else
{
void *result;
if (buffer_info.len != sizeof(result) || buffer_info.typecode != BYTEARRAY_TYPECODE){
nlr_raise(
mp_obj_new_exception_msg_varg(
&mp_type_SyntaxError, MP_ERROR_TEXT("Cannot convert %s to pointer! (buffer does not represent a pointer)"), mp_obj_get_type_str(self_in)));
}
memcpy(&result, buffer_info.buf, sizeof(result));
return result;
}
}
// Blob is a wrapper for void*
STATIC void mp_blob_print(const mp_print_t *print,
mp_obj_t self_in,
mp_print_kind_t kind)
{
mp_printf(print, "Blob");
}
STATIC mp_int_t mp_blob_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
(void)flags;
mp_lv_struct_t *self = MP_OBJ_TO_PTR(self_in);
bufinfo->buf = &self->data;
bufinfo->len = sizeof(self->data);
bufinfo->typecode = BYTEARRAY_TYPECODE;
return 0;
}
STATIC const mp_obj_fun_builtin_var_t mp_lv_dereference_obj;
// Sometimes (but not always!) Blob represents a Micropython object.
// In such cases it's safe to cast the Blob back to the Micropython object
// cast argument is the underlying object type, and it's optional.
STATIC mp_obj_t mp_blob_cast(size_t argc, const mp_obj_t *argv)
{
mp_obj_t self = argv[0];
void *ptr = mp_to_ptr(self);
if (argc == 1) return MP_OBJ_FROM_PTR(ptr);
mp_obj_t type = argv[1];
if (!MP_OBJ_IS_TYPE(type, &mp_type_type))
nlr_raise(
mp_obj_new_exception_msg(
&mp_type_SyntaxError, MP_ERROR_TEXT("Cast argument must be a type!")));
return cast(MP_OBJ_FROM_PTR(ptr), type);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_blob_cast_obj, 1, 2, mp_blob_cast);
STATIC const mp_rom_map_elem_t mp_blob_locals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR___dereference__), MP_ROM_PTR(&mp_lv_dereference_obj) },
{ MP_ROM_QSTR(MP_QSTR_cast), MP_ROM_PTR(&mp_blob_cast_obj) },
};
STATIC MP_DEFINE_CONST_DICT(mp_blob_locals_dict, mp_blob_locals_dict_table);
STATIC const mp_obj_type_t mp_blob_type = {
{ &mp_type_type },
.name = MP_QSTR_Blob,
.print = mp_blob_print,
//.make_new = make_new_blob,
.locals_dict = (mp_obj_dict_t*)&mp_blob_locals_dict,
.buffer_p = { .get_buffer = mp_blob_get_buffer }
};
STATIC const mp_lv_struct_t mp_lv_null_obj = { {&mp_blob_type}, NULL };
STATIC inline mp_obj_t ptr_to_mp(void *data)
{
return lv_to_mp_struct(&mp_blob_type, data);
}
// Cast pointer to struct
STATIC mp_obj_t mp_lv_cast(mp_obj_t type_obj, mp_obj_t ptr_obj)
{
mp_lv_struct_t *self = m_new_obj(mp_lv_struct_t);
*self = (mp_lv_struct_t){
.base = {(const mp_obj_type_t*)type_obj},
.data = mp_to_ptr(ptr_obj)
};
return MP_OBJ_FROM_PTR(self);
}
// Cast instance. Can be used in ISR when memory allocation is prohibited
STATIC inline mp_obj_t mp_lv_cast_instance(mp_obj_t self_in, mp_obj_t ptr_obj)
{
mp_lv_struct_t *self = MP_OBJ_TO_PTR(self_in);
self->data = mp_to_ptr(ptr_obj);
return self_in;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_lv_cast_obj, mp_lv_cast);
STATIC MP_DEFINE_CONST_CLASSMETHOD_OBJ(mp_lv_cast_class_method, MP_ROM_PTR(&mp_lv_cast_obj));
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_lv_cast_instance_obj, mp_lv_cast_instance);
// Dereference a struct/blob. This allows access to the raw data the struct holds
STATIC mp_obj_t mp_lv_dereference(size_t argc, const mp_obj_t *argv)
{
mp_obj_t self_in = argv[0];
mp_obj_t size_in = argc > 1? argv[1]: mp_const_none;
mp_lv_struct_t *self = MP_OBJ_TO_PTR(self_in);
size_t size = 0;
if (size_in == mp_const_none){
const mp_obj_type_t *type = self->base.type;
size = get_lv_struct_size(type);
} else {
size = (size_t)mp_obj_get_int(size_in);
}
mp_obj_array_t *view = MP_OBJ_TO_PTR(mp_obj_new_memoryview(BYTEARRAY_TYPECODE,
size, self->data));
view->typecode |= 0x80; // used to indicate writable buffer
return MP_OBJ_FROM_PTR(view);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_lv_dereference_obj, 1, 2, mp_lv_dereference);
// Callback function handling
// Callback is either a callable object or a pointer. If it's a callable object, set user_data to the callback.
// Multiple callbacks are kept per object/struct using a dict that associate callback name with callback object
// In case of an lv_obj_t, user_data is mp_lv_obj_t which contains a member "callbacks" for that dict.
// In case of a struct, user_data is a pointer to that dict directly
STATIC mp_obj_t get_callback_dict_from_user_data(void *user_data)
{
if (user_data){
mp_obj_t obj = MP_OBJ_FROM_PTR(user_data);
#ifdef LV_OBJ_T
return
MP_OBJ_IS_TYPE(obj, &mp_type_dict)? obj: // Handle the case of dict for a struct
mp_get_callbacks(obj); // Handle the case of mp_lv_obj_t for an lv_obj_t
#else
return obj;
#endif
}
return NULL;
}
STATIC void *mp_lv_callback(mp_obj_t mp_callback, void *lv_callback, qstr callback_name, void **user_data_ptr)
{
if (lv_callback && mp_obj_is_callable(mp_callback)){
if (user_data_ptr){
// user_data is either a dict of callbacks in case of struct, or a pointer to mp_lv_obj_t in case of lv_obj_t
if (! (*user_data_ptr) ) *user_data_ptr = MP_OBJ_TO_PTR(mp_obj_new_dict(0)); // if it's NULL - it's a dict for a struct
mp_obj_t callbacks = get_callback_dict_from_user_data(*user_data_ptr);
mp_obj_dict_store(callbacks, MP_OBJ_NEW_QSTR(callback_name), mp_callback);
}
return lv_callback;
} else {
return mp_to_ptr(mp_callback);
}
}
// Function pointers wrapper
STATIC mp_obj_t mp_lv_funcptr(const mp_lv_obj_fun_builtin_var_t *mp_fun, void *lv_fun, void *lv_callback, qstr func_name, void *user_data)
{
if (lv_fun == NULL)
return mp_const_none;
if (lv_fun == lv_callback) {
mp_obj_t callbacks = get_callback_dict_from_user_data(user_data);
if (callbacks)
return mp_obj_dict_get(callbacks, MP_OBJ_NEW_QSTR(func_name));
}
mp_lv_obj_fun_builtin_var_t *funcptr = m_new_obj(mp_lv_obj_fun_builtin_var_t);
*funcptr = *mp_fun;
funcptr->lv_fun = lv_fun;
return MP_OBJ_FROM_PTR(funcptr);
}
// Missing implementation for 64bit integer conversion
STATIC unsigned long long mp_obj_get_ull(mp_obj_t obj)
{
if (mp_obj_is_small_int(obj))
return MP_OBJ_SMALL_INT_VALUE(obj);
unsigned long long val = 0;
bool big_endian = !(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__);
mp_obj_int_to_bytes_impl(obj, big_endian, sizeof(val), (byte*)&val);
return val;
}
""")
#
# Add regular enums with integer values
#
enums = collections.OrderedDict()
for enum_def in enum_defs:
# eprint("--> %s" % enum_def)
while hasattr(enum_def.type, 'name') and not enum_def.type.values:
enum_def = next(e for e in enum_defs if hasattr(e.type, 'name') and e.type.name == enum_def.type.name and e.type.values)
member_names = [member.name for member in enum_def.type.values.enumerators if not member.name.startswith('_')]
enum_name = commonprefix(member_names)
enum_name = "_".join(enum_name.split("_")[:-1]) # remove suffix
enum = collections.OrderedDict()
for member in enum_def.type.values.enumerators:
if member.name.startswith('_'):
continue
member_name = member.name[len(enum_name)+1:] if len(enum_name) > 0 else member.name
if len(enum_name) > 0 and get_enum_name(enum_name) != 'ENUM':
enum[member_name] = 'MP_ROM_INT(%s)' % member.name
else:
int_constants.append(member.name)
if len(enum) > 0:
if len(get_enum_name(enum_name)) > 0:
prev_enum = enums.get(enum_name)
if prev_enum:
prev_enum.update(enum)
else:
enums[enum_name] = enum
# Add special string enums
print ('''
/*
* LVGL string constants
*/
''')
for enum_def in enum_defs:
if not enum_def.type.values:
continue
member_names = [str_enum_to_str(member.name) for member in enum_def.type.values.enumerators if lv_str_enum_pattern.match(member.name)]
enum_name = commonprefix(member_names)
enum_name = "_".join(enum_name.split("_")[:-1]) # remove suffix
enum = collections.OrderedDict()
if enum_name:
for member in enum_def.type.values.enumerators:
full_name = str_enum_to_str(member.name)
member_name = full_name[len(enum_name)+1:]
print('MP_DEFINE_STR_OBJ(mp_%s, %s);' % (full_name, full_name))
enum[member_name] = '&mp_%s' % full_name
if len(enum) > 0:
if enum_name in enums:
enums[enum_name].update(enum)
else:
enums[enum_name] = enum
# eprint('--> enums: \n%s' % enums)
# eprint(',\n'.join(sorted('%s : %s' % (name, get_type(blobs[name])) for name in blobs)))
#
# Callback helper functions
#
def decl_to_callback(decl):
if not hasattr(decl, 'type'):
return None
if (isinstance(decl.type, c_ast.PtrDecl) and isinstance(decl.type.type, c_ast.FuncDecl)):
return (decl.name, decl.type.type)
# print('/* callback: ADDED CALLBACK: %s\n%s */' % (gen.visit(decl.type.type), decl.type.type))
elif isinstance(decl.type, c_ast.FuncDecl):
return (decl.name, decl.type)
# print('/* callback: ADDED CALLBACK: %s\n%s */' % (gen.visit(decl.type.type), decl.type.type))
elif isinstance(decl.type, c_ast.TypeDecl) and hasattr(decl.type.type,'names'):
func_typedef_name = decl.type.type.names[0]
# print('/* --> callback: TYPEDEF CALLBACK: %s: %s */' % (decl.name, func_typedef_name))
if func_typedef_name in func_typedefs:
return (decl.name, func_typedefs[func_typedef_name].type.type)
# print('/* callback: ADDED CALLBACK: %s\n%s */' % (func_typedef_name, func_typedefs[func_typedef_name]))
else: return None
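#
# Illustrative declarations that decl_to_callback recognizes (hypothetical C):
#   void (*ready_cb)(int code);   -> PtrDecl over a FuncDecl      (first branch)
#   void ready_cb(int code);      -> plain FuncDecl               (second branch)
#   my_cb_t ready_cb;             -> TypeDecl naming a typedef found in
#                                    func_typedefs                (third branch)
#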
def get_user_data(func, func_name = None, containing_struct = None, containing_struct_name = None):
args = func.args.params
if not func_name: func_name = get_arg_name(func.type)
# print('/* --> callback: func_name = %s, args = %s */' % (func_name, repr(args)))
user_data_found = False
user_data = 'None'
if len(args) > 0 and isinstance(args[0].type, c_ast.PtrDecl):
# if isinstance(args[0].type.type.type, c_ast.Struct):
# struct_arg_type_name = args[0].type.type.type.name # PtrDecl.TypeDecl.Struct. Needed to omit 'struct' keyword.
# else:
# struct_arg_type_name = get_type(args[0].type.type, remove_quals = True)
struct_arg_type_name = get_type(args[0].type.type, remove_quals = True)
# print('/* --> get_user_data: containing_struct_name = %s, struct_arg_type_name = %s */' % (containing_struct_name, struct_arg_type_name))
if containing_struct_name and struct_arg_type_name != containing_struct_name:
return None
if not containing_struct:
try_generate_type(args[0].type)
if struct_arg_type_name in structs:
containing_struct = structs[struct_arg_type_name]
# print('/* --> containing_struct = %s */' % containing_struct)
# if struct_arg_type_name in mp_to_lv:
# print('/* --> callback: %s First argument is %s */' % (gen.visit(func), struct_arg_type_name))
if containing_struct:
flatten_struct_decls = flatten_struct(containing_struct.decls)
user_data = user_data_from_callback_func(func_name)
user_data_found = user_data in [decl.name for decl in flatten_struct_decls]
# print('/* --> callback: user_data=%s user_data_found=%s containing_struct=%s */' % (user_data, user_data_found, containing_struct))
if user_data_found: return user_data
else: return None
#
# Generate structs when needed
#
generated_structs = collections.OrderedDict()
generated_struct_functions = collections.OrderedDict()
struct_aliases = collections.OrderedDict()
callbacks_used_on_structs = []
def flatten_struct(struct_decls):
result = []
if not struct_decls: return result
for decl in struct_decls:
if is_struct(decl.type):
result.extend(flatten_struct(decl.type.decls))
else:
result.append(decl)
return result
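#
# Illustrative sketch of flatten_struct (hypothetical C layout):
#   struct { int x; struct { int y; int z; }; int w; }
# is flattened into the declaration list [x, y, z, w], so fields of nested
# anonymous structs can be handled by the single flat attr switch generated in
# try_generate_struct below.
#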
def try_generate_struct(struct_name, struct):
global lv_to_mp
global mp_to_lv
if struct_name in generated_structs: return None
sanitized_struct_name = sanitize(struct_name)
generated_structs[struct_name] = False # Starting generating a struct
# print("/* Starting generating %s */" % struct_name)
if struct_name in mp_to_lv:
return mp_to_lv[struct_name]
# print('/* --> try_generate_struct %s: %s\n%s */' % (struct_name, gen.visit(struct), struct))
if not struct.decls:
if struct_name == struct.name:
return None
return try_generate_type(structs[struct.name])
flatten_struct_decls = flatten_struct(struct.decls)
# Go over fields and try to generate type convertors for each
# print('!! %s' % struct)
# print('!!! %s' % flatten_struct_decls)
write_cases = []
read_cases = []
for decl in flatten_struct_decls:
# print('/* ==> decl %s: %s */' % (gen.visit(decl), decl))
converted = try_generate_type(decl.type)
type_name = get_type(decl.type, remove_quals = True)
# print('/* --> %s: %s (%s)*/' % (decl.name, type_name, mp_to_lv[type_name] if type_name in mp_to_lv else '---'))
# Handle the case of nested struct
if not converted and is_struct(decl.type.type):
parent_name = struct_name
child_name = decl.type.declname
type_name = '%s_%s_t' % (parent_name[:-2], child_name)
print('typedef __typeof__( (({parent}*)(0))->{child} ) {new_struct};'.format(
parent = parent_name, child = child_name, new_struct = type_name))
try_generate_struct(type_name, decl.type.type)
# print('==> %s %s: %s' % (type_name, str(type_name in mp_to_lv), decl))
if (type_name not in mp_to_lv or not mp_to_lv[type_name]) or (type_name not in lv_to_mp or not lv_to_mp[type_name]):
# eprint("[%s] %s or %s : %s" % (isinstance(decl.type,c_ast.PtrDecl), type_name, get_type(decl.type), decl.type))
if type_name in generated_structs:
print("/* Already started generating %s! skipping field '%s' */" % (type_name, decl.name))
continue
raise MissingConversionException('Missing conversion to %s when generating struct %s.%s' % (type_name, struct_name, get_name(decl)))
mp_to_lv_convertor = mp_to_lv[type_name]
lv_to_mp_convertor = lv_to_mp_byref[type_name] if type_name in lv_to_mp_byref else lv_to_mp[type_name]
cast = '(void*)' if isinstance(decl.type, c_ast.PtrDecl) else '' # needed when field is const. casting to void overrides it
callback = decl_to_callback(decl)
if callback:
func_name, arg_type = callback
user_data = get_user_data(arg_type, func_name = func_name, containing_struct = struct, containing_struct_name = struct_name)
if not callback in callbacks_used_on_structs:
callbacks_used_on_structs.append(callback + (struct_name,))
# Emit callback forward decl.
if user_data in [user_data_decl.name for user_data_decl in flatten_struct_decls]:
full_user_data = 'data->%s' % user_data
full_user_data_ptr = '&%s' % full_user_data
lv_callback = '%s_%s_callback' % (struct_name, func_name)
print('STATIC %s %s_%s_callback(%s);' % (get_type(arg_type.type, remove_quals = False), struct_name, func_name, gen.visit(arg_type.args)))
else:
full_user_data = 'NULL'
full_user_data_ptr = full_user_data
lv_callback = 'NULL'
if not user_data:
gen_func_error(decl, "Missing 'user_data' as a field of the first parameter of the callback function '%s_%s_callback'" % (struct_name, func_name))
else:
gen_func_error(decl, "Missing 'user_data' member in struct '%s'" % struct_name)
write_cases.append('case MP_QSTR_{field}: data->{field} = {cast}mp_lv_callback(dest[1], {lv_callback} ,MP_QSTR_{struct_name}_{field}, {user_data}); break; // converting to callback {type_name}'.
format(struct_name = struct_name, field = sanitize(decl.name), lv_callback = lv_callback, user_data = full_user_data_ptr, type_name = type_name, cast = cast))
read_cases.append('case MP_QSTR_{field}: dest[0] = mp_lv_funcptr(&mp_{funcptr}_obj, {cast}data->{field}, {lv_callback} ,MP_QSTR_{struct_name}_{field}, {user_data}); break; // converting from callback {type_name}'.
format(struct_name = struct_name, field = sanitize(decl.name), lv_callback = lv_callback, funcptr = lv_to_mp_funcptr[type_name], user_data = full_user_data, type_name = type_name, cast = cast))
else:
user_data = None
# Arrays must be handled by memcpy, otherwise we would get "assignment to expression with array type" error
if isinstance(decl.type, c_ast.ArrayDecl):
memcpy_size = 'sizeof(%s)*%s' % (gen.visit(decl.type.type), gen.visit(decl.type.dim))
write_cases.append('case MP_QSTR_{field}: memcpy((void*)&data->{field}, {cast}{convertor}(dest[1]), {size}); break; // converting to {type_name}'.
format(field = sanitize(decl.name), convertor = mp_to_lv_convertor, type_name = type_name, cast = cast, size = memcpy_size))
read_cases.append('case MP_QSTR_{field}: dest[0] = {convertor}({cast}data->{field}); break; // converting from {type_name}'.
format(field = sanitize(decl.name), convertor = lv_to_mp_convertor, type_name = type_name, cast = cast))
else:
write_cases.append('case MP_QSTR_{field}: data->{field} = {cast}{convertor}(dest[1]); break; // converting to {type_name}'.
format(field = sanitize(decl.name), convertor = mp_to_lv_convertor, type_name = type_name, cast = cast))
read_cases.append('case MP_QSTR_{field}: dest[0] = {convertor}({cast}data->{field}); break; // converting from {type_name}'.
format(field = sanitize(decl.name), convertor = lv_to_mp_convertor, type_name = type_name, cast = cast))
print('''
/*
* Struct {struct_name}
*/
STATIC inline const mp_obj_type_t *get_mp_{sanitized_struct_name}_type();
STATIC inline {struct_tag}{struct_name}* mp_write_ptr_{sanitized_struct_name}(mp_obj_t self_in)
{{
mp_lv_struct_t *self = MP_OBJ_TO_PTR(cast(self_in, get_mp_{sanitized_struct_name}_type()));
return ({struct_tag}{struct_name}*)self->data;
}}
#define mp_write_{sanitized_struct_name}(struct_obj) *mp_write_ptr_{sanitized_struct_name}(struct_obj)
STATIC inline mp_obj_t mp_read_ptr_{sanitized_struct_name}({struct_tag}{struct_name} *field)
{{
return lv_to_mp_struct(get_mp_{sanitized_struct_name}_type(), (void*)field);
}}
#define mp_read_{sanitized_struct_name}(field) mp_read_ptr_{sanitized_struct_name}(copy_buffer(&field, sizeof({struct_tag}{struct_name})))
#define mp_read_byref_{sanitized_struct_name}(field) mp_read_ptr_{sanitized_struct_name}(&field)
STATIC void mp_{sanitized_struct_name}_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest)
{{
mp_lv_struct_t *self = MP_OBJ_TO_PTR(self_in);
{struct_tag}{struct_name} *data = ({struct_tag}{struct_name}*)self->data;
if (dest[0] == MP_OBJ_NULL) {{
// load attribute
switch(attr)
{{
{read_cases};
default: call_parent_methods(self_in, attr, dest); // fallback to locals_dict lookup
}}
}} else {{
if (dest[1])
{{
// store attribute
switch(attr)
{{
{write_cases};
default: return;
}}
dest[0] = MP_OBJ_NULL; // indicate success
}}
}}
}}
STATIC void mp_{sanitized_struct_name}_print(const mp_print_t *print,
mp_obj_t self_in,
mp_print_kind_t kind)
{{
mp_printf(print, "struct {struct_name}");
}}
STATIC const mp_obj_dict_t mp_{sanitized_struct_name}_locals_dict;
STATIC const mp_obj_type_t mp_{sanitized_struct_name}_type = {{
{{ &mp_type_type }},
.name = MP_QSTR_{sanitized_struct_name},
.print = mp_{sanitized_struct_name}_print,
.make_new = make_new_lv_struct,
.attr = mp_{sanitized_struct_name}_attr,
.locals_dict = (mp_obj_dict_t*)&mp_{sanitized_struct_name}_locals_dict,
.buffer_p = {{ .get_buffer = mp_blob_get_buffer }}
}};
STATIC inline const mp_obj_type_t *get_mp_{sanitized_struct_name}_type()
{{
return &mp_{sanitized_struct_name}_type;
}}
'''.format(
sanitized_struct_name = sanitized_struct_name,
struct_name = struct_name,
struct_tag = 'struct ' if struct_name in structs_without_typedef.keys() else '',
write_cases = ';\n '.join(write_cases),
read_cases = ';\n '.join(read_cases),
));
lv_to_mp[struct_name] = 'mp_read_%s' % sanitized_struct_name
lv_to_mp_byref[struct_name] = 'mp_read_byref_%s' % sanitized_struct_name
mp_to_lv[struct_name] = 'mp_write_%s' % sanitized_struct_name
lv_to_mp['%s *' % struct_name] = 'mp_read_ptr_%s' % sanitized_struct_name
mp_to_lv['%s *' % struct_name] = 'mp_write_ptr_%s' % sanitized_struct_name
lv_to_mp['const %s *' % struct_name] = 'mp_read_ptr_%s' % sanitized_struct_name
mp_to_lv['const %s *' % struct_name] = 'mp_write_ptr_%s' % sanitized_struct_name
lv_mp_type[struct_name] = simplify_identifier(sanitized_struct_name)
lv_mp_type['%s *' % struct_name] = simplify_identifier(sanitized_struct_name)
lv_mp_type['const %s *' % struct_name] = simplify_identifier(sanitized_struct_name)
# print('/* --> struct "%s" generated! */' % (struct_name))
generated_structs[struct_name] = True # Completed generating a struct
return struct_name
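#
# Illustrative MicroPython-side usage of a generated struct wrapper (names are
# hypothetical; the concrete attributes depend on the headers being bound):
#   s = lv.some_struct_t()    # make_new_lv_struct allocates the C struct
#   s.some_field = 5          # handled by a write case in mp_<struct>_attr
#   print(s.some_field)       # handled by the matching read case
#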
#
# Generate Array Types when needed
#
def try_generate_array_type(type_ast):
arr_name = get_name(type_ast)
if arr_name in mp_to_lv:
return mp_to_lv[arr_name]
# print('/* --> try_generate_array_type %s: %s */' % (arr_name, type_ast))
dim = gen.visit(type_ast.dim) if hasattr(type_ast, 'dim') and type_ast.dim else None
element_type = get_type(type_ast.type, remove_quals = True)
qualified_element_type = gen.visit(type_ast.type)
if element_type not in mp_to_lv or not mp_to_lv[element_type]:
try_generate_type(type_ast.type)
if element_type not in mp_to_lv or not mp_to_lv[element_type]:
raise MissingConversionException('Missing conversion to %s while generating array type conversion' % element_type)
array_convertor_suffix = arr_name.\
replace(' ','_').\
replace('*','ptr').\
replace('+','plus').\
replace('-','minus').\
replace('[','__').\
replace(']','__').\
replace('(','__').\
replace(')','__').\
replace('/','_div_')
arr_to_c_convertor_name = 'mp_arr_to_%s' % array_convertor_suffix
arr_to_mp_convertor_name = 'mp_arr_from_%s' % array_convertor_suffix
print('''
/*
* Array convertors for {arr_name}
*/
STATIC {qualified_type} *{arr_to_c_convertor_name}(mp_obj_t mp_arr)
{{
mp_obj_t mp_len = mp_obj_len_maybe(mp_arr);
if (mp_len == MP_OBJ_NULL) return mp_to_ptr(mp_arr);
mp_int_t len = mp_obj_get_int(mp_len);
{check_dim}
{struct_tag}{type} *lv_arr = ({struct_tag}{type}*)m_malloc(len * sizeof({struct_tag}{type}));
mp_obj_t iter = mp_getiter(mp_arr, NULL);
mp_obj_t item;
size_t i = 0;
while ((item = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {{
lv_arr[i++] = {mp_to_lv_convertor}(item);
}}
return ({qualified_type} *)lv_arr;
}}
STATIC mp_obj_t {arr_to_mp_convertor_name}({qualified_type} *arr)
{{
mp_obj_t obj_arr[{dim}];
for (size_t i=0; i<{dim}; i++){{
obj_arr[i] = {lv_to_mp_convertor}(arr[i]);
}}
return mp_obj_new_list({dim}, obj_arr); // TODO: return custom iterable object!
}}
'''.format(
arr_to_c_convertor_name = arr_to_c_convertor_name,
arr_to_mp_convertor_name = arr_to_mp_convertor_name ,
arr_name = arr_name,
type = element_type,
struct_tag = 'struct ' if element_type in structs_without_typedef.keys() else '',
qualified_type = qualified_element_type,
check_dim = '//TODO check dim!' if dim else '',
mp_to_lv_convertor = mp_to_lv[element_type],
lv_to_mp_convertor = lv_to_mp[element_type],
dim = dim if dim else 1,
))
mp_to_lv[arr_name] = arr_to_c_convertor_name
mp_to_lv['const %s' % arr_name] = arr_to_c_convertor_name
lv_to_mp[arr_name] = arr_to_mp_convertor_name
lv_to_mp['const %s' % arr_name] = arr_to_mp_convertor_name
lv_mp_type[arr_name] = arr_to_c_convertor_name
lv_mp_type['const %s' % arr_name] = 'const %s' % arr_to_c_convertor_name
return arr_to_c_convertor_name
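#
# Illustrative sketch: for a hypothetical array field such as `uint8_t values[4]`,
# the code above emits a pair of mp_arr_to_.../mp_arr_from_... convertors, so the
# field can be written from any Python iterable of ints and is read back as a
# Python list (see the TODO above about returning a custom iterable instead).
#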
#
# Generate types from typedefs when needed
#
def get_arg_name(arg):
if isinstance(arg, c_ast.PtrDecl) or isinstance(arg, c_ast.FuncDecl):
return get_arg_name(arg.type)
if hasattr(arg, 'declname'): return arg.declname
if hasattr(arg, 'name'): return arg.name
return 'unnamed_arg'
# print("// Typedefs: " + ", ".join(get_arg_name(t) for t in typedefs))
def try_generate_type(type_ast):
# eprint(' --> try_generate_type %s : %s' % (get_name(type_ast), gen.visit(type_ast)))
# print('/* --> try_generate_type %s: %s */' % (get_name(type_ast), type_ast))
if isinstance(type_ast, str): raise SyntaxError('Internal error! try_generate_type argument is a string.')
# Handle the case of a pointer
if isinstance(type_ast, c_ast.TypeDecl):
return try_generate_type(type_ast.type)
type = get_name(type_ast)
if isinstance(type_ast, c_ast.Enum):
mp_to_lv[type] = mp_to_lv['int']
lv_to_mp[type] = lv_to_mp['int']
lv_mp_type[type] = type_ast.name
return mp_to_lv[type]
if type in mp_to_lv:
return mp_to_lv[type]
if isinstance(type_ast, c_ast.ArrayDecl) and try_generate_array_type(type_ast):
return mp_to_lv[type]
if isinstance(type_ast, (c_ast.PtrDecl, c_ast.ArrayDecl)):
type = get_name(type_ast.type.type)
ptr_type = get_type(type_ast, remove_quals=True)
# print('/* --> try_generate_type IS PtrDecl!! %s: %s */' % (type, type_ast))
if (type in structs):
try_generate_struct(type, structs[type])
if isinstance(type_ast.type, c_ast.TypeDecl) and isinstance(type_ast.type.type, c_ast.Struct) and (type_ast.type.type.name in structs):
try_generate_struct(type, structs[type_ast.type.type.name])
if isinstance(type_ast.type, c_ast.FuncDecl):
if isinstance(type_ast.type.type.type, c_ast.TypeDecl): type = type_ast.type.type.type.declname
func_ptr_name = "funcptr_%s" % type
i = 1
while func_ptr_name in generated_funcs: # Make sure func_ptr_name is unique
func_ptr_name = "funcptr_%s_%d" % (type,i)
i += 1
func = c_ast.Decl(
name=func_ptr_name,
quals=[],
storage=[],
funcspec=[],
type=type_ast.type,
init=None,
bitsize=None)
try:
print("#define %s NULL\n" % func_ptr_name)
gen_mp_func(func, None)
print("STATIC inline mp_obj_t mp_lv_{f}(void *func){{ return mp_lv_funcptr(&mp_{f}_obj, func, NULL, MP_QSTR_, NULL); }}\n".format(
f=func_ptr_name))
lv_to_mp_funcptr[ptr_type] = func_ptr_name
# eprint("/* --> lv_to_mp_funcptr[%s] = %s */" % (ptr_type, func_ptr_name))
lv_to_mp[ptr_type] = "mp_lv_%s" % func_ptr_name
lv_mp_type[ptr_type] = 'function pointer'
except MissingConversionException as exp:
gen_func_error(func, exp)
# print('/* --> PTR %s */' % ptr_type)
if not ptr_type in mp_to_lv: mp_to_lv[ptr_type] = mp_to_lv['void *']
if not ptr_type in lv_to_mp: lv_to_mp[ptr_type] = lv_to_mp['void *']
if not ptr_type in lv_mp_type: lv_mp_type[ptr_type] = 'pointer'
return mp_to_lv[ptr_type]
if type in structs:
if try_generate_struct(type, structs[type]):
return mp_to_lv[type]
for new_type_ast in [x for x in typedefs if get_arg_name(x) == type]:
new_type = get_type(new_type_ast, remove_quals=True)
if isinstance(new_type_ast, c_ast.TypeDecl) and isinstance(new_type_ast.type, c_ast.Struct) and not new_type_ast.type.decls:
explicit_struct_name = new_type_ast.type.name if hasattr(new_type_ast.type, 'name') else new_type_ast.type.names[0]
else:
explicit_struct_name = new_type
if type == explicit_struct_name:
continue
# eprint('/* --> typedef: %s --> %s (%s) */' % (type, new_type, new_type_ast))
if explicit_struct_name in structs:
if (try_generate_struct(new_type, structs[explicit_struct_name])):
if explicit_struct_name == new_type:
struct_aliases[new_type] = type
if try_generate_type(new_type_ast):
# eprint('/* --> try_generate_type TYPEDEF!! %s: %s */' % (type, mp_to_lv[new_type]))
mp_to_lv[type] = mp_to_lv[new_type]
type_ptr = '%s *' % type
new_type_ptr = '%s *' % new_type
if new_type_ptr in mp_to_lv:
mp_to_lv[type_ptr] = mp_to_lv[new_type_ptr]
if new_type in lv_to_mp:
lv_to_mp[type] = lv_to_mp[new_type]
lv_mp_type[type] = lv_mp_type[new_type]
if new_type in lv_to_mp_funcptr:
lv_to_mp_funcptr[type] = lv_to_mp_funcptr[new_type]
if new_type in lv_to_mp_byref:
lv_to_mp_byref[type] = lv_to_mp_byref[new_type]
if new_type_ptr in lv_to_mp:
lv_to_mp[type_ptr] = lv_to_mp[new_type_ptr]
if new_type_ptr in lv_mp_type:
lv_mp_type[type_ptr] = lv_mp_type[new_type_ptr]
# eprint('/* --> %s = (%s) */' % (type, new_type))
return mp_to_lv[type]
return None
#
# Helper structs
#
def create_helper_struct(struct_str):
print(struct_str)
struct_str_ast = parser.parse(struct_str).ext[0].type
struct_name = get_name(struct_str_ast)
# print('/* --> %s: %s */' % (struct_name, struct_str_ast.type))
structs[struct_name] = struct_str_ast.type
try:
try_generate_struct(struct_name, struct_str_ast.type)
except MissingConversionException as exp:
print ('/* Helper structs NOT generated:\n %s\n*/' % (repr(exp)))
print('''
/*
* Helper Structs
*/
''')
create_helper_struct('''
typedef union {
void* ptr_val;
const char* str_val;
int int_val;
unsigned int uint_val;
short short_val[sizeof(void*) / sizeof(short)];
unsigned short ushort_val[sizeof(void*) / sizeof(unsigned short)];
char char_val[sizeof(void*) / sizeof(char)];
unsigned char uchar_val[sizeof(void*) / sizeof(unsigned char)];
} C_Pointer;
''')
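#
# Illustrative sketch (hypothetical MicroPython-side usage): C_Pointer lets a
# script reinterpret a pointer-sized value, e.g. one carried in a void* user_data
# field, through any of the union members declared above:
#   p = lv.C_Pointer()
#   p.int_val = 42      # store an int in the pointer-sized union
#   # ... later read it back via p.int_val (or another union member)
#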
#
# Emit C callback functions
#
generated_callbacks = collections.OrderedDict()
def build_callback_func_arg(arg, index, func, func_name = None):
arg_type = get_type(arg.type, remove_quals = True)
cast = '(void*)' if isinstance(arg.type, c_ast.PtrDecl) else '' # needed when field is const. casting to void overrides it
if arg_type not in lv_to_mp or not lv_to_mp[arg_type]:
try_generate_type(arg.type)
if arg_type not in lv_to_mp or not lv_to_mp[arg_type]:
raise MissingConversionException("Callback: Missing conversion to %s" % arg_type)
arg_metadata = {'type': lv_mp_type[arg_type]}
if arg.name: arg_metadata['name'] = arg.name
callback_metadata[func_name]['args'].append(arg_metadata)
return 'mp_args[{i}] = {convertor}({cast}arg{i});'.format(
convertor = lv_to_mp[arg_type],
i = index, cast = cast)
def gen_callback_func(func, func_name = None):
global mp_to_lv
if func_name in generated_callbacks:
return
# print('/* --> callback: %s */' % (gen.visit(func)))
callback_metadata[func_name] = {'args':[]}
args = func.args.params
if not func_name: func_name = get_arg_name(func.type)
# print('/* --> callback: func_name = %s */' % func_name)
if is_global_callback(func):
full_user_data = 'MP_STATE_PORT(mp_lv_user_data)'
else:
user_data = get_user_data(func, func_name)
if user_data:
full_user_data = 'arg0->%s' % user_data
if len(args) < 1 or hasattr(args[0].type.type, 'names') and lv_base_obj_pattern.match(args[0].type.type.names[0]):
raise MissingConversionException("Callback: First argument of callback function must be lv_obj_t")
elif len(args) > 0 and gen.visit(args[-1]) == 'void *':
full_user_data = 'arg%d' % (len(args) - 1)
else:
full_user_data = None
# if full_user_data: print('/* --> callback: %s user_data found!! %s */' %(gen.visit(func), full_user_data))
# else: print('/* --> callback: full_user_data NOT FOUND !! %s */' % (gen.visit(func)))
if not full_user_data:
raise MissingConversionException("Callback: user_data NOT FOUND! %s" % (gen.visit(func)))
return_type = get_type(func.type, remove_quals = False)
if return_type != 'void' and (return_type not in mp_to_lv or not mp_to_lv[return_type]):
try_generate_type(func.type)
if return_type not in mp_to_lv or not mp_to_lv[return_type]:
raise MissingConversionException("Callback return value: Missing conversion to %s" % return_type)
callback_metadata[func_name]['return_type'] = lv_mp_type[return_type]
print("""
/*
* Callback function {func_name}
* {func_prototype}
*/
STATIC {return_type} {func_name}_callback({func_args})
{{
mp_obj_t mp_args[{num_args}];
{build_args}
mp_obj_t callbacks = get_callback_dict_from_user_data({user_data});
{return_value_assignment}mp_call_function_n_kw(mp_obj_dict_get(callbacks, MP_OBJ_NEW_QSTR(MP_QSTR_{func_name})) , {num_args}, 0, mp_args);
return{return_value};
}}
""".format(
func_prototype = gen.visit(func),
func_name = sanitize(func_name),
return_type = return_type,
func_args = ', '.join(["%s arg%s" % (gen.visit(arg.type), i) for i,arg in enumerate(args)]),
num_args=len(args),
build_args="\n ".join([build_callback_func_arg(arg, i, func, func_name=func_name) for i,arg in enumerate(args)]),
user_data=full_user_data,
return_value_assignment = '' if return_type == 'void' else 'mp_obj_t callback_result = ',
return_value='' if return_type == 'void' else ' %s(callback_result)' % mp_to_lv[return_type]))
generated_callbacks[func_name] = True
#
# Emit Mpy function definitions
#
generated_funcs = collections.OrderedDict()
def build_mp_func_arg(arg, index, func, obj_name):
if isinstance(arg, c_ast.EllipsisParam):
raise MissingConversionException("Cannot convert ellipsis param")
fixed_arg = copy.deepcopy(arg)
convert_array_to_ptr(fixed_arg)
if not fixed_arg.name:
fixed_arg.name = "arg%d" % index
add_default_declname(fixed_arg, fixed_arg.name)
# print('/* --> FIXED ARG: %s */' % repr(fixed_arg))
callback = decl_to_callback(arg)
args = func.type.args.params if func.type.args else []
# print('/* --> ARG: %s */' % arg)
# print('/* --> FIRST ARG: %s */' % first_arg)
if callback:
# Callback is supported in two modes:
# 1) last argument is a void* user_data which is passed to callback
# 2) first argument is a struct with user_data field, which is passed to callback
# (illustrative prototypes for both modes appear right after this function)
func_name, arg_type = callback
# print('/* --> callback %s ARG TYPE: %s */' % (func_name, arg_type))
try:
if len(args) > 0 and gen.visit(args[-1].type) == 'void *' and args[-1].name == 'user_data':
callback_name = '%s' % (func_name)
full_user_data = '&user_data'
else:
first_arg = args[0]
struct_name = get_name(first_arg.type.type.type if hasattr(first_arg.type.type,'type') else first_arg.type.type)
callback_name = '%s_%s' % (struct_name, func_name)
user_data = get_user_data(arg_type, func_name)
if is_global_callback(arg_type):
full_user_data = '&MP_STATE_PORT(mp_lv_user_data)'
else:
full_user_data = '&%s->%s' % (first_arg.name, user_data) if user_data else None
if index == 0:
raise MissingConversionException("Callback argument '%s' cannot be the first argument! We assume the first argument contains the user_data" % gen.visit(arg))
if not full_user_data:
raise MissingConversionException("Callback function '%s' must receive a struct pointer with user_data member as its first argument!" % gen.visit(arg))
# eprint("--> callback_metadata= %s_%s" % (struct_name, func_name))
gen_callback_func(arg_type, '%s' % callback_name)
arg_metadata = {'type': 'callback', 'function': callback_metadata[callback_name]}
if arg.name: arg_metadata['name'] = arg.name
func_metadata[func.name]['args'].append(arg_metadata)
return 'void *{arg_name} = mp_lv_callback(mp_args[{i}], &{callback_name}_callback, MP_QSTR_{callback_name}, {full_user_data});'.format(
i = index,
arg_name = fixed_arg.name,
callback_name = sanitize(callback_name),
full_user_data = full_user_data)
except MissingConversionException as exp:
gen_func_error(arg, exp)
callback_name = 'NULL'
full_user_data = 'NULL'
if not hasattr(arg, 'type'):
raise MissingConversionException("Cannot convert function argument %s" % repr(arg))
arg_type = get_type(arg.type, remove_quals = True)
# print('/* --> arg = %s, arg_type = %s */' %(gen.visit(arg), arg_type))
if arg_type not in mp_to_lv or not mp_to_lv[arg_type]:
try_generate_type(arg.type)
if arg_type not in mp_to_lv or not mp_to_lv[arg_type]:
raise MissingConversionException('Missing conversion to %s' % arg_type)
arg_metadata = {'type': lv_mp_type[arg_type]}
if arg.name: arg_metadata['name'] = arg.name
func_metadata[func.name]['args'].append(arg_metadata)
return '{var} = {convertor}(mp_args[{i}]);'.format(
var = gen.visit(fixed_arg),
convertor = mp_to_lv[arg_type],
i = index)
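#
# Illustrative prototypes for the two callback-registration modes handled in
# build_mp_func_arg above (hypothetical C declarations, shown only as a sketch):
#   Mode 1 - the last argument is an explicit `void *user_data`:
#       void lv_some_api(lv_some_cb_t cb, void *user_data);
#   Mode 2 - the first argument is a struct (or object) whose user_data member
#   stores the callback dict:
#       void lv_some_obj_set_cb(lv_some_obj_t *obj, lv_some_cb_t cb);
#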
def emit_func_obj(func_obj_name, func_name, param_count, func_ptr, is_static):
print("""
STATIC {builtin_macro}(mp_{func_obj_name}_obj, {param_count}, mp_{func_name}, {func_ptr});
""".format(
func_obj_name = func_obj_name,
func_name = func_name,
func_ptr = func_ptr,
param_count = param_count,
builtin_macro='MP_DEFINE_CONST_LV_FUN_OBJ_STATIC_VAR' if is_static else 'MP_DEFINE_CONST_LV_FUN_OBJ_VAR'))
def gen_mp_func(func, obj_name):
# print('/* gen_mp_func: %s : %s */' % (obj_name, func))
if func.name in generated_funcs:
print("""
/*
* WARNING: %s was declared more than once!
*/
""" % func.name)
return
# print("/* gen_mp_func %s */" % func.name)
generated_funcs[func.name] = False # starting to generate the function
func_metadata[func.name] = {'type': 'function', 'args':[]}
args = func.type.args.params if func.type.args else []
enumerated_args = enumerate(args)
# Handle the case of a single function argument which is "void"
if len(args)==1 and get_type(args[0].type, remove_quals = True) == "void":
param_count = 0
args = []
else:
param_count = len(args)
# If func prototype matches an already generated func, reuse it and only emit func obj that points to it.
prototype_str = gen.visit(function_prototype(func))
if prototype_str in func_prototypes:
original_func = func_prototypes[prototype_str]
if generated_funcs[original_func.name] == True:
print("/* Reusing %s for %s */" % (original_func.name, func.name))
emit_func_obj(func.name, original_func.name, param_count, func.name, is_static_member(func, base_obj_type))
func_metadata[func.name]['return_type'] = func_metadata[original_func.name]['return_type']
func_metadata[func.name]['args'] = func_metadata[original_func.name]['args']
generated_funcs[func.name] = True # completed generating the function
return
func_prototypes[prototype_str] = func
# user_data argument must be handled first, if it exists
try:
i = [(arg.name if hasattr(arg, 'name') else None) for arg in args].index('user_data')
if i>0:
enumerated_args = [(i, arg) for i, arg in enumerated_args] # convert enumerate to list
enumerated_args[0], enumerated_args[i] = enumerated_args[i], enumerated_args[0]
except ValueError:
pass
return_type = get_type(func.type.type, remove_quals = False)
if isinstance(func.type.type, c_ast.PtrDecl) and lv_func_returns_array.match(func.name):
try_generate_array_type(func.type.type)
# print('/* --> return_type = %s, func.type.type = %s\n%s */' % (return_type, gen.visit(func.type.type), func.type.type))
if return_type == "void":
build_result = ""
build_return_value = "mp_const_none"
func_metadata[func.name]['return_type'] = 'NoneType'
else:
if return_type not in lv_to_mp or not lv_to_mp[return_type]:
try_generate_type(func.type.type)
if return_type not in lv_to_mp or not lv_to_mp[return_type]:
raise MissingConversionException("Missing conversion from %s" % return_type)
build_result = "%s _res = " % return_type
cast = '(void*)' if isinstance(func.type.type, c_ast.PtrDecl) else '' # needed when field is const. casting to void overrides it
build_return_value = "{type}({cast}_res)".format(type = lv_to_mp[return_type], cast = cast)
func_metadata[func.name]['return_type'] = lv_mp_type[return_type]
print("""
/*
* {module_name} extension definition for:
* {print_func}
*/
STATIC mp_obj_t mp_{func}(size_t mp_n_args, const mp_obj_t *mp_args, void *lv_func_ptr)
{{
{build_args}
{build_result}(({func_ptr})lv_func_ptr)({send_args});
return {build_return_value};
}}
""".format(
module_name = module_name,
func=func.name,
func_ptr=prototype_str,
print_func=gen.visit(func),
build_args="\n ".join([build_mp_func_arg(arg, i, func, obj_name) for i,arg in enumerated_args
if isinstance(arg, c_ast.EllipsisParam) or
(not isinstance(arg.type, c_ast.TypeDecl)) or
(not isinstance(arg.type.type, c_ast.IdentifierType)) or
'void' not in arg.type.type.names]), # Handle the case of 'void' param which should be ignored
send_args=", ".join([(arg.name if (hasattr(arg, 'name') and arg.name) else ("arg%d" % i)) for i,arg in enumerate(args)]),
build_result=build_result,
build_return_value=build_return_value))
emit_func_obj(func.name, func.name, param_count, func.name, is_static_member(func, base_obj_type))
generated_funcs[func.name] = True # completed generating the function
# print('/* is_struct_function() = %s, is_static_member() = %s, get_first_arg_type()=%s, obj_name = %s */' % (
# is_struct_function(func), is_static_member(func, base_obj_type), get_first_arg_type(func), base_obj_type))
def gen_func_error(method, exp):
global funcs
print("""
/*
* Function NOT generated:
* {problem}
* {method}
*/
""".format(method=gen.visit(method) if isinstance(method, c_ast.Node) else method, problem=exp))
try:
funcs.remove(method)
except:
pass
#
# Emit Mpy objects definitions
#
enum_referenced = collections.OrderedDict()
def gen_obj_methods(obj_name):
global enums
helper_members = ["{ MP_ROM_QSTR(MP_QSTR___cast__), MP_ROM_PTR(&cast_obj_class_method) }"] if len(obj_names) > 0 and obj_name == base_obj_name else []
members = ["{{ MP_ROM_QSTR(MP_QSTR_{method_name}), MP_ROM_PTR(&mp_{method}_obj) }}".
format(method=method.name, method_name=sanitize(method_name_from_func_name(method.name))) for method in get_methods(obj_name)]
obj_metadata[obj_name]['members'].update({method_name_from_func_name(method.name): func_metadata[method.name] for method in get_methods(obj_name)})
# add parent methods
parent_members = []
if obj_name in parent_obj_names and parent_obj_names[obj_name] != None:
# parent_members += gen_obj_methods(parent_obj_names[obj_name])
obj_metadata[obj_name]['members'].update(obj_metadata[parent_obj_names[obj_name]]['members'])
# add enum members
enum_members = ["{{ MP_ROM_QSTR(MP_QSTR_{enum_member}), MP_ROM_PTR({enum_member_value}) }}".
format(enum_member = sanitize(get_enum_member_name(enum_member_name)), enum_member_value = get_enum_value(obj_name, enum_member_name)) for enum_member_name in get_enum_members(obj_name)]
obj_metadata[obj_name]['members'].update({get_enum_member_name(enum_member_name): {'type':'enum_member'} for enum_member_name in get_enum_members(obj_name)})
# add enums that match object name
obj_enums = [enum_name for enum_name in enums.keys() if is_method_of(enum_name, obj_name)]
enum_types = ["{{ MP_ROM_QSTR(MP_QSTR_{name}), MP_ROM_PTR(&mp_{enum}_type) }}".
format(name=sanitize(method_name_from_func_name(enum_name)), enum=enum_name) for enum_name in obj_enums]
obj_metadata[obj_name]['members'].update({method_name_from_func_name(enum_name): {'type':'enum_type'} for enum_name in obj_enums})
for enum_name in obj_enums:
obj_metadata[obj_name]['members'][method_name_from_func_name(enum_name)].update(obj_metadata[enum_name])
enum_referenced[enum_name] = True
return members + parent_members + enum_members + enum_types + helper_members
def gen_obj(obj_name):
is_obj = has_ctor(obj_name)
should_add_base_methods = is_obj and obj_name != 'obj'
obj_metadata[obj_name] = {'members' : collections.OrderedDict()}
for method in get_methods(obj_name):
try:
gen_mp_func(method, obj_name)
except MissingConversionException as exp:
gen_func_error(method, exp)
# print([method.name for method in methods])
ctor = """
STATIC mp_obj_t {obj}_make_new(
const mp_obj_type_t *type,
size_t n_args,
size_t n_kw,
const mp_obj_t *args)
{{
return make_new(&lv_{obj}_create, type, n_args, n_kw, args);
}}
"""
print("""
/*
* {module_name} {obj} object definitions
*/
""".format(
module_name = module_name,
obj = obj_name))
print("""
STATIC const mp_rom_map_elem_t {obj}_locals_dict_table[] = {{
{locals_dict_entries}
}};
STATIC MP_DEFINE_CONST_DICT({obj}_locals_dict, {obj}_locals_dict_table);
STATIC void {obj}_print(const mp_print_t *print,
mp_obj_t self_in,
mp_print_kind_t kind)
{{
mp_printf(print, "{module_name} {obj}");
}}
{ctor}
STATIC const mp_obj_type_t mp_{obj}_type = {{
{{ &mp_type_type }},
.name = MP_QSTR_{obj},
.print = {obj}_print,
{make_new}
.attr = call_parent_methods,
.locals_dict = (mp_obj_dict_t*)&{obj}_locals_dict,
{buffer_p}
.parent = {parent},
}};
""".format(
module_name = module_name,
obj = sanitize(obj_name), base_obj = base_obj_name,
base_class = '&mp_%s_type' % base_obj_name if should_add_base_methods else 'NULL',
locals_dict_entries = ",\n ".join(gen_obj_methods(obj_name)),
ctor = ctor.format(obj = obj_name) if has_ctor(obj_name) else '',
make_new = '.make_new = %s_make_new,' % obj_name if is_obj else '',
buffer_p = '.buffer_p = { .get_buffer = mp_lv_obj_get_buffer },' if is_obj else '',
parent = '&mp_%s_type' % parent_obj_names[obj_name] if obj_name in parent_obj_names and parent_obj_names[obj_name] else 'NULL',
))
#
# Generate Enum objects
#
for enum_name in list(enums.keys()):
gen_obj(enum_name)
#
# Generate all other objects. Generate parent objects first
#
generated_obj_names = collections.OrderedDict()
for obj_name in obj_names:
# eprint("--> %s [%s]" % (obj_name, ", ".join([name for name in generated_obj_names])))
parent_obj_name = parent_obj_names[obj_name] if obj_name in parent_obj_names else None
while parent_obj_name != None and not parent_obj_name in generated_obj_names:
gen_obj(parent_obj_name)
generated_obj_names[parent_obj_name] = True
parent_obj_name = parent_obj_names[parent_obj_name] if parent_obj_name in parent_obj_names else None
if not obj_name in generated_obj_names:
# eprint("--> gen obj %s" % obj_name)
gen_obj(obj_name)
generated_obj_names[obj_name] = True
#
# Generate structs which contain function members
# First argument of a function could be its parent struct
# Need to make sure these structs are generated *before* struct-functions are
# Otherwise we will not know of all the structs when generating struct-functions
#
def try_generate_structs_from_first_argument():
for func in funcs:
if func.name in generated_funcs: continue
args = func.type.args.params if func.type.args else []
if len(args) < 1: continue
arg_type = get_type(args[0].type, remove_quals = True)
if arg_type not in mp_to_lv or not mp_to_lv[arg_type]:
try:
try_generate_type(args[0].type)
except MissingConversionException as e:
print('''
/*
* {struct} not generated: {err}
*/
'''.format(struct=arg_type, err=e))
#
# Generate struct-functions
#
# eprint("/* Generating struct-functions */")
try_generate_structs_from_first_argument()
def generate_struct_functions(struct_list):
# print('/* List of structs: %s */' % repr(struct_list))
for struct_name in struct_list:
if not generated_structs[struct_name]: continue
sanitized_struct_name = sanitize(struct_name)
struct_funcs = get_struct_functions(struct_name)
# print('/* Struct %s contains: %s */' % (struct_name, [f.name for f in struct_funcs]))
for struct_func in struct_funcs[:]: # clone list because we are changing it in the loop.
try:
if struct_func.name not in generated_funcs:
gen_mp_func(struct_func, struct_name)
except MissingConversionException as exp:
gen_func_error(struct_func, exp)
struct_funcs.remove(struct_func)
print('''
STATIC const mp_rom_map_elem_t mp_{sanitized_struct_name}_locals_dict_table[] = {{
{{ MP_ROM_QSTR(MP_QSTR_SIZE), MP_ROM_PTR(MP_ROM_INT(sizeof({struct_tag}{struct_name}))) }},
{{ MP_ROM_QSTR(MP_QSTR_cast), MP_ROM_PTR(&mp_lv_cast_class_method) }},
{{ MP_ROM_QSTR(MP_QSTR_cast_instance), MP_ROM_PTR(&mp_lv_cast_instance_obj) }},
{{ MP_ROM_QSTR(MP_QSTR___dereference__), MP_ROM_PTR(&mp_lv_dereference_obj) }},
{functions}
}};
STATIC MP_DEFINE_CONST_DICT(mp_{sanitized_struct_name}_locals_dict, mp_{sanitized_struct_name}_locals_dict_table);
'''.format(
struct_name = struct_name,
struct_tag = 'struct ' if struct_name in structs_without_typedef.keys() else '',
sanitized_struct_name = sanitized_struct_name,
functions = ''.join(['{{ MP_ROM_QSTR(MP_QSTR_{name}), MP_ROM_PTR(&mp_{func}_obj) }},\n '.
format(name = sanitize(noncommon_part(f.name, struct_name)), func = f.name) for f in struct_funcs]),
))
generated_struct_functions[struct_name] = True
generate_struct_functions(list(generated_structs.keys()))
#
# Generate all module functions (not including method functions which were already generated)
#
print("""
/*
*
* Global Module Functions
*
*/
""")
# eprint("/* Generating global module functions */")
module_funcs = [func for func in funcs if not func.name in generated_funcs]
for module_func in module_funcs[:]: # clone list because we are changing it in the loop.
if module_func.name in generated_funcs:
continue # generated_funcs could change inside the loop so need to recheck.
try:
gen_mp_func(module_func, None)
# A new function can create new struct with new function structs
new_structs = [s for s in generated_structs if s not in generated_struct_functions]
if new_structs:
generate_struct_functions(new_structs)
except MissingConversionException as exp:
gen_func_error(module_func, exp)
module_funcs.remove(module_func)
functions_not_generated = [func.name for func in funcs if func.name not in generated_funcs]
if len(functions_not_generated) > 0:
print("""
/*
* Functions not generated:
* {funcs}
*
*/
""".format(funcs = "\n * ".join(functions_not_generated)))
#
# Generate globals
#
# eprint("/* Generating globals */")
def gen_global(global_name, global_type_ast):
global_type = get_type(global_type_ast, remove_quals=True)
try_generate_type(global_type_ast)
if global_type not in generated_structs:
raise MissingConversionException('Missing conversion to %s when generating global %s' % (global_type, global_name))
print("""
/*
* {module_name} {global_name} global definitions
*/
STATIC const mp_lv_struct_t mp_{global_name} = {{
{{ &mp_{struct_name}_type }},
({cast}*)&{global_name}
}};
""".format(
module_name = module_name,
global_name = global_name,
struct_name = global_type,
sanitized_struct_name = sanitize(global_type),
cast = gen.visit(global_type_ast)))
generated_globals = []
for global_name in blobs:
try:
gen_global(global_name, blobs[global_name])
generated_globals.append(global_name)
except MissingConversionException as exp:
gen_func_error(global_name, exp)
#
# Generate callback functions
#
# for func_typedef in func_typedefs:
# func = func_typedef.type.type
# try:
# gen_callback_func(func)
# except MissingConversionException as exp:
# gen_func_error(func, exp)
# func_name = get_arg_name(func.type)
# lv_to_mp[func_name] = lv_to_mp['void *']
# mp_to_lv[func_name] = mp_to_lv['void *']
# eprint("/* Generating callback functions */")
for (func_name, func, struct_name) in callbacks_used_on_structs:
try:
# print('/* --> gen_callback_func %s */' % func_name)
gen_callback_func(func, func_name = '%s_%s' % (struct_name, func_name))
except MissingConversionException as exp:
gen_func_error(func, exp)
# func_name = get_arg_name(func.type)
# lv_to_mp[func_name] = lv_to_mp['void *']
# mp_to_lv[func_name] = mp_to_lv['void *']
#
# Emit Mpy Module definition
#
# eprint("/* Generating module definition */")
print("""
/*
* {module_name} module definitions
*/
STATIC const mp_rom_map_elem_t {module_name}_globals_table[] = {{
{{ MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_{module_name}) }},
{objects}
{functions}
{enums}
{structs}
{struct_aliases}
{blobs}
{int_constants}
}};
""".format(
module_name = sanitize(module_name),
objects = ''.join(['{{ MP_ROM_QSTR(MP_QSTR_{obj}), MP_ROM_PTR(&mp_{obj}_type) }},\n '.
format(obj = sanitize(o)) for o in obj_names]),
functions = ''.join(['{{ MP_ROM_QSTR(MP_QSTR_{name}), MP_ROM_PTR(&mp_{func}_obj) }},\n '.
format(name = sanitize(simplify_identifier(f.name)), func = f.name) for f in module_funcs]),
enums = ''.join(['{{ MP_ROM_QSTR(MP_QSTR_{name}), MP_ROM_PTR(&mp_{enum}_type) }},\n '.
format(name = sanitize(get_enum_name(enum_name)), enum=enum_name) for enum_name in enums.keys() if enum_name not in enum_referenced]),
structs = ''.join(['{{ MP_ROM_QSTR(MP_QSTR_{name}), MP_ROM_PTR(&mp_{struct_name}_type) }},\n '.
format(name = sanitize(simplify_identifier(struct_name)), struct_name = sanitize(struct_name)) for struct_name in generated_structs \
if generated_structs[struct_name]]),
struct_aliases = ''.join(['{{ MP_ROM_QSTR(MP_QSTR_{alias_name}), MP_ROM_PTR(&mp_{struct_name}_type) }},\n '.
format(struct_name = sanitize(struct_name), alias_name = sanitize(simplify_identifier(struct_aliases[struct_name]))) for struct_name in struct_aliases.keys()]),
blobs = ''.join(['{{ MP_ROM_QSTR(MP_QSTR_{name}), MP_ROM_PTR(&mp_{global_name}) }},\n '.
format(name = sanitize(simplify_identifier(global_name)), global_name = global_name) for global_name in generated_globals]),
int_constants = ''.join(['{{ MP_ROM_QSTR(MP_QSTR_{name}), MP_ROM_PTR(MP_ROM_INT({value})) }},\n '.
format(name = sanitize(get_enum_name(int_constant)), value = int_constant) for int_constant in int_constants])))
print("""
STATIC MP_DEFINE_CONST_DICT (
mp_module_{module_name}_globals,
{module_name}_globals_table
);
const mp_obj_module_t mp_module_{module_name} = {{
.base = {{ &mp_type_module }},
.globals = (mp_obj_dict_t*)&mp_module_{module_name}_globals
}};
""".format(
module_name = module_name,
))
# Save Metadata File, if specified.
if args.metadata:
metadata = collections.OrderedDict()
metadata['objects'] = {obj_name: obj_metadata[obj_name] for obj_name in obj_names}
metadata['functions'] = {simplify_identifier(f.name): func_metadata[f.name] for f in module_funcs}
metadata['enums'] = {get_enum_name(enum_name): obj_metadata[enum_name] for enum_name in enums.keys() if enum_name not in enum_referenced}
metadata['structs'] = [simplify_identifier(struct_name) for struct_name in generated_structs if generated_structs[struct_name]]
metadata['structs'] += [simplify_identifier(struct_aliases[struct_name]) for struct_name in struct_aliases.keys()]
metadata['blobs'] = [simplify_identifier(global_name) for global_name in generated_globals]
metadata['int_constants'] = [get_enum_name(int_constant) for int_constant in int_constants]
# TODO: struct functions
with open(args.metadata, 'w') as metadata_file:
json.dump(metadata, metadata_file, indent=4)
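#
# Illustrative sketch of the emitted metadata file (the top-level keys come from
# the code above; the concrete entries shown here are hypothetical):
#   {
#     "objects":       { "btn": { "members": { ... } }, ... },
#     "functions":     { "task_handler": { "type": "function", "args": [ ... ] }, ... },
#     "enums":         { "ALIGN": { "members": { ... } }, ... },
#     "structs":       [ "style_t", ... ],
#     "blobs":         [ "font_default", ... ],
#     "int_constants": [ "ANIM_REPEAT_INFINITE", ... ]
#   }
#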
|
the-stack_0_24697
|
'''
Often we want a count of records for each distinct value in another column. The .group_by() method answers this type of query: pass a column to .group_by() and use an aggregate function such as sum() or count() in the select. Much like the .order_by() method, .group_by() can take multiple columns as arguments.
'''
# Import func
from sqlalchemy import func
# Build a query to select the state and count of ages by state: stmt
stmt = select([census.columns.state, func.count(census.columns.age)])
# Group stmt by state
stmt = stmt.group_by(census.columns.state)
# Execute the statement and store all the records: results
results = connection.execute(stmt).fetchall()
# Print results
print(results)
# Print the keys/column names of the results returned
print(results[0].keys())
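# A minimal extension of the same pattern (a sketch that assumes the same
# `census` table and `connection` used above, and that the table also has a
# `sex` column): grouping by more than one column just means passing several
# columns to .group_by().
stmt_multi = select([census.columns.state, census.columns.sex, func.count(census.columns.age)])
stmt_multi = stmt_multi.group_by(census.columns.state, census.columns.sex)
print(connection.execute(stmt_multi).fetchall())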
|
the-stack_0_24698
|
# clauses is a set of clauses and literals is a set of literals. The call
# davis_putnam(clauses, literals)
# tries to compute a solution for the set of clauses. If this is successful,
# a set of unit clauses is returned. This set of unit clauses does not contain
# any complementary literals and therefore corresponds to a valuation satisfying
# all clauses. If clauses is unsatisfiable, instead the set containing the
# empty clause is returned.
#
# The argument literals contains all those literals that have already been used
# to reduce the clauses. Initially, this set is empty.
def davis_putnam(clauses, literals):
lits = literals.copy()
s = clauses.copy()
s = saturate(s)
empty = frozenset()
falsum = set([empty])
if empty in s:
return falsum
if all([len(c) == 1 for c in s]):
return s
l = select_literal(s, lits)
lits1 = lits | set([l])
r = davis_putnam(s | set([frozenset([l])]), lits1)
if r != falsum:
return r
neg_l = negate_literal(l)
lits2 = lits | set([neg_l])
return davis_putnam(s | set([frozenset([neg_l])]), lits2)
# S is a set of clauses. The call saturate(S) computes the set of those clauses
# that can be derived from S via unit cuts. Furthermore, all clauses in S that
# are subsumed by unit clauses are removed from S.
def saturate(s):
units = set(k for k in s if len(k) == 1)
used = set()
while len(units) > 0:
unit = units.pop()
used |= set([unit])
l = next(iter(unit))
s = reduce(s, l)
units = set(k for k in s if len(k) == 1) - used
return s
# The procedure reduce(s,l) performs all unit cuts and all unit subsumptions that
# are possible using the literal l.
def reduce(s, l):
l_negated = negate_literal(l)
return set(k - set([l_negated]) for k in s if l_negated in k) \
| set(k for k in s if l_negated not in k and l not in k) \
| set([frozenset([l])])
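# Illustrative sketch (defined but never called): one application of reduce()
# with the literal ("+", "p") performs a unit cut on {-p, q}, drops {p, r}
# because it is subsumed by the unit clause {p}, and keeps the unit itself.
def _reduce_example():
    s = { frozenset({("-", "p"), ("+", "q")}),
          frozenset({("+", "p"), ("+", "r")}),
          frozenset({("+", "p")}) }
    # returns { frozenset({("+", "q")}), frozenset({("+", "p")}) }
    return reduce(s, ("+", "p"))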
# We select an arbitrary literal from an arbitrary clause that has not been used
# before.
def select_literal(s, forbidden):
literals = set(l for c in s for l in c) - forbidden
return next(iter(literals))
# Compute the complement of the literal l.
def negate_literal(l):
if (l[0] == "+"):
return ("-", l[1])
else:
return ("+", l[1])
if __name__ == "__main__":
c1 = frozenset((("+", "r"), ("+", "p"), ("+", "s")))
c2 = frozenset((("+", "r"), ("+", "s")))
c3 = frozenset((("+", "p"), ("+", "q"), ("+", "s")))
c4 = frozenset((("-", "p"), ("-", "q")))
c5 = frozenset((("-", "p"), ("+", "s"), ("-", "r")))
c6 = frozenset((("+", "p"), ("-", "q"), ("+", "r")))
c7 = frozenset((("-", "r"), ("-", "s"), ("+", "q")))
c8 = frozenset((("-", "p"), ("-", "s")))
c9 = frozenset((("+", "p"), ("-", "r"), ("-", "q")))
c0 = frozenset((("-", "p"), ("+", "r"), ("+", "q"), ("-", "s")))
m = set([c0, c1, c2, c3, c4, c5, c6, c7, c8, c9])
print(davis_putnam(m, set()))
|
the-stack_0_24699
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Built-in metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import types
import numpy as np
import six
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import categorical_hinge
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
from tensorflow.python.keras.saving.saved_model import metric_serialization
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.generic_utils import to_list
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_variable
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import util as tf_losses_utils
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.metrics.Metric')
@six.add_metaclass(abc.ABCMeta)
class Metric(base_layer.Layer):
"""Encapsulates metric logic and state.
Usage:
```python
m = SomeMetric(...)
for input in ...:
m.update_state(input)
print('Final result: ', m.result().numpy())
```
Usage with tf.keras API:
```python
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=[tf.keras.metrics.CategoricalAccuracy()])
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
model.fit(dataset, epochs=10)
```
To be implemented by subclasses:
* `__init__()`: All state variables should be created in this method by
calling `self.add_weight()` like: `self.var = self.add_weight(...)`
* `update_state()`: Has all updates to the state variables like:
self.var.assign_add(...).
* `result()`: Computes and returns a value for the metric
from the state variables.
Example subclass implementation:
```python
class BinaryTruePositives(tf.keras.metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_to(sample_weight, values.shape)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
```
"""
def __init__(self, name=None, dtype=None, **kwargs):
super(Metric, self).__init__(name=name, dtype=dtype, **kwargs)
self.stateful = True # All metric layers are stateful.
self.built = True
if not base_layer_utils.v2_dtype_behavior_enabled():
# We only do this when the V2 behavior is not enabled, as when it is
# enabled, the dtype already defaults to floatx.
self._dtype = K.floatx() if dtype is None else dtypes.as_dtype(dtype).name
def __new__(cls, *args, **kwargs):
obj = super(Metric, cls).__new__(cls)
# If `update_state` is not in eager/tf.function and it is not from a
# built-in metric, wrap it in `tf.function`. This is so that users writing
# custom metrics in v1 need not worry about control dependencies and
# return ops.
if (base_layer_utils.is_in_eager_or_tf_function() or
is_built_in(cls)):
update_state_fn = obj.update_state
else:
if isinstance(obj.update_state, def_function.Function):
update_state_fn = obj.update_state
else:
update_state_fn = def_function.function(obj.update_state)
obj.update_state = types.MethodType(
metrics_utils.update_state_wrapper(update_state_fn), obj)
obj.result = types.MethodType(metrics_utils.result_wrapper(obj.result), obj)
return obj
def __call__(self, *args, **kwargs):
"""Accumulates statistics and then computes metric result value.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric,
passed on to `update_state()`.
Returns:
The metric value tensor.
"""
def replica_local_fn(*args, **kwargs):
"""Updates the state of the metric in a replica-local context."""
update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable
update_ops = []
if update_op is not None:
update_ops.append(update_op)
with ops.control_dependencies(update_ops):
result_t = self.result() # pylint: disable=not-callable
# We are adding the metric object as metadata on the result tensor.
# This is required when we want to use a metric with `add_metric` API on
# a Model/Layer in graph mode. This metric instance will later be used
# to reset variable state after each epoch of training.
# Example:
# model = Model()
# mean = Mean()
# model.add_metric(mean(values), name='mean')
result_t._metric_obj = self # pylint: disable=protected-access
return result_t
from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top
return distributed_training_utils.call_replica_local_fn(
replica_local_fn, *args, **kwargs)
@property
def dtype(self):
return self._dtype
def get_config(self):
"""Returns the serializable config of the metric."""
return {'name': self.name, 'dtype': self.dtype}
def reset_states(self):
"""Resets all of the metric state variables.
This function is called between epochs/steps,
when a metric is evaluated during training.
"""
K.batch_set_value([(v, 0) for v in self.variables])
@abc.abstractmethod
def update_state(self, *args, **kwargs):
"""Accumulates statistics for the metric.
Note: This function is executed as a graph function in graph mode.
This means:
a) Operations on the same resource are executed in textual order.
This should make it easier to do things like add the updated
value of a variable to another, for example.
b) You don't need to worry about collecting the update ops to execute.
All update ops added to the graph by this function will be executed.
As a result, code should generally work the same way with graph or
eager execution.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def result(self):
"""Computes and returns the metric value tensor.
Result computation is an idempotent operation that simply calculates the
metric value using the state variables.
"""
raise NotImplementedError('Must be implemented in subclasses.')
### For use by subclasses ###
@doc_controls.for_subclass_implementers
def add_weight(self,
name,
shape=(),
aggregation=tf_variables.VariableAggregation.SUM,
synchronization=tf_variables.VariableSynchronization.ON_READ,
initializer=None,
dtype=None):
"""Adds state variable. Only for use by subclasses."""
from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top
if distribute_ctx.has_strategy():
strategy = distribute_ctx.get_strategy()
else:
strategy = None
# TODO(b/120571621): Make `ON_READ` work with Keras metrics on TPU.
if distributed_training_utils.is_tpu_strategy(strategy):
synchronization = tf_variables.VariableSynchronization.ON_WRITE
return super(Metric, self).add_weight(
name=name,
shape=shape,
dtype=self._dtype if dtype is None else dtype,
trainable=False,
initializer=initializer,
collections=[],
synchronization=synchronization,
aggregation=aggregation)
### End: For use by subclasses ###
@property
def _trackable_saved_model_saver(self):
return metric_serialization.MetricSavedModelSaver(self)
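# Illustrative sketch (not shipped with Keras): a minimal custom metric only
# needs to create state with `add_weight`, accumulate it in `update_state`,
# and combine it in `result`. The `CountNonZero` class below is hypothetical.
#
#   class CountNonZero(Metric):
#
#     def __init__(self, name='count_nonzero', dtype=None):
#       super(CountNonZero, self).__init__(name=name, dtype=dtype)
#       self.count = self.add_weight(
#           'count', initializer=init_ops.zeros_initializer)
#
#     def update_state(self, values, sample_weight=None):
#       values = math_ops.cast(
#           math_ops.not_equal(math_ops.cast(values, self._dtype), 0),
#           self._dtype)
#       return self.count.assign_add(math_ops.reduce_sum(values))
#
#     def result(self):
#       return array_ops.identity(self.count)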
class Reduce(Metric):
"""Encapsulates metrics that perform a reduce operation on the values."""
def __init__(self, reduction, name, dtype=None):
"""Creates a `Reduce` instance.
Args:
reduction: a `tf.keras.metrics.Reduction` enum value.
name: string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Reduce, self).__init__(name=name, dtype=dtype)
self.reduction = reduction
with ops.init_scope():
self.total = self.add_weight(
'total', initializer=init_ops.zeros_initializer)
if reduction in [metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
metrics_utils.Reduction.WEIGHTED_MEAN]:
self.count = self.add_weight(
'count', initializer=init_ops.zeros_initializer)
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the reduction metric.
For example, if `values` is [1, 3, 5, 7] and reduction=SUM_OVER_BATCH_SIZE,
then the value of `result()` is 4. If `sample_weight` is specified as
[1, 1, 0, 0], the weighted values are [1, 3, 0, 0], so `result()` would be
1 for SUM_OVER_BATCH_SIZE (weighted sum divided by the full batch size) and
2 for WEIGHTED_MEAN (weighted sum divided by the sum of weights).
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to 1.
Returns:
Update op.
"""
[values], sample_weight = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[values], sample_weight)
values = math_ops.cast(values, self._dtype)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
values, _, sample_weight = tf_losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
except ValueError:
# Reduce values to same ndim as weight array
ndim = K.ndim(values)
weight_ndim = K.ndim(sample_weight)
if self.reduction == metrics_utils.Reduction.SUM:
values = math_ops.reduce_sum(
values, axis=list(range(weight_ndim, ndim)))
else:
values = math_ops.reduce_mean(
values, axis=list(range(weight_ndim, ndim)))
values = math_ops.multiply(values, sample_weight)
value_sum = math_ops.reduce_sum(values)
with ops.control_dependencies([value_sum]):
update_total_op = self.total.assign_add(value_sum)
# Exit early if the reduction doesn't have a denominator.
if self.reduction == metrics_utils.Reduction.SUM:
return update_total_op
# Update `count` for reductions that require a denominator.
if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:
num_values = math_ops.cast(array_ops.size(values), self._dtype)
elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:
if sample_weight is None:
num_values = math_ops.cast(array_ops.size(values), self._dtype)
else:
num_values = math_ops.reduce_sum(sample_weight)
else:
raise NotImplementedError(
'reduction [%s] not implemented' % self.reduction)
with ops.control_dependencies([update_total_op]):
return self.count.assign_add(num_values)
def result(self):
if self.reduction == metrics_utils.Reduction.SUM:
return array_ops.identity(self.total)
elif self.reduction in [
metrics_utils.Reduction.WEIGHTED_MEAN,
metrics_utils.Reduction.SUM_OVER_BATCH_SIZE
]:
return math_ops.div_no_nan(self.total, self.count)
else:
raise NotImplementedError(
'reduction [%s] not implemented' % self.reduction)
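# Illustrative note on the weighting logic in `update_state` above (a sketch,
# not additional API): when `sample_weight` has lower rank than `values`, the
# weight is either broadcast against `values` or, failing that, `values` is
# first reduced down to the weight's rank. Either way a per-example weight of
# 0 masks that example, e.g.:
#   m = Mean()
#   m.update_state([[1., 2., 3.], [4., 5., 6.]], sample_weight=[1., 0.])
#   m.result()  # 2.0: only the first example contributes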
@keras_export('keras.metrics.Sum')
class Sum(Reduce):
"""Computes the (weighted) sum of the given values.
For example, if values is [1, 3, 5, 7] then the sum is 16.
If the weights were specified as [1, 1, 0, 0] then the sum would be 4.
This metric creates one variable, `total`, that is used to compute the sum of
`values`. This is ultimately returned as `sum`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0
to mask values.
Usage:
>>> m = tf.keras.metrics.Sum()
>>> _ = m.update_state([1, 3, 5, 7])
>>> m.result().numpy()
16.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs))
model.compile('sgd', loss='mse')
```
"""
def __init__(self, name='sum', dtype=None):
"""Creates a `Sum` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Sum, self).__init__(reduction=metrics_utils.Reduction.SUM,
name=name, dtype=dtype)
@keras_export('keras.metrics.Mean')
class Mean(Reduce):
"""Computes the (weighted) mean of the given values.
For example, if values is [1, 3, 5, 7] then the mean is 4.
If the weights were specified as [1, 1, 0, 0] then the mean would be 2.
This metric creates two variables, `total` and `count` that are used to
compute the average of `values`. This average is ultimately returned as `mean`
which is an idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.Mean()
>>> _ = m.update_state([1, 3, 5, 7])
>>> m.result().numpy()
4.0
>>> m.reset_states()
>>> _ = m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
>>> m.result().numpy()
2.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs))
model.compile('sgd', loss='mse')
```
"""
def __init__(self, name='mean', dtype=None):
"""Creates a `Mean` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Mean, self).__init__(
reduction=metrics_utils.Reduction.WEIGHTED_MEAN, name=name, dtype=dtype)
@keras_export('keras.metrics.MeanRelativeError')
class MeanRelativeError(Mean):
"""Computes the mean relative error by normalizing with the given values.
This metric creates two local variables, `total` and `count` that are used to
compute the mean relative error. This is weighted by `sample_weight`, and
it is ultimately returned as `mean_relative_error`:
an idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
>>> _ = m.update_state([1, 3, 2, 3], [2, 4, 6, 8])
>>> # metric = mean(|y_pred - y_true| / normalizer)
>>> # = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3])
>>> # = 5/4 = 1.25
>>> m.result().numpy()
1.25
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
```
"""
def __init__(self, normalizer, name=None, dtype=None):
"""Creates a `MeanRelativeError` instance.
Args:
normalizer: The normalizer values with the same shape as predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(MeanRelativeError, self).__init__(name=name, dtype=dtype)
normalizer = math_ops.cast(normalizer, self._dtype)
self.normalizer = normalizer
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
[y_pred, y_true], sample_weight = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_pred, y_true], sample_weight)
y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
y_pred, self.normalizer = confusion_matrix.remove_squeezable_dimensions(
y_pred, self.normalizer)
y_pred.shape.assert_is_compatible_with(y_true.shape)
relative_errors = math_ops.div_no_nan(
math_ops.abs(y_true - y_pred), self.normalizer)
return super(MeanRelativeError, self).update_state(
relative_errors, sample_weight=sample_weight)
def get_config(self):
n = self.normalizer
config = {'normalizer': K.eval(n) if is_tensor_or_variable(n) else n}
base_config = super(MeanRelativeError, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
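# Illustrative sketch of the config round trip enabled by `get_config` above
# (assumes eager execution so `K.eval` can read the normalizer tensor):
#   m = MeanRelativeError(normalizer=[1, 3, 2, 3])
#   config = m.get_config()  # includes 'normalizer', 'name' and 'dtype'
#   m2 = MeanRelativeError.from_config(config)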
class MeanMetricWrapper(Mean):
"""Wraps a stateless metric function with the Mean metric."""
def __init__(self, fn, name=None, dtype=None, **kwargs):
"""Creates a `MeanMetricWrapper` instance.
Args:
fn: The metric function to wrap, with signature
`fn(y_true, y_pred, **kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
`y_true` and `y_pred` should have the same shape.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
sample_weight: Optional `sample_weight` acts as a
coefficient for the metric. If a scalar is provided, then the metric is
simply scaled by the given value. If `sample_weight` is a tensor of size
`[batch_size]`, then the metric for each sample of the batch is rescaled
by the corresponding element in the `sample_weight` vector. If the shape
of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
to this shape), then each metric element of `y_pred` is scaled by the
corresponding value of `sample_weight`. (Note on `dN-1`: all metric
functions reduce by 1 dimension, usually the last axis (-1)).
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
[y_true, y_pred], sample_weight = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_true, y_pred], sample_weight)
y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
matches = self._fn(y_true, y_pred, **self._fn_kwargs)
return super(MeanMetricWrapper, self).update_state(
matches, sample_weight=sample_weight)
def get_config(self):
config = {}
if type(self) is MeanMetricWrapper: # pylint: disable=unidiomatic-typecheck
# Only include function argument when the object is a MeanMetricWrapper
# and not a subclass.
config['fn'] = self._fn
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if is_tensor_or_variable(v) else v
base_config = super(MeanMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
# Note that while MeanMetricWrapper itself isn't public, objects of this
# class may be created and added to the model by calling model.compile.
if cls is MeanMetricWrapper:
fn = get(config.pop('fn'))
return cls(fn, **config)
return super(MeanMetricWrapper, cls).from_config(config)
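# Illustrative sketch of wrapping a stateless per-example function with
# `MeanMetricWrapper`; the `abs_error` function below is hypothetical, not a
# Keras built-in:
#   def abs_error(y_true, y_pred):
#     return math_ops.abs(y_true - y_pred)
#
#   m = MeanMetricWrapper(abs_error, name='abs_error')
#   m.update_state([0., 0.], [1., 3.])
#   m.result()  # 2.0: the mean of [1., 3.]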
@keras_export('keras.metrics.Accuracy')
class Accuracy(MeanMetricWrapper):
"""Calculates how often predictions equals labels.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `binary accuracy`: an idempotent operation that simply
divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.Accuracy()
>>> _ = m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
>>> m.result().numpy()
0.75
>>> m.reset_states()
>>> _ = m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
... sample_weight=[1, 1, 0, 0])
>>> m.result().numpy()
0.5
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Accuracy()])
```
"""
def __init__(self, name='accuracy', dtype=None):
super(Accuracy, self).__init__(accuracy, name, dtype=dtype)
@keras_export('keras.metrics.BinaryAccuracy')
class BinaryAccuracy(MeanMetricWrapper):
"""Calculates how often predictions matches binary labels.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `binary accuracy`: an idempotent operation that simply
divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.BinaryAccuracy()
>>> _ = m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
>>> m.result().numpy()
0.75
>>> m.reset_states()
>>> _ = m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
0.5
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.BinaryAccuracy()])
```
"""
def __init__(self, name='binary_accuracy', dtype=None, threshold=0.5):
"""Creates a `BinaryAccuracy` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
threshold: (Optional) Float representing the threshold for deciding
whether prediction values are 1 or 0.
"""
super(BinaryAccuracy, self).__init__(
binary_accuracy, name, dtype=dtype, threshold=threshold)
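# Illustrative sketch of the `threshold` argument above: raising the threshold
# changes which probabilities are counted as positive predictions.
#   m = BinaryAccuracy(threshold=0.7)
#   m.update_state([[1], [1], [0], [0]], [[0.98], [0.6], [0.1], [0.6]])
#   m.result()  # 0.75: the 0.6 predictions fall below 0.7 and count as 0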
@keras_export('keras.metrics.CategoricalAccuracy')
class CategoricalAccuracy(MeanMetricWrapper):
"""Calculates how often predictions matches one-hot labels.
You can provide logits of classes as `y_pred`, since argmax of
logits and probabilities are same.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `categorical accuracy`: an idempotent operation that
simply divides `total` by `count`.
`y_pred` should be passed in as a vector of probabilities (or logits) and
`y_true` as a one-hot vector, rather than as integer labels. If necessary,
use `tf.one_hot` to expand `y_true` into a vector.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.CategoricalAccuracy()
>>> _ = m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
... [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> _ = m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
... [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalAccuracy()])
```
"""
def __init__(self, name='categorical_accuracy', dtype=None):
"""Creates a `CategoricalAccuracy` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(CategoricalAccuracy, self).__init__(
categorical_accuracy, name, dtype=dtype)
@keras_export('keras.metrics.SparseCategoricalAccuracy')
class SparseCategoricalAccuracy(MeanMetricWrapper):
"""Calculates how often predictions matches integer labels.
You can provide logits of classes as `y_pred`, since argmax of
logits and probabilities are same.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `sparse categorical accuracy`: an idempotent operation
that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.SparseCategoricalAccuracy()
>>> _ = m.update_state([[2], [1]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> _ = m.update_state([[2], [1]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
```
"""
def __init__(self, name='sparse_categorical_accuracy', dtype=None):
super(SparseCategoricalAccuracy, self).__init__(
sparse_categorical_accuracy, name, dtype=dtype)
@keras_export('keras.metrics.TopKCategoricalAccuracy')
class TopKCategoricalAccuracy(MeanMetricWrapper):
"""Computes how often targets are in the top `K` predictions.
Usage:
>>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
>>> _ = m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> _ = m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name='top_k_categorical_accuracy', dtype=None):
"""Creates a `TopKCategoricalAccuracy` instance.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(TopKCategoricalAccuracy, self).__init__(
top_k_categorical_accuracy, name, dtype=dtype, k=k)
@keras_export('keras.metrics.SparseTopKCategoricalAccuracy')
class SparseTopKCategoricalAccuracy(MeanMetricWrapper):
"""Computes how often integer targets are in the top `K` predictions.
Usage:
>>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
>>> _ = m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> _ = m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name='sparse_top_k_categorical_accuracy', dtype=None):
"""Creates a `SparseTopKCategoricalAccuracy` instance.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(SparseTopKCategoricalAccuracy, self).__init__(
sparse_top_k_categorical_accuracy, name, dtype=dtype, k=k)
class _ConfusionMatrixConditionCount(Metric):
"""Calculates the number of the given confusion matrix condition."""
def __init__(self,
confusion_matrix_cond,
thresholds=None,
name=None,
dtype=None):
"""Creates a `_ConfusionMatrixConditionCount` instance.
Args:
confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions.
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype)
self._confusion_matrix_cond = confusion_matrix_cond
self.init_thresholds = thresholds
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=0.5)
self.accumulator = self.add_weight(
'accumulator',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the given confusion matrix condition statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{self._confusion_matrix_cond: self.accumulator},
y_true,
y_pred,
thresholds=self.thresholds,
sample_weight=sample_weight)
def result(self):
if len(self.thresholds) == 1:
result = self.accumulator[0]
else:
result = self.accumulator
return ops.convert_to_tensor_v2(result)
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {'thresholds': self.init_thresholds}
base_config = super(_ConfusionMatrixConditionCount, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
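# Illustrative sketch of the multi-threshold behaviour above: passing a list
# of thresholds accumulates one count per threshold and `result()` returns a
# vector.
#   m = FalsePositives(thresholds=[0.3, 0.7])
#   m.update_state([0, 0, 1, 1], [0.4, 0.9, 0.6, 0.2])
#   m.result()  # [2., 1.]: two false positives at 0.3, one at 0.7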
@keras_export('keras.metrics.FalsePositives')
class FalsePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of false positives.
If `sample_weight` is given, calculates the sum of the weights of
false positives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.FalsePositives()
>>> _ = m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
>>> m.result().numpy()
2.0
>>> m.reset_states()
>>> _ = m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.FalsePositives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
"""Creates a `FalsePositives` instance.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(FalsePositives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.FalseNegatives')
class FalseNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of false negatives.
If `sample_weight` is given, calculates the sum of the weights of
false negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.FalseNegatives()
>>> _ = m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
>>> m.result().numpy()
2.0
>>> m.reset_states()
>>> _ = m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.FalseNegatives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
"""Creates a `FalseNegatives` instance.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(FalseNegatives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.TrueNegatives')
class TrueNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of true negatives.
If `sample_weight` is given, calculates the sum of the weights of
true negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of true negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.TrueNegatives()
>>> _ = m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
>>> m.result().numpy()
2.0
>>> m.reset_states()
>>> _ = m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.TrueNegatives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
"""Creates a `TrueNegatives` instance.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(TrueNegatives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.TruePositives')
class TruePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of true positives.
If `sample_weight` is given, calculates the sum of the weights of
true positives. This metric creates one local variable, `true_positives`
that is used to keep track of the number of true positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.TruePositives()
>>> _ = m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result().numpy()
2.0
>>> m.reset_states()
>>> _ = m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.TruePositives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
"""Creates a `TruePositives` instance.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(TruePositives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.Precision')
class Precision(Metric):
"""Computes the precision of the predictions with respect to the labels.
The metric creates two local variables, `true_positives` and `false_positives`
that are used to compute the precision. This value is ultimately returned as
`precision`, an idempotent operation that simply divides `true_positives`
by the sum of `true_positives` and `false_positives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `top_k` is set, we'll calculate precision as how often on average a class
among the top-k classes with the highest predicted values of a batch entry is
correct and can be found in the label for that entry.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold and/or in the
top-k highest predictions, and computing the fraction of them for which
`class_id` is indeed a correct label.
Usage:
>>> m = tf.keras.metrics.Precision()
>>> _ = m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result().numpy()
0.6666667
>>> m.reset_states()
>>> _ = m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
>>> # With top_k=2, only the 2 highest-scoring predictions are treated as
>>> # positive; here all scores tie, so the first two entries are kept
>>> m = tf.keras.metrics.Precision(top_k=2)
>>> _ = m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
>>> m.result().numpy()
0.0
>>> # With top_k=4, all four predictions are treated as positive
>>> m = tf.keras.metrics.Precision(top_k=4)
>>> _ = m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
>>> m.result().numpy()
0.5
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Precision()])
```
"""
def __init__(self,
thresholds=None,
top_k=None,
class_id=None,
name=None,
dtype=None):
"""Creates a `Precision` instance.
Args:
thresholds: (Optional) A float value or a python list/tuple of float
threshold values in [0, 1]. A threshold is compared with prediction
values to determine the truth value of predictions (i.e., above the
threshold is `true`, below is `false`). One metric value is generated
for each threshold value. If neither thresholds nor top_k are set, the
default is to calculate precision with `thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating precision.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Precision, self).__init__(name=name, dtype=dtype)
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold)
self.true_positives = self.add_weight(
'true_positives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false positive statistics.
Args:
y_true: The ground truth values, with the same dimensions as `y_pred`.
Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range `[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives
},
y_true,
y_pred,
thresholds=self.thresholds,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight)
def result(self):
result = math_ops.div_no_nan(self.true_positives,
self.true_positives + self.false_positives)
return result[0] if len(self.thresholds) == 1 else result
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {
'thresholds': self.init_thresholds,
'top_k': self.top_k,
'class_id': self.class_id
}
base_config = super(Precision, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
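# Illustrative sketch of the `class_id` argument above, restricting precision
# to a single class of a probability / one-hot output:
#   m = Precision(class_id=1)
#   m.update_state([[0, 1, 0], [0, 0, 1]], [[0.1, 0.8, 0.1], [0.2, 0.7, 0.1]])
#   m.result()  # 0.5: class 1 is predicted twice but correct only once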
@keras_export('keras.metrics.Recall')
class Recall(Metric):
"""Computes the recall of the predictions with respect to the labels.
This metric creates two local variables, `true_positives` and
`false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `top_k` is set, recall will be computed as how often on average a class
among the labels of a batch entry is in the top-k predictions.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing the
fraction of them for which `class_id` is above the threshold and/or in the
top-k predictions.
Usage:
>>> m = tf.keras.metrics.Recall()
>>> _ = m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result().numpy()
0.6666667
>>> m.reset_states()
>>> _ = m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Recall()])
```
"""
def __init__(self,
thresholds=None,
top_k=None,
class_id=None,
name=None,
dtype=None):
"""Creates a `Recall` instance.
Args:
thresholds: (Optional) A float value or a python list/tuple of float
threshold values in [0, 1]. A threshold is compared with prediction
values to determine the truth value of predictions (i.e., above the
threshold is `true`, below is `false`). One metric value is generated
for each threshold value. If neither thresholds nor top_k are set, the
default is to calculate recall with `thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating recall.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Recall, self).__init__(name=name, dtype=dtype)
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold)
self.true_positives = self.add_weight(
'true_positives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false negative statistics.
Args:
y_true: The ground truth values, with the same dimensions as `y_pred`.
Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range `[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives
},
y_true,
y_pred,
thresholds=self.thresholds,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight)
def result(self):
result = math_ops.div_no_nan(self.true_positives,
self.true_positives + self.false_negatives)
return result[0] if len(self.thresholds) == 1 else result
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {
'thresholds': self.init_thresholds,
'top_k': self.top_k,
'class_id': self.class_id
}
base_config = super(Recall, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@six.add_metaclass(abc.ABCMeta)
class SensitivitySpecificityBase(Metric):
"""Abstract base class for computing sensitivity and specificity.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
"""
def __init__(self, value, num_thresholds=200, name=None, dtype=None):
super(SensitivitySpecificityBase, self).__init__(name=name, dtype=dtype)
if num_thresholds <= 0:
raise ValueError('`num_thresholds` must be > 0.')
self.value = value
self.true_positives = self.add_weight(
'true_positives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.true_negatives = self.add_weight(
'true_negatives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
# Compute `num_thresholds` thresholds in [0, 1]
if num_thresholds == 1:
self.thresholds = [0.5]
else:
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
self.thresholds = [0.0] + thresholds + [1.0]
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
},
y_true,
y_pred,
thresholds=self.thresholds,
sample_weight=sample_weight)
def reset_states(self):
num_thresholds = len(self.thresholds)
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
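# Illustrative note on the threshold grid built in `__init__` above: the
# `num_thresholds` points are evenly spaced in [0, 1] and always include the
# endpoints, e.g.:
#   SensitivityAtSpecificity(0.5, num_thresholds=5).thresholds
#   # -> [0.0, 0.25, 0.5, 0.75, 1.0]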
@keras_export('keras.metrics.SensitivityAtSpecificity')
class SensitivityAtSpecificity(SensitivitySpecificityBase):
"""Computes the sensitivity at a given specificity.
`Sensitivity` measures the proportion of actual positives that are correctly
identified as such (tp / (tp + fn)).
`Specificity` measures the proportion of actual negatives that are correctly
identified as such (tn / (tn + fp)).
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
sensitivity at the given specificity. The threshold for the given specificity
value is computed and used to evaluate the corresponding sensitivity.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Usage:
>>> m = tf.keras.metrics.SensitivityAtSpecificity(0.4, num_thresholds=1)
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.SensitivityAtSpecificity()])
```
"""
def __init__(self, specificity, num_thresholds=200, name=None, dtype=None):
"""Creates a `SensitivityAtSpecificity` instance.
Args:
specificity: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given specificity.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
if specificity < 0 or specificity > 1:
raise ValueError('`specificity` must be in the range [0, 1].')
self.specificity = specificity
self.num_thresholds = num_thresholds
super(SensitivityAtSpecificity, self).__init__(
specificity, num_thresholds=num_thresholds, name=name, dtype=dtype)
def result(self):
# Calculate specificities at all the thresholds.
specificities = math_ops.div_no_nan(
self.true_negatives, self.true_negatives + self.false_positives)
# Find the index of the threshold where the specificity is closest to the
# given specificity.
min_index = math_ops.argmin(
math_ops.abs(specificities - self.value), axis=0)
min_index = math_ops.cast(min_index, dtypes.int32)
# Compute sensitivity at that index.
return math_ops.div_no_nan(
self.true_positives[min_index],
self.true_positives[min_index] + self.false_negatives[min_index])
def get_config(self):
config = {
'num_thresholds': self.num_thresholds,
'specificity': self.specificity
}
base_config = super(SensitivityAtSpecificity, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.SpecificityAtSensitivity')
class SpecificityAtSensitivity(SensitivitySpecificityBase):
"""Computes the specificity at a given sensitivity.
`Sensitivity` measures the proportion of actual positives that are correctly
identified as such (tp / (tp + fn)).
`Specificity` measures the proportion of actual negatives that are correctly
identified as such (tn / (tn + fp)).
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
specificity at the given sensitivity. The threshold for the given sensitivity
value is computed and used to evaluate the corresponding specificity.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Usage:
>>> m = tf.keras.metrics.SpecificityAtSensitivity(0.8, num_thresholds=1)
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
>>> m.result().numpy()
1.0
>>> m.reset_states()
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.SpecificityAtSensitivity()])
```
"""
def __init__(self, sensitivity, num_thresholds=200, name=None, dtype=None):
"""Creates a `SpecificityAtSensitivity` instance.
Args:
sensitivity: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given sensitivity.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
if sensitivity < 0 or sensitivity > 1:
raise ValueError('`sensitivity` must be in the range [0, 1].')
self.sensitivity = sensitivity
self.num_thresholds = num_thresholds
super(SpecificityAtSensitivity, self).__init__(
sensitivity, num_thresholds=num_thresholds, name=name, dtype=dtype)
def result(self):
# Calculate sensitivities at all the thresholds.
sensitivities = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_negatives)
# Find the index of the threshold where the sensitivity is closest to the
# requested value.
min_index = math_ops.argmin(
math_ops.abs(sensitivities - self.value), axis=0)
min_index = math_ops.cast(min_index, dtypes.int32)
# Compute specificity at that index.
return math_ops.div_no_nan(
self.true_negatives[min_index],
self.true_negatives[min_index] + self.false_positives[min_index])
def get_config(self):
config = {
'num_thresholds': self.num_thresholds,
'sensitivity': self.sensitivity
}
base_config = super(SpecificityAtSensitivity, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.PrecisionAtRecall')
class PrecisionAtRecall(SensitivitySpecificityBase):
"""Computes the precision at a given recall.
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
precision at the given recall. The threshold for the given recall
value is computed and used to evaluate the corresponding precision.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.PrecisionAtRecall(0.8, num_thresholds=1)
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
>>> m.result().numpy()
1.0
>>> m.reset_states()
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.PrecisionAtRecall(recall=0.8)])
```
"""
def __init__(self, recall, num_thresholds=200, name=None, dtype=None):
"""Creates a `PrecisionAtRecall` instance.
Args:
recall: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given recall.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
if recall < 0 or recall > 1:
raise ValueError('`recall` must be in the range [0, 1].')
self.recall = recall
self.num_thresholds = num_thresholds
super(PrecisionAtRecall, self).__init__(
value=recall,
num_thresholds=num_thresholds,
name=name,
dtype=dtype)
def result(self):
# Calculate recall at all the thresholds.
recalls = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_negatives)
# Find the index of the threshold where the recall is closest to the
# requested value.
min_index = math_ops.argmin(
math_ops.abs(recalls - self.value), axis=0)
min_index = math_ops.cast(min_index, dtypes.int32)
# Compute precision at that index.
return math_ops.div_no_nan(
self.true_positives[min_index],
self.true_positives[min_index] + self.false_positives[min_index])
def get_config(self):
config = {'num_thresholds': self.num_thresholds, 'recall': self.recall}
base_config = super(PrecisionAtRecall, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.RecallAtPrecision')
class RecallAtPrecision(SensitivitySpecificityBase):
"""Computes the maximally achievable recall at a required precision.
For a given score-label distribution, the required precision might not
be achievable; in that case 0.0 is returned as the recall.
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
recall at the given precision. The threshold for the given precision
value is computed and used to evaluate the corresponding recall.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.RecallAtPrecision(0.8, num_thresholds=1)
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.RecallAtPrecision(precision=0.8)])
```
"""
def __init__(self, precision, num_thresholds=200, name=None, dtype=None):
"""Creates a `RecallAtPrecision` instance.
Args:
precision: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given precision.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
if precision < 0 or precision > 1:
raise ValueError('`precision` must be in the range [0, 1].')
self.precision = precision
self.num_thresholds = num_thresholds
super(RecallAtPrecision, self).__init__(
value=precision,
num_thresholds=num_thresholds,
name=name,
dtype=dtype)
def result(self):
# Calculate precision and recall at all the thresholds.
# All recalls are computed, because they are not a monotonic function of
# precision and we want to search for the highest feasible recall.
precisions = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_positives)
recalls = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_negatives)
# Find best recall where the precision is as good as required.
feasible = array_ops.where(math_ops.greater_equal(precisions, self.value))
feasible_exists = math_ops.greater(array_ops.size(feasible), 0)
best_recall = control_flow_ops.cond(
feasible_exists,
lambda: math_ops.reduce_max(array_ops.gather(recalls, feasible)),
lambda: 0.0)
return best_recall
def get_config(self):
config = {'num_thresholds': self.num_thresholds,
'precision': self.precision}
base_config = super(RecallAtPrecision, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
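# Illustrative sketch of the feasibility check in `result` above: when no
# threshold reaches the requested precision, 0.0 is reported instead of a
# misleading recall value.
#   m = RecallAtPrecision(0.99, num_thresholds=1)
#   m.update_state([0, 1], [0.9, 0.1])
#   m.result()  # 0.0: precision 0.99 is not achievable on this batch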
@keras_export('keras.metrics.AUC')
class AUC(Metric):
"""Computes the approximate AUC (Area under the curve) via a Riemann sum.
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the AUC.
To discretize the AUC curve, a linearly spaced set of thresholds is used to
compute pairs of recall and precision values. The area under the ROC-curve is
therefore computed using the height of the recall values by the false positive
rate, while the area under the PR-curve is computed using the height of
the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`. The `thresholds` parameter can be
used to manually specify thresholds which split the predictions more evenly.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case. Setting `summation_method`
to 'minoring' or 'majoring' can help quantify the error in the approximation
by providing a lower or upper bound estimate of the AUC.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> m = tf.keras.metrics.AUC(num_thresholds=3)
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
>>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
>>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
>>> # recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
>>> # auc = ((((1+0.5)/2)*(1-0))+ (((0.5+0)/2)*(0-0))) = 0.75
>>> m.result().numpy()
0.75
>>> m.reset_states()
>>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
1.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.AUC()])
```
"""
def __init__(self,
num_thresholds=200,
curve='ROC',
summation_method='interpolation',
name=None,
dtype=None,
thresholds=None,
multi_label=False,
label_weights=None):
"""Creates an `AUC` instance.
Args:
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use when discretizing the ROC curve. Values must be > 1.
curve: (Optional) Specifies the name of the curve to be computed, 'ROC'
[default] or 'PR' for the Precision-Recall-curve.
summation_method: (Optional) Specifies the Riemann summation method used
(https://en.wikipedia.org/wiki/Riemann_sum): 'interpolation' [default],
applies mid-point summation scheme for `ROC`. For PR-AUC, interpolates
(true/false) positives but not the ratio that is precision (see Davis
& Goadrich 2006 for details); 'minoring' that applies left summation
for increasing intervals and right summation for decreasing intervals;
'majoring' that does the opposite.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
thresholds: (Optional) A list of floating point values to use as the
thresholds for discretizing the curve. If set, the `num_thresholds`
parameter is ignored. Values should be in [0, 1]. Endpoint thresholds
equal to {-epsilon, 1+epsilon} for a small positive epsilon value will
be automatically included with these to correctly handle predictions
equal to exactly 0 or 1.
multi_label: boolean indicating whether multilabel data should be
treated as such, wherein AUC is computed separately for each label and
then averaged across labels, or (when False) whether the data should be
flattened into a single label before AUC computation. In the latter
case, when multilabel data is passed to AUC, each label-prediction pair
is treated as an individual data point. Should be set to False for
multi-class data.
label_weights: (optional) list, array, or tensor of non-negative weights
used to compute AUCs for multilabel data. When `multi_label` is True,
the weights are applied to the individual label AUCs when they are
averaged to produce the multi-label AUC. When it's False, they are used
to weight the individual label predictions in computing the confusion
matrix on the flattened data. Note that this is unlike class_weights in
that class_weights weights the example depending on the value of its
label, whereas label_weights depends only on the index of that label
before flattening; therefore `label_weights` should not be used for
multi-class data.
"""
# Validate configurations.
if isinstance(curve, metrics_utils.AUCCurve) and curve not in list(
metrics_utils.AUCCurve):
raise ValueError('Invalid curve: "{}". Valid options are: "{}"'.format(
curve, list(metrics_utils.AUCCurve)))
if isinstance(
summation_method,
metrics_utils.AUCSummationMethod) and summation_method not in list(
metrics_utils.AUCSummationMethod):
raise ValueError(
'Invalid summation method: "{}". Valid options are: "{}"'.format(
summation_method, list(metrics_utils.AUCSummationMethod)))
# Update properties.
if thresholds is not None:
# If specified, use the supplied thresholds.
self.num_thresholds = len(thresholds) + 2
thresholds = sorted(thresholds)
else:
if num_thresholds <= 1:
raise ValueError('`num_thresholds` must be > 1.')
# Otherwise, linearly interpolate (num_thresholds - 2) thresholds in
# (0, 1).
self.num_thresholds = num_thresholds
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
# Add an endpoint "threshold" below zero and above one for either
# threshold method to account for floating point imprecisions.
self.thresholds = [0.0 - K.epsilon()] + thresholds + [1.0 + K.epsilon()]
if isinstance(curve, metrics_utils.AUCCurve):
self.curve = curve
else:
self.curve = metrics_utils.AUCCurve.from_str(curve)
if isinstance(summation_method, metrics_utils.AUCSummationMethod):
self.summation_method = summation_method
else:
self.summation_method = metrics_utils.AUCSummationMethod.from_str(
summation_method)
super(AUC, self).__init__(name=name, dtype=dtype)
# Handle multilabel arguments.
self.multi_label = multi_label
if label_weights is not None:
label_weights = constant_op.constant(label_weights, dtype=self.dtype)
checks = [
check_ops.assert_non_negative(
label_weights,
message='All values of `label_weights` must be non-negative.')
]
self.label_weights = control_flow_ops.with_dependencies(
checks, label_weights)
else:
self.label_weights = None
self._built = False
if self.multi_label:
self._num_labels = None
else:
self._build(None)
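# Illustrative sketch of the `thresholds` argument handled above: explicit
# thresholds override `num_thresholds`, and the {-epsilon, 1+epsilon}
# endpoints are appended automatically.
#   m = AUC(thresholds=[0.25, 0.5, 0.75])
#   m.num_thresholds  # 5: the three supplied values plus the two endpoints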
def _build(self, shape):
"""Initialize TP, FP, TN, and FN tensors, given the shape of the data."""
if self.multi_label:
if shape.ndims != 2:
raise ValueError('`y_true` must have rank=2 when `multi_label` is '
'True. Found rank %s.' % shape.ndims)
self._num_labels = shape[1]
variable_shape = tensor_shape.TensorShape(
[tensor_shape.Dimension(self.num_thresholds), self._num_labels])
else:
variable_shape = tensor_shape.TensorShape(
[tensor_shape.Dimension(self.num_thresholds)])
self._build_input_shape = shape
# Create metric variables
self.true_positives = self.add_weight(
'true_positives',
shape=variable_shape,
initializer=init_ops.zeros_initializer)
self.true_negatives = self.add_weight(
'true_negatives',
shape=variable_shape,
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=variable_shape,
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=variable_shape,
initializer=init_ops.zeros_initializer)
if self.multi_label:
with ops.init_scope():
# This should only be necessary for handling v1 behavior. In v2, AUC
# should be initialized outside of any tf.functions, and therefore in
# eager mode.
if not context.executing_eagerly():
K._initialize_variables(K._get_session()) # pylint: disable=protected-access
self._built = True
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
deps = []
if not self._built:
self._build(tensor_shape.TensorShape(y_pred.shape))
if self.multi_label or (self.label_weights is not None):
# y_true should have shape (number of examples, number of labels).
shapes = [
(y_true, ('N', 'L'))
]
if self.multi_label:
# TP, TN, FP, and FN should all have shape
# (number of thresholds, number of labels).
shapes.extend([(self.true_positives, ('T', 'L')),
(self.true_negatives, ('T', 'L')),
(self.false_positives, ('T', 'L')),
(self.false_negatives, ('T', 'L'))])
if self.label_weights is not None:
# label_weights should be of length equal to the number of labels.
shapes.append((self.label_weights, ('L',)))
deps = [
check_ops.assert_shapes(
shapes, message='Number of labels is not consistent.')
]
# Only forward label_weights to update_confusion_matrix_variables when
# multi_label is False. Otherwise the averaging of individual label AUCs is
# handled in AUC.result
label_weights = None if self.multi_label else self.label_weights
with ops.control_dependencies(deps):
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES:
self.true_positives,
metrics_utils.ConfusionMatrix.TRUE_NEGATIVES:
self.true_negatives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES:
self.false_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES:
self.false_negatives,
},
y_true,
y_pred,
self.thresholds,
sample_weight=sample_weight,
multi_label=self.multi_label,
label_weights=label_weights)
def interpolate_pr_auc(self):
"""Interpolation formula inspired by section 4 of Davis & Goadrich 2006.
https://www.biostat.wisc.edu/~page/rocpr.pdf
Note here we derive & use a closed formula not present in the paper
as follows:
Precision = TP / (TP + FP) = TP / P
Modeling all of TP (true positive), FP (false positive) and their sum
P = TP + FP (predicted positive) as varying linearly within each interval
[A, B] between successive thresholds, we get
Precision slope = dTP / dP
= (TP_B - TP_A) / (P_B - P_A)
= (TP - TP_A) / (P - P_A)
Precision = (TP_A + slope * (P - P_A)) / P
The area within the interval is (slope / total_pos_weight) times
int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
Bringing back the factor (slope / total_pos_weight) we'd put aside, we get
slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
where dTP == TP_B - TP_A.
Note that when P_A == 0 the above calculation simplifies into
int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)
which is really equivalent to imputing constant precision throughout the
first bucket having >0 true positives.
Returns:
pr_auc: an approximation of the area under the P-R curve.
"""
dtp = self.true_positives[:self.num_thresholds -
1] - self.true_positives[1:]
p = self.true_positives + self.false_positives
dp = p[:self.num_thresholds - 1] - p[1:]
prec_slope = math_ops.div_no_nan(
dtp, math_ops.maximum(dp, 0), name='prec_slope')
intercept = self.true_positives[1:] - math_ops.multiply(prec_slope, p[1:])
safe_p_ratio = array_ops.where(
math_ops.logical_and(p[:self.num_thresholds - 1] > 0, p[1:] > 0),
math_ops.div_no_nan(
p[:self.num_thresholds - 1],
math_ops.maximum(p[1:], 0),
name='recall_relative_ratio'),
array_ops.ones_like(p[1:]))
pr_auc_increment = math_ops.div_no_nan(
prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
math_ops.maximum(self.true_positives[1:] + self.false_negatives[1:], 0),
name='pr_auc_increment')
if self.multi_label:
by_label_auc = math_ops.reduce_sum(
pr_auc_increment, name=self.name + '_by_label', axis=0)
if self.label_weights is None:
# Evenly weighted average of the label AUCs.
return math_ops.reduce_mean(by_label_auc, name=self.name)
else:
# Weighted average of the label AUCs.
return math_ops.div_no_nan(
math_ops.reduce_sum(
math_ops.multiply(by_label_auc, self.label_weights)),
math_ops.reduce_sum(self.label_weights),
name=self.name)
else:
return math_ops.reduce_sum(pr_auc_increment, name='interpolate_pr_auc')
def result(self):
if (self.curve == metrics_utils.AUCCurve.PR and
self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION
):
# This use case is different and is handled separately.
return self.interpolate_pr_auc()
# Set `x` and `y` values for the curves based on `curve` config.
recall = math_ops.div_no_nan(self.true_positives,
self.true_positives + self.false_negatives)
if self.curve == metrics_utils.AUCCurve.ROC:
fp_rate = math_ops.div_no_nan(self.false_positives,
self.false_positives + self.true_negatives)
x = fp_rate
y = recall
else: # curve == 'PR'.
precision = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_positives)
x = recall
y = precision
# Find the rectangle heights based on `summation_method`.
if self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION:
# Note: the case ('PR', 'interpolation') has been handled above.
heights = (y[:self.num_thresholds - 1] + y[1:]) / 2.
elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING:
heights = math_ops.minimum(y[:self.num_thresholds - 1], y[1:])
else: # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
heights = math_ops.maximum(y[:self.num_thresholds - 1], y[1:])
# Sum up the areas of all the rectangles.
if self.multi_label:
riemann_terms = math_ops.multiply(x[:self.num_thresholds - 1] - x[1:],
heights)
by_label_auc = math_ops.reduce_sum(
riemann_terms, name=self.name + '_by_label', axis=0)
if self.label_weights is None:
# Unweighted average of the label AUCs.
return math_ops.reduce_mean(by_label_auc, name=self.name)
else:
# Weighted average of the label AUCs.
return math_ops.div_no_nan(
math_ops.reduce_sum(
math_ops.multiply(by_label_auc, self.label_weights)),
math_ops.reduce_sum(self.label_weights),
name=self.name)
else:
return math_ops.reduce_sum(
math_ops.multiply(x[:self.num_thresholds - 1] - x[1:], heights),
name=self.name)
def reset_states(self):
if self.multi_label:
K.batch_set_value([(v, np.zeros((self.num_thresholds, self._num_labels)))
for v in self.variables])
else:
K.batch_set_value([
(v, np.zeros((self.num_thresholds,))) for v in self.variables
])
def get_config(self):
if is_tensor_or_variable(self.label_weights):
label_weights = K.eval(self.label_weights)
else:
label_weights = self.label_weights
config = {
'num_thresholds': self.num_thresholds,
'curve': self.curve.value,
'summation_method': self.summation_method.value,
# We remove the endpoint thresholds as an inverse of how the thresholds
# were initialized. This ensures that a metric initialized from this
# config has the same thresholds.
'thresholds': self.thresholds[1:-1],
'multi_label': self.multi_label,
'label_weights': label_weights
}
base_config = super(AUC, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
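# A minimal usage sketch of the metric defined above (not part of the library
# source). It assumes eager TF 2.x and the public `tf.keras.metrics.AUC` entry
# point; the helper name and the toy inputs are assumptions made purely to
# illustrate the `multi_label`/`label_weights` options documented above.
def _auc_usage_sketch():
  import tensorflow as tf
  m = tf.keras.metrics.AUC(num_thresholds=200, curve='PR', multi_label=True,
                           label_weights=[1.0, 2.0])
  # Shape (num_examples, num_labels) is required when `multi_label` is True.
  m.update_state([[0., 1.], [1., 0.]], [[0.2, 0.8], [0.7, 0.3]])
  return m.result().numpy()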
@keras_export('keras.metrics.CosineSimilarity')
class CosineSimilarity(MeanMetricWrapper):
"""Computes the cosine similarity between the labels and predictions.
cosine similarity = (a . b) / ||a|| ||b||
[Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity)
This metric keeps the average cosine similarity between `predictions` and
`labels` over a stream of data.
Usage:
  >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
>>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
>>> # = ((0. + 0.) + (0.5 + 0.5)) / 2
>>> m = tf.keras.metrics.CosineSimilarity(axis=1)
>>> _ = m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
>>> m.result().numpy()
0.49999997
>>> m.reset_states()
>>> _ = m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
... sample_weight=[0.3, 0.7])
>>> m.result().numpy()
0.6999999
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
```
"""
def __init__(self, name='cosine_similarity', dtype=None, axis=-1):
"""Creates a `CosineSimilarity` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
"""
super(CosineSimilarity, self).__init__(
cosine_similarity, name, dtype=dtype, axis=axis)
@keras_export('keras.metrics.MeanAbsoluteError')
class MeanAbsoluteError(MeanMetricWrapper):
"""Computes the mean absolute error between the labels and predictions.
Usage:
>>> m = tf.keras.metrics.MeanAbsoluteError()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.25
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.5
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd', loss='mse', metrics=[tf.keras.metrics.MeanAbsoluteError()])
```
"""
def __init__(self, name='mean_absolute_error', dtype=None):
super(MeanAbsoluteError, self).__init__(
mean_absolute_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(MeanMetricWrapper):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
Usage:
>>> m = tf.keras.metrics.MeanAbsolutePercentageError()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
250000000.0
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
500000000.0
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
```
"""
def __init__(self, name='mean_absolute_percentage_error', dtype=None):
super(MeanAbsolutePercentageError, self).__init__(
mean_absolute_percentage_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredError')
class MeanSquaredError(MeanMetricWrapper):
"""Computes the mean squared error between `y_true` and `y_pred`.
Usage:
>>> m = tf.keras.metrics.MeanSquaredError()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.25
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.5
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd', loss='mse', metrics=[tf.keras.metrics.MeanSquaredError()])
```
"""
def __init__(self, name='mean_squared_error', dtype=None):
super(MeanSquaredError, self).__init__(
mean_squared_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(MeanMetricWrapper):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
Usage:
>>> m = tf.keras.metrics.MeanSquaredLogarithmicError()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.12011322
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.24022643
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
```
"""
def __init__(self, name='mean_squared_logarithmic_error', dtype=None):
super(MeanSquaredLogarithmicError, self).__init__(
mean_squared_logarithmic_error, name, dtype=dtype)
@keras_export('keras.metrics.Hinge')
class Hinge(MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Usage:
>>> m = tf.keras.metrics.Hinge()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.3
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.1
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Hinge()])
```
"""
def __init__(self, name='hinge', dtype=None):
super(Hinge, self).__init__(hinge, name, dtype=dtype)
@keras_export('keras.metrics.SquaredHinge')
class SquaredHinge(MeanMetricWrapper):
"""Computes the squared hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Usage:
>>> m = tf.keras.metrics.SquaredHinge()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.86
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.46
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.SquaredHinge()])
```
"""
def __init__(self, name='squared_hinge', dtype=None):
super(SquaredHinge, self).__init__(squared_hinge, name, dtype=dtype)
@keras_export('keras.metrics.CategoricalHinge')
class CategoricalHinge(MeanMetricWrapper):
"""Computes the categorical hinge metric between `y_true` and `y_pred`.
Usage:
>>> m = tf.keras.metrics.CategoricalHinge()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.4000001
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.2
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalHinge()])
```
"""
def __init__(self, name='categorical_hinge', dtype=None):
super(CategoricalHinge, self).__init__(categorical_hinge, name, dtype=dtype)
@keras_export('keras.metrics.RootMeanSquaredError')
class RootMeanSquaredError(Mean):
"""Computes root mean squared error metric between `y_true` and `y_pred`.
Usage:
>>> m = tf.keras.metrics.RootMeanSquaredError()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.70710677
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.RootMeanSquaredError()])
```
"""
def __init__(self, name='root_mean_squared_error', dtype=None):
super(RootMeanSquaredError, self).__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates root mean squared error statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
error_sq = math_ops.squared_difference(y_pred, y_true)
return super(RootMeanSquaredError, self).update_state(
error_sq, sample_weight=sample_weight)
def result(self):
return math_ops.sqrt(math_ops.div_no_nan(self.total, self.count))
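# Hedged sketch (illustration only): because `update_state` above accumulates
# squared errors through `Mean`, the result is the square root of the streaming
# MSE. The helper name is an assumption; the inputs mirror the docstring above.
def _rmse_usage_sketch():
  import tensorflow as tf
  m = tf.keras.metrics.RootMeanSquaredError()
  m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  return m.result().numpy()  # 0.5 for this toy batch, as in the docstring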
@keras_export('keras.metrics.LogCoshError')
class LogCoshError(MeanMetricWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
`logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred - y_true)
Usage:
>>> m = tf.keras.metrics.LogCoshError()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.10844523
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.21689045
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.LogCoshError()])
```
"""
def __init__(self, name='logcosh', dtype=None):
super(LogCoshError, self).__init__(logcosh, name, dtype=dtype)
@keras_export('keras.metrics.Poisson')
class Poisson(MeanMetricWrapper):
"""Computes the Poisson metric between `y_true` and `y_pred`.
`metric = y_pred - y_true * log(y_pred)`
Usage:
>>> m = tf.keras.metrics.Poisson()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.49999997
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.99999994
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Poisson()])
```
"""
def __init__(self, name='poisson', dtype=None):
super(Poisson, self).__init__(poisson, name, dtype=dtype)
@keras_export('keras.metrics.KLDivergence')
class KLDivergence(MeanMetricWrapper):
"""Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`.
`metric = y_true * log(y_true / y_pred)`
Usage:
>>> m = tf.keras.metrics.KLDivergence()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
0.45814306
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.9162892
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.KLDivergence()])
```
"""
def __init__(self, name='kullback_leibler_divergence', dtype=None):
super(KLDivergence, self).__init__(
kullback_leibler_divergence, name, dtype=dtype)
@keras_export('keras.metrics.MeanIoU')
class MeanIoU(Metric):
"""Computes the mean Intersection-Over-Union metric.
Mean Intersection-Over-Union is a common evaluation metric for semantic image
segmentation, which first computes the IOU for each semantic class and then
computes the average over classes. IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by
`sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
>>> # iou = true_positives / (sum_row + sum_col - true_positives))
>>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
>>> m = tf.keras.metrics.MeanIoU(num_classes=2)
>>> _ = m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result().numpy()
0.33333334
>>> m.reset_states()
>>> _ = m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
... sample_weight=[0.3, 0.3, 0.3, 0.1])
>>> m.result().numpy()
0.23809525
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
```
"""
def __init__(self, num_classes, name=None, dtype=None):
"""Creates a `MeanIoU` instance.
Args:
num_classes: The possible number of labels the prediction task can have.
This value must be provided, since a confusion matrix of dimension =
[num_classes, num_classes] will be allocated.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(MeanIoU, self).__init__(name=name, dtype=dtype)
self.num_classes = num_classes
# Variable to accumulate the predictions in the confusion matrix. Setting
# the type to be `float64` as required by confusion_matrix_ops.
self.total_cm = self.add_weight(
'total_confusion_matrix',
shape=(num_classes, num_classes),
initializer=init_ops.zeros_initializer,
dtype=dtypes.float64)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
# Flatten the input if its rank > 1.
if y_pred.shape.ndims > 1:
y_pred = array_ops.reshape(y_pred, [-1])
if y_true.shape.ndims > 1:
y_true = array_ops.reshape(y_true, [-1])
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
if sample_weight.shape.ndims > 1:
sample_weight = array_ops.reshape(sample_weight, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
y_true,
y_pred,
self.num_classes,
weights=sample_weight,
dtype=dtypes.float64)
return self.total_cm.assign_add(current_cm)
def result(self):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.cast(
math_ops.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
sum_over_col = math_ops.cast(
math_ops.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
true_positives = math_ops.cast(
array_ops.diag_part(self.total_cm), dtype=self._dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
# The mean is only computed over classes that appear in the
# label or prediction tensor. If the denominator is 0, we need to
# ignore the class.
num_valid_entries = math_ops.reduce_sum(
math_ops.cast(math_ops.not_equal(denominator, 0), dtype=self._dtype))
iou = math_ops.div_no_nan(true_positives, denominator)
return math_ops.div_no_nan(
math_ops.reduce_sum(iou, name='mean_iou'), num_valid_entries)
def reset_states(self):
K.set_value(self.total_cm, np.zeros((self.num_classes, self.num_classes)))
def get_config(self):
config = {'num_classes': self.num_classes}
base_config = super(MeanIoU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
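# Hedged sketch (illustration only): the same mean-IoU computation as
# `MeanIoU.result` above, expressed in NumPy from an accumulated confusion
# matrix. The helper name is an assumption and is not part of the module.
def _mean_iou_from_confusion_matrix_sketch(cm):
  import numpy as np
  cm = np.asarray(cm, dtype=np.float64)
  sum_over_row = cm.sum(axis=0)
  sum_over_col = cm.sum(axis=1)
  true_positives = np.diag(cm)
  denominator = sum_over_row + sum_over_col - true_positives
  valid = denominator > 0
  iou = np.where(valid, true_positives / np.where(valid, denominator, 1.0), 0.0)
  return iou.sum() / max(valid.sum(), 1)
# e.g. _mean_iou_from_confusion_matrix_sketch([[1, 1], [1, 1]]) -> 0.333...,
# matching the docstring example above.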
@keras_export('keras.metrics.MeanTensor')
class MeanTensor(Metric):
"""Computes the element-wise (weighted) mean of the given tensors.
  `MeanTensor` returns a tensor with the same shape as the input tensors. The
mean value is updated by keeping local variables `total` and `count`. The
`total` tracks the sum of the weighted values, and `count` stores the sum of
the weighted counts.
Usage:
>>> m = tf.keras.metrics.MeanTensor()
>>> _ = m.update_state([0, 1, 2, 3])
>>> _ = m.update_state([4, 5, 6, 7])
>>> m.result().numpy()
array([2., 3., 4., 5.], dtype=float32)
>>> _ = m.update_state([12, 10, 8, 6], sample_weight= [0, 0.2, 0.5, 1])
>>> m.result().numpy()
array([2. , 3.6363635, 4.8 , 5.3333335], dtype=float32)
"""
def __init__(self, name='mean_tensor', dtype=None):
"""Creates a `MeanTensor` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(MeanTensor, self).__init__(name=name, dtype=dtype)
self._shape = None
self._total = None
self._count = None
self._built = False
def _build(self, shape):
self._shape = tensor_shape.TensorShape(shape)
self._build_input_shape = self._shape
# Create new state variables
self._total = self.add_weight(
'total', shape=shape, initializer=init_ops.zeros_initializer)
self._count = self.add_weight(
'count', shape=shape, initializer=init_ops.zeros_initializer)
with ops.init_scope():
if not context.executing_eagerly():
K._initialize_variables(K._get_session()) # pylint: disable=protected-access
self._built = True
@property
def total(self):
return self._total if self._built else None
@property
def count(self):
return self._count if self._built else None
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the element-wise mean.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to 1.
Returns:
Update op.
"""
values = math_ops.cast(values, self._dtype)
if not self._built:
self._build(values.shape)
elif values.shape != self._shape:
raise ValueError('MeanTensor input values must always have the same '
'shape. Expected shape (set during the first call): {}. '
'Got: {}'.format(self._shape, values.shape))
num_values = array_ops.ones_like(values)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
values, _, sample_weight = tf_losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
except ValueError:
# Reduce values to same ndim as weight array
ndim = K.ndim(values)
weight_ndim = K.ndim(sample_weight)
values = math_ops.reduce_mean(
values, axis=list(range(weight_ndim, ndim)))
num_values = math_ops.multiply(num_values, sample_weight)
values = math_ops.multiply(values, sample_weight)
update_total_op = self._total.assign_add(values)
with ops.control_dependencies([update_total_op]):
return self._count.assign_add(num_values)
def result(self):
if not self._built:
raise ValueError(
'MeanTensor does not have any result yet. Please call the MeanTensor '
'instance or use `.update_state(value)` before retrieving the result.'
)
return math_ops.div_no_nan(self.total, self.count)
def reset_states(self):
if self._built:
K.batch_set_value(
[(v, np.zeros(self._shape.as_list())) for v in self.variables])
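# Hedged sketch (illustration only): the `total`/`count` bookkeeping that
# `MeanTensor` maintains, written out in NumPy. The helper name and inputs are
# assumptions; the result matches the first docstring example above.
def _mean_tensor_sketch():
  import numpy as np
  total = np.zeros(4)
  count = np.zeros(4)
  for values in ([0, 1, 2, 3], [4, 5, 6, 7]):
    values = np.asarray(values, dtype=np.float64)
    total += values                # sum of (weighted) values
    count += np.ones_like(values)  # sum of (weighted) counts
  return total / count  # -> [2., 3., 4., 5.]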
@keras_export('keras.metrics.BinaryCrossentropy')
class BinaryCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are only two
label classes (0 and 1).
Usage:
>>> m = tf.keras.metrics.BinaryCrossentropy()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
0.81492424
>>> m.reset_states()
>>> _ = m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.9162905
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.BinaryCrossentropy()])
```
"""
def __init__(self,
name='binary_crossentropy',
dtype=None,
from_logits=False,
label_smoothing=0):
"""Creates a `BinaryCrossentropy` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
      from_logits: (Optional) Whether output is expected to be a logits tensor.
        By default, we consider that output encodes a probability distribution.
      label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
        smoothed, meaning the confidence on label values is relaxed,
        e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for
        label `0` and `0.9` for label `1`.
"""
super(BinaryCrossentropy, self).__init__(
binary_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing)
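# Hedged sketch (illustration only): what the `label_smoothing` argument above
# does to binary targets before the crossentropy is computed. The helper name
# is an assumption; the transform shown is y * (1 - s) + 0.5 * s.
def _smooth_binary_labels_sketch(y_true, label_smoothing):
  return [y * (1.0 - label_smoothing) + 0.5 * label_smoothing for y in y_true]
# e.g. _smooth_binary_labels_sketch([0.0, 1.0], 0.2) -> [0.1, 0.9], matching
# the "0.1 for label 0 and 0.9 for label 1" description above.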
@keras_export('keras.metrics.CategoricalCrossentropy')
class CategoricalCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are multiple
  label classes (2 or more). Here we assume that labels are given as a `one_hot`
  representation, e.g., when label values are [2, 0, 1],
`y_true` = [[0, 0, 1], [1, 0, 0], [0, 1, 0]].
Usage:
>>> # EPSILON = 1e-7, y = y_true, y` = y_pred
>>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
>>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(y'), axis = -1)
>>> # = -((log 0.95), (log 0.1))
>>> # = [0.051, 2.302]
>>> # Reduced xent = (0.051 + 2.302) / 2
>>> m = tf.keras.metrics.CategoricalCrossentropy()
>>> _ = m.update_state([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result().numpy()
1.1769392
>>> m.reset_states()
>>> _ = m.update_state([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> m.result().numpy()
1.6271976
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalCrossentropy()])
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
    from_logits: (Optional) Whether `y_pred` is expected to be a logits tensor.
      By default, we assume that `y_pred` encodes a probability distribution.
    label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
      meaning the confidence on label values is relaxed, e.g.
      `label_smoothing=0.2` means that we will use a value of `0.1` for label
      `0` and `0.9` for label `1`.
"""
def __init__(self,
name='categorical_crossentropy',
dtype=None,
from_logits=False,
label_smoothing=0):
super(CategoricalCrossentropy, self).__init__(
categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.metrics.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
Use this crossentropy metric when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` metric.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
  `y_true` and `# classes` floating point values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Usage:
>>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
>>> # logits = log(y_pred)
>>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
>>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(softmax), 1)
>>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
>>> # [-2.3026, -0.2231, -2.3026]]
>>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
>>> # xent = [0.0513, 2.3026]
>>> # Reduced xent = (0.0513 + 2.3026) / 2
>>> m = tf.keras.metrics.SparseCategoricalCrossentropy()
>>> _ = m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result().numpy()
1.1769392
>>> m.reset_states()
>>> _ = m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> m.result().numpy()
1.6271976
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
    from_logits: (Optional) Whether `y_pred` is expected to be a logits tensor.
By default, we assume that `y_pred` encodes a probability distribution.
axis: (Optional) Defaults to -1. The dimension along which the metric is
computed.
"""
def __init__(self,
name='sparse_categorical_crossentropy',
dtype=None,
from_logits=False,
axis=-1):
super(SparseCategoricalCrossentropy, self).__init__(
sparse_categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
axis=axis)
class SumOverBatchSize(Reduce):
"""Computes the weighted sum over batch size of the given values.
For example, if values is [1, 3, 5, 7] then the metric value is 4.
If the weights were specified as [1, 1, 0, 0] then the value would be 1.
This metric creates two variables, `total` and `count` that are used to
compute the average of `values`. This average is ultimately returned as sum
over batch size which is an idempotent operation that simply divides `total`
by `count`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0
to mask values.
"""
def __init__(self, name='sum_over_batch_size', dtype=None):
super(SumOverBatchSize, self).__init__(
reduction=metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
name=name,
dtype=dtype)
class SumOverBatchSizeMetricWrapper(SumOverBatchSize):
"""Wraps a function with the `SumOverBatchSizeMetricWrapper` metric."""
def __init__(self, fn, name=None, dtype=None, **kwargs):
"""Creates a `SumOverBatchSizeMetricWrapper` instance.
Args:
fn: The metric function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super(SumOverBatchSizeMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
matches = self._fn(y_true, y_pred, **self._fn_kwargs)
return super(SumOverBatchSizeMetricWrapper, self).update_state(
matches, sample_weight=sample_weight)
def get_config(self):
config = {}
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if is_tensor_or_variable(v) else v
base_config = super(SumOverBatchSizeMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def accuracy(y_true, y_pred):
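  # Element-wise accuracy: after flattening any ragged inputs and matching
  # dtypes, returns 1.0 where y_true equals y_pred and 0.0 elsewhere.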
[y_pred, y_true], _ = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_pred, y_true])
y_pred.shape.assert_is_compatible_with(y_true.shape)
if y_true.dtype != y_pred.dtype:
y_pred = math_ops.cast(y_pred, y_true.dtype)
return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
@keras_export('keras.metrics.binary_accuracy')
def binary_accuracy(y_true, y_pred, threshold=0.5):
"""Calculates how often predictions matches binary labels.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
threshold: (Optional) Float representing the threshold for deciding whether
prediction values are 1 or 0.
Returns:
Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
"""
threshold = math_ops.cast(threshold, y_pred.dtype)
y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
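# Hedged sketch (illustration only): the thresholding that `binary_accuracy`
# performs, written in plain Python. The helper name and inputs are
# assumptions made for this example.
def _binary_accuracy_sketch(y_true, y_pred, threshold=0.5):
  hard_pred = [1.0 if p > threshold else 0.0 for p in y_pred]
  matches = [1.0 if t == p else 0.0 for t, p in zip(y_true, hard_pred)]
  return sum(matches) / len(matches)
# e.g. _binary_accuracy_sketch([1., 0., 1.], [0.8, 0.4, 0.3]) -> 2/3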
@keras_export('keras.metrics.categorical_accuracy')
def categorical_accuracy(y_true, y_pred):
"""Calculates how often predictions matches one-hot labels.
  You can provide logits of classes as `y_pred`, since the argmax of
  logits and probabilities is the same.
Args:
y_true: One-hot ground truth values.
y_pred: The prediction values.
Returns:
Categorical accuracy values.
"""
return math_ops.cast(
math_ops.equal(
math_ops.argmax(y_true, axis=-1), math_ops.argmax(y_pred, axis=-1)),
K.floatx())
@keras_export('keras.metrics.sparse_categorical_accuracy')
def sparse_categorical_accuracy(y_true, y_pred):
"""Calculates how often predictions matches integer labels.
  You can provide logits of classes as `y_pred`, since the argmax of
  logits and probabilities is the same.
Args:
y_true: Integer ground truth values.
y_pred: The prediction values.
Returns:
Sparse categorical accuracy values.
"""
y_pred_rank = ops.convert_to_tensor_v2(y_pred).shape.ndims
y_true_rank = ops.convert_to_tensor_v2(y_true).shape.ndims
# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None) and (len(
K.int_shape(y_true)) == len(K.int_shape(y_pred))):
y_true = array_ops.squeeze(y_true, [-1])
y_pred = math_ops.argmax(y_pred, axis=-1)
# If the predicted output and actual output types don't match, force cast them
# to match.
if K.dtype(y_pred) != K.dtype(y_true):
y_pred = math_ops.cast(y_pred, K.dtype(y_true))
return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
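# Hedged sketch (illustration only): the squeeze-then-argmax comparison that
# `sparse_categorical_accuracy` performs, in plain Python. The helper name and
# inputs are assumptions.
def _sparse_categorical_accuracy_sketch(y_true, y_pred):
  matches = []
  for label, probs in zip(y_true, y_pred):
    predicted = max(range(len(probs)), key=lambda i: probs[i])
    matches.append(1.0 if predicted == int(label) else 0.0)
  return matches
# e.g. _sparse_categorical_accuracy_sketch(
#     [1, 2], [[0.05, 0.95, 0.0], [0.1, 0.8, 0.1]]) -> [1.0, 0.0]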
@keras_export('keras.metrics.top_k_categorical_accuracy')
def top_k_categorical_accuracy(y_true, y_pred, k=5):
"""Computes how often targets are in the top `K` predictions.
Args:
y_true: The ground truth values.
y_pred: The prediction values.
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
Returns:
Top K categorical accuracy value.
"""
return math_ops.cast(
nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), K.floatx())
@keras_export('keras.metrics.sparse_top_k_categorical_accuracy')
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
"""Computes how often integer targets are in the top `K` predictions.
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
Returns:
Sparse top K categorical accuracy value.
"""
y_pred_rank = ops.convert_to_tensor_v2(y_pred).shape.ndims
y_true_rank = ops.convert_to_tensor_v2(y_true).shape.ndims
# Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:
y_pred = array_ops.reshape(y_pred, [-1, y_pred.shape[-1]])
if y_true_rank > 1:
y_true = array_ops.reshape(y_true, [-1])
return math_ops.cast(
nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), K.floatx())
def cosine_proximity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Args:
y_true: The ground truth values.
y_pred: The prediction values.
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
Returns:
Cosine similarity value.
"""
y_true = nn.l2_normalize(y_true, axis=axis)
y_pred = nn.l2_normalize(y_pred, axis=axis)
return math_ops.reduce_sum(y_true * y_pred, axis=axis)
# Aliases
acc = ACC = accuracy
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
cosine_similarity = cosine_proximity
def clone_metric(metric):
"""Returns a clone of the metric if stateful, otherwise returns it as is."""
if isinstance(metric, Metric):
with ops.init_scope():
return metric.__class__.from_config(metric.get_config())
return metric
def clone_metrics(metrics):
"""Clones the given metric list/dict."""
return nest.map_structure(clone_metric, metrics)
@keras_export('keras.metrics.serialize')
def serialize(metric):
return serialize_keras_object(metric)
@keras_export('keras.metrics.deserialize')
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='metric function')
@keras_export('keras.metrics.get')
def get(identifier):
"""Return a metric given its identifer."""
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
error_msg = 'Could not interpret metric function identifier: {}'.format(
identifier)
raise ValueError(error_msg)
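# Hedged sketch (illustration only): the three identifier forms that `get`
# above accepts. It assumes this module is reachable as `tf.keras.metrics`;
# the helper name is an assumption.
def _metrics_get_sketch():
  import tensorflow as tf
  by_string = tf.keras.metrics.get('mse')                    # name or alias
  by_callable = tf.keras.metrics.get(tf.keras.metrics.mse)   # returned as-is
  by_config = tf.keras.metrics.get(
      tf.keras.metrics.serialize(tf.keras.metrics.AUC()))    # dict config
  return by_string, by_callable, by_config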
def is_built_in(cls):
return cls.__module__ == Metric.__module__
|
the-stack_0_24700
|
import unittest
import numpy as np
import openmdao.api as om
import dymos as dm
class MainPhase(dm.Phase):
def initialize(self):
super(MainPhase, self).initialize()
def setup(self):
self.options['ode_class'] = TestODE
super(MainPhase, self).setup()
class TestODE(om.Group):
def initialize(self):
self.options.declare('num_nodes', types=int)
def setup(self):
nn = self.options['num_nodes']
self.add_subsystem('Sink',
om.ExecComp('sink = chord[0]', sink={'value': 0.0, 'units': None},
chord={'value': np.zeros(4), 'units': 'm'}),
promotes_inputs=['chord'])
self.add_subsystem('calc', om.ExecComp('Out = Thrust * 2',
Out={'value': np.zeros(nn), 'units': 'N'},
Thrust={'value': np.zeros(nn), 'units': 'N'}),
promotes_inputs=['Thrust'],
promotes_outputs=['Out'])
class TestSimulateShapedParams(unittest.TestCase):
def test_shaped_params(self):
main_tx = dm.Radau(num_segments=1, order=3, compressed=False)
p = om.Problem(model=om.Group())
p.driver = om.ScipyOptimizeDriver()
des_vars = p.model.add_subsystem('des_vars', om.IndepVarComp(), promotes_outputs=['*'])
des_vars.add_output('chord', val=4 * np.ones(4), units='inch')
hop0 = dm.Trajectory()
p.model.add_subsystem('hop0', hop0)
main_phase = hop0.add_phase(name='main_phase',
phase=MainPhase(transcription=main_tx))
main_phase.set_time_options(fix_initial=True, fix_duration=True, units='s')
main_phase.add_parameter('chord', targets='chord', shape=(4,), units='inch',
dynamic=False)
p.model.connect('chord', 'hop0.main_phase.parameters:chord')
main_phase.add_state('impulse', fix_initial=True, fix_final=False, units='N*s',
rate_source='Out',
solve_segments=False)
main_phase.add_polynomial_control('Thrust', units='N',
targets='Thrust',
lower=-3450, upper=-500,
order=5, opt=True)
main_phase.add_objective('impulse', loc='final', ref=-1)
p.setup(mode='auto', check=['unconnected_inputs'], force_alloc_complex=True)
p['hop0.main_phase.t_initial'] = 0.0
p['hop0.main_phase.t_duration'] = 10
p['hop0.main_phase.polynomial_controls:Thrust'][:, 0] = -3400
p['hop0.main_phase.states:impulse'] = main_phase.interpolate(ys=[0, 0], nodes='state_input')
p.run_driver()
try:
hop0.simulate()
        except Exception:
self.fail('Simulate did not correctly complete.')
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
the-stack_0_24701
|
import sys
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling1D, AveragePooling1D
sys.path.insert(0, "/home/cirl/Amir/Human-Activity-EEG-Accelerometer")
import numpy as np
import os
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout, Flatten
import time
import tensorflow as tf
import random as rn
from keras import backend as K, optimizers
from DeepACC.input_preparation import build_inputs
from keras.callbacks import EarlyStopping, CSVLogger
from DeepEEG.evaluation import compute_accuracy, evalRes
from keras.utils.vis_utils import plot_model
from keras.layers.recurrent import LSTM
from keras.layers.convolutional import Conv2D, Conv1D, SeparableConv1D
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
# https://pdfs.semanticscholar.org/df0b/05d8985846e694cda62d41a04e7c85090fa6.pdf
rn.seed(12345)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(3)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
session_conf.gpu_options.allow_growth = True
tf.set_random_seed(1234)
classes = 2
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.set_random_seed(1234)
#16
def build_model(X_train, row, cell):
model = Sequential()
model.add(LSTM(64, activation='tanh', recurrent_activation='hard_sigmoid', \
use_bias=True, kernel_initializer='glorot_uniform', \
recurrent_initializer='orthogonal', \
unit_forget_bias=True, kernel_regularizer=None, \
recurrent_regularizer=None, \
bias_regularizer=None, activity_regularizer=None, \
kernel_constraint=None, recurrent_constraint=None, \
bias_constraint=None, dropout=0.2 , recurrent_dropout=0.0, \
implementation=1, return_sequences=True, return_state=False, \
go_backwards=False, stateful=False, unroll=True,
input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(LSTM(32, activation='tanh', recurrent_activation='hard_sigmoid', \
use_bias=True, kernel_initializer='glorot_uniform', \
recurrent_initializer='orthogonal', \
unit_forget_bias=True, kernel_regularizer=None, \
recurrent_regularizer=None, \
bias_regularizer=None, activity_regularizer=None, \
kernel_constraint=None, recurrent_constraint=None, \
bias_constraint=None, dropout=0.0 , recurrent_dropout=0.0, \
implementation=1, return_sequences=False, return_state=False, \
go_backwards=False, stateful=False, unroll=True))
model.add(Dense(64, activation="tanh"))
model.add(Dense(3, activation="softmax"))
opt = optimizers.adam(lr=0.001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'])
plot_model(model, to_file='model.png', show_shapes=True)
model.summary()
return model
if __name__ == '__main__':
X_train, y_train, X_test, y_test, train_labels, test_labels = build_inputs(False, 75)
epochs = 50 # 21
model = build_model(X_train, 0, 0)
name = "{}-{}".format(0, 0)
early_stop = EarlyStopping(monitor='val_acc', min_delta=0.1, patience=2, mode='auto')
csv_logger = CSVLogger('res/training.csv', append=True, separator=',')
history_callback = model.fit(X_train, y_train, epochs=epochs, batch_size=500,
validation_split=0.2, verbose=1, callbacks=[csv_logger, early_stop])
pred = model.predict(X_test)
compute_accuracy(name, pred, test_labels, history_callback)
evalRes(pred, test_labels, y_test, name)
|
the-stack_0_24705
|
# Python
import os
import sys
# Django
import django
from django.conf import global_settings
# DJ Database URL
try:
import dj_database_url
except ImportError:
dj_database_url = None
# Update this module's local settings from the global settings module.
this_module = sys.modules[__name__]
for setting in dir(global_settings):
if setting == setting.upper():
setattr(this_module, setting, getattr(global_settings, setting))
# Absolute path to the directory containing this Django project.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'test_project.sqlite3'),
}
}
if dj_database_url:
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
SITE_ID = 1
SECRET_KEY = '1a93a98e-03e7-4787-b099-0209705b80aa'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = ['*']
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'public', 'static')
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
try:
import whitenoise # noqa
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
except ImportError:
pass
if django.VERSION >= (1, 10):
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
else:
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
os.path.join(PROJECT_ROOT, 'templates'),
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ROOT_URLCONF = 'test_project.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django_extensions',
'sortedm2m',
'fortunecookie',
'test_project.test_app',
)
WSGI_APPLICATION = 'test_project.wsgi.application'
INTERNAL_IPS = ('127.0.0.1',)
try:
import debug_toolbar # noqa
INSTALLED_APPS += ('debug_toolbar',)
if django.VERSION >= (1, 10):
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
else:
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
except ImportError:
pass
RUNSERVER_DEFAULT_ADDR = '127.0.0.1'
RUNSERVER_DEFAULT_PORT = '8066'
|
the-stack_0_24706
|
#!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import errno
import os
import os.path
import pathlib
import subprocess
import time
import typing
import unittest
from eden.cli.daemon import did_process_exit
from eden.test_support.environment_variable import EnvironmentVariableMixin
from eden.test_support.temporary_directory import TemporaryDirectoryMixin
from .lib.linux import ProcessID, is_cgroup_v2_mounted
from .lib.systemd import (
SystemdService,
SystemdUnitName,
SystemdUserServiceManager,
SystemdUserServiceManagerMixin,
temporary_systemd_user_service_manager,
)
class TemporarySystemdUserServiceManagerTest(
unittest.TestCase, SystemdUserServiceManagerMixin
):
def test_unit_paths_includes_manager_specific_directories(self) -> None:
systemd = self.make_temporary_systemd_user_service_manager()
paths = systemd.get_unit_paths()
self.assertIn(systemd.xdg_runtime_dir / "systemd" / "user.control", paths)
def test_no_units_are_active(self) -> None:
def is_interesting_unit(unit_name: SystemdUnitName) -> bool:
if unit_name in ("-.slice"):
return False
if unit_name in ("dbus.service", "dbus.socket"):
return False
if unit_name.endswith(".device"):
return False
if unit_name.endswith(".mount") or unit_name.endswith(".swap"):
return False
if unit_name.endswith(".scope"):
return False
if unit_name.endswith(".target"):
return False
return True
systemd = self.make_temporary_systemd_user_service_manager()
unit_names = systemd.get_active_unit_names()
self.assertEqual(
[unit for unit in unit_names if is_interesting_unit(unit)],
[],
f"systemd should be managing no interesting units\n"
f"All units: {unit_names}",
)
def test_manager_process_id_is_valid(self) -> None:
with temporary_systemd_user_service_manager() as systemd:
self.assertTrue(does_process_exist(systemd.process_id))
def test_closing_manager_kills_process(self) -> None:
with temporary_systemd_user_service_manager() as systemd:
process_id = systemd.process_id
self.assertFalse(does_process_exist(process_id))
def test_exit_kills_manager(self) -> None:
systemd = self.make_temporary_systemd_user_service_manager()
process_id = systemd.process_id
systemd.exit()
self.assertFalse(systemd.is_alive())
self.assertTrue(did_process_exit(process_id))
class TemporarySystemdUserServiceManagerIsolationTest(
unittest.TestCase,
EnvironmentVariableMixin,
SystemdUserServiceManagerMixin,
TemporaryDirectoryMixin,
):
def test_services_with_same_name_by_different_managers_are_independent(
self
) -> None:
systemd_1 = self.make_temporary_systemd_user_service_manager()
systemd_2 = self.make_temporary_systemd_user_service_manager()
unit_name = "isolation_test.service"
service_1 = systemd_1.systemd_run(
command=["/bin/sleep", "10"],
properties={"RemainAfterExit": "yes"},
extra_env={},
unit_name=unit_name,
)
service_2 = systemd_2.systemd_run(
command=["/bin/sleep", "10"],
properties={"RemainAfterExit": "yes"},
extra_env={},
unit_name=unit_name,
)
service_1.stop()
self.assertEqual(
(service_2.query_active_state(), service_2.query_sub_state()),
("active", "running"),
"Stopping systemd_1's service should not stop systemd_2's service",
)
def test_manager_cannot_see_services_of_different_manager(self) -> None:
systemd_1 = self.make_temporary_systemd_user_service_manager()
systemd_2 = self.make_temporary_systemd_user_service_manager()
service = systemd_1.systemd_run(
command=["/bin/sleep", "10"],
properties={"RemainAfterExit": "yes"},
extra_env={},
)
self.assertIn(
service.unit_name,
systemd_1.get_active_unit_names(),
"systemd_1 should see its own unit",
)
self.assertNotIn(
service.unit_name,
systemd_2.get_active_unit_names(),
"systemd_2 should not see systemd_1's unit",
)
def test_environment_variables_do_not_leak_to_services(self) -> None:
spy_variable_name = "EDEN_TEST_VARIABLE"
self.set_environment_variable(
            spy_variable_name, "this should not propagate to the service"
)
systemd = self.make_temporary_systemd_user_service_manager()
env_variables = self.get_service_environment(systemd)
env_variable_names = [name for (name, value) in env_variables]
self.assertIn(
"PATH",
env_variable_names,
"Sanity check: $PATH should be set in service environment",
)
self.assertNotIn(spy_variable_name, env_variable_names)
def test_path_environment_variable_is_forced_to_default(self) -> None:
# See https://www.freedesktop.org/software/systemd/man/systemd.exec.html#%24PATH
allowed_path_entries = {
"/usr/local/sbin",
"/usr/local/bin",
"/usr/sbin",
"/usr/bin",
"/sbin",
"/bin",
}
spy_path_entry = self.make_temporary_directory()
self.set_environment_variable(
"PATH", spy_path_entry + os.pathsep + os.environ["PATH"]
)
systemd = self.make_temporary_systemd_user_service_manager()
env_variables = self.get_service_environment(systemd)
path_value = [value for (name, value) in env_variables if name == "PATH"][0]
for path_entry in path_value.split(os.pathsep):
self.assertIn(
path_entry,
allowed_path_entries,
"$PATH should only include default paths\n$PATH: {path_value!r}",
)
def get_service_environment(
self, systemd: SystemdUserServiceManager
) -> typing.List[typing.Tuple[str, str]]:
env_output_file = pathlib.Path(self.make_temporary_directory()) / "env_output"
env_service = systemd.systemd_run(
command=["/usr/bin/env", "-0"],
properties={"StandardOutput": f"file:{env_output_file}"},
extra_env={},
)
env_service.poll_until_inactive(timeout=10)
def parse_entry(entry_str: str) -> typing.Tuple[str, str]:
[name, value] = entry_str.split("=", 1)
return (name, value)
env_output = env_output_file.read_text()
return [
parse_entry(entry_str) for entry_str in env_output.split("\0") if entry_str
]
class SystemdServiceTest(
unittest.TestCase, TemporaryDirectoryMixin, SystemdUserServiceManagerMixin
):
systemd: SystemdUserServiceManager
def setUp(self) -> None:
super().setUp()
self.systemd = self.make_temporary_systemd_user_service_manager()
def test_str_of_service_includes_unit_name_and_systemd_directory(self) -> None:
service = SystemdService(unit_name="my-test-unit.service", systemd=self.systemd)
self.assertRegex(
str(service), r"^my-test-unit\.service \(XDG_RUNTIME_DIR=/\S+\)$"
)
def test_repr_of_service_includes_unit_name_and_systemd_directory(self) -> None:
service = SystemdService(unit_name="my-test-unit.service", systemd=self.systemd)
self.assertRegex(
repr(service),
r"^SystemdService\("
r".*"
r"unit_name='my-test-unit\.service'"
r".*"
r"systemd=SystemdUserServiceManager\("
r".*"
r"xdg_runtime_dir=PosixPath\('\S+'\)"
r".*"
r"\)"
r".*"
r"\)",
)
def test_start_executes_oneshot_service(self) -> None:
message_file = pathlib.Path(self.make_temporary_directory()) / "message.txt"
service = self.enable_service(
"test-SystemdServiceTest.service",
f"""
[Service]
Type=oneshot
ExecStart=/bin/echo "Hello from service"
StandardOutput=file:{message_file}
""",
)
service.start()
self.assertEqual(message_file.read_text(), "Hello from service\n")
def test_start_executes_oneshot_instanced_service(self) -> None:
temp_dir = pathlib.Path(self.make_temporary_directory())
message_file = temp_dir / "message.txt"
unit_file = temp_dir / "[email protected]"
unit_file.write_text(
f"""
[Service]
Type=oneshot
ExecStart=/bin/echo "instance: %i"
StandardOutput=file:{message_file}
"""
)
self.systemd.enable_runtime_unit_from_file(unit_file=unit_file)
service = self.systemd.get_service("[email protected]")
service.start()
self.assertEqual(message_file.read_text(), "instance: hello\n")
def test_unstarted_service_is_inactive(self) -> None:
service = self.enable_service(
"test-SystemdServiceTest.service",
"""
[Service]
ExecStart=/bin/false
""",
)
self.assertEqual(
(service.query_active_state(), service.query_sub_state()),
("inactive", "dead"),
)
def test_running_simple_service_is_active(self) -> None:
service = self.enable_service(
"test-SystemdServiceTest.service",
"""
[Service]
Type=simple
ExecStart=/bin/sleep 30
""",
)
service.start()
self.assertEqual(
(service.query_active_state(), service.query_sub_state()),
("active", "running"),
)
def test_service_exiting_with_code_1_is_failed(self) -> None:
service = self.enable_service(
"test-SystemdServiceTest.service",
"""
[Service]
Type=notify
ExecStart=/bin/false
""",
)
try:
service.start()
except subprocess.CalledProcessError:
pass
self.assertEqual(
(service.query_active_state(), service.query_sub_state()),
("failed", "failed"),
)
@unittest.skipIf(
not is_cgroup_v2_mounted(),
"T36934106: Fix EdenFS systemd integration tests for cgroups v1",
)
def test_processes_of_forking_service_includes_all_child_processes(self) -> None:
service = self.enable_service(
"test-SystemdServiceTest.service",
"""
[Service]
Type=forking
ExecStart=/bin/sh -c "/bin/sleep 30 | /bin/cat & exit"
""",
)
service.start()
# HACK(strager): Sometimes, /bin/sh appears inside the cgroup's process
# list. Wait a bit to reduce test flakiness.
# TODO(strager): Figure out why sometimes /bin/sh is still inside the
# cgroup's process list.
time.sleep(1)
process_ids = service.query_process_ids()
process_exes = [get_resolved_process_exe_or_error(pid) for pid in process_ids]
expected_process_exes = [
pathlib.Path(p).resolve() for p in ["/bin/sleep", "/bin/cat"]
]
self.assertCountEqual(
process_exes, expected_process_exes, f"Process IDs: {process_ids}"
)
def enable_service(
self, service_name: SystemdUnitName, unit_file_content: str
) -> SystemdService:
unit_file = pathlib.Path(self.make_temporary_directory()) / service_name
unit_file.write_text(unit_file_content)
self.systemd.enable_runtime_unit_from_file(unit_file=unit_file)
return self.systemd.get_service(service_name)
def get_process_exe(process_id: ProcessID) -> pathlib.Path:
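# /proc/<pid>/exe is a symlink to the executable that the process is running (Linux-specific).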
return pathlib.Path(os.readlink(pathlib.Path("/proc") / str(process_id) / "exe"))
def get_process_exe_or_error(
process_id: ProcessID
) -> typing.Union[pathlib.Path, OSError]:
try:
return get_process_exe(process_id)
except OSError as e:
return e
def get_resolved_process_exe_or_error(
process_id: ProcessID
) -> typing.Union[pathlib.Path, OSError]:
try:
return get_process_exe(process_id).resolve()
except OSError as e:
return e
def does_process_exist(process_id: int) -> bool:
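# Sending signal 0 performs the existence and permission checks without
# actually delivering a signal: ESRCH means no such process, EPERM means
# the process exists but we lack permission to signal it.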
try:
os.kill(process_id, 0)
except OSError as ex:
if ex.errno == errno.ESRCH:
return False
if ex.errno == errno.EPERM:
return True
raise ex
else:
return True
|
the-stack_0_24707
|
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019-2020 The Matrix.org Foundation C.I.C.
# Copyright 2020 Sorunome
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple
from canonicaljson import encode_canonical_json
from twisted.internet.interfaces import IDelayedCall
from synapse import event_auth
from synapse.api.constants import (
EventContentFields,
EventTypes,
GuestAccess,
Membership,
RelationTypes,
UserTypes,
)
from synapse.api.errors import (
AuthError,
Codes,
ConsentNotGivenError,
NotFoundError,
ShadowBanError,
SynapseError,
UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.api.urls import ConsentURIBuilder
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester
from synapse.util import json_decoder, json_encoder, log_failure
from synapse.util.async_helpers import Linearizer, gather_results, unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
from synapse.events.third_party_rules import ThirdPartyEventRules
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class MessageHandler:
"""Contains some read only APIs to get state about a room"""
def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.state = hs.get_state_handler()
self.store = hs.get_datastore()
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._event_serializer = hs.get_event_client_serializer()
self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages
# The scheduled call to self._expire_event. None if no call is currently
# scheduled.
self._scheduled_expiry: Optional[IDelayedCall] = None
if not hs.config.worker.worker_app:
run_as_background_process(
"_schedule_next_expiry", self._schedule_next_expiry
)
async def get_room_data(
self,
user_id: str,
room_id: str,
event_type: str,
state_key: str,
) -> Optional[EventBase]:
"""Get data from a room.
Args:
user_id
room_id
event_type
state_key
Returns:
The requested state event, or None if it could not be found.
Raises:
SynapseError or AuthError if the user is not in the room
"""
(
membership,
membership_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id, user_id, allow_departed_users=True
)
if membership == Membership.JOIN:
data = await self.state.get_current_state(room_id, event_type, state_key)
elif membership == Membership.LEAVE:
key = (event_type, state_key)
# If the membership is not JOIN, then the event ID should exist.
assert (
membership_event_id is not None
), "check_user_in_room_or_world_readable returned invalid data"
room_state = await self.state_store.get_state_for_events(
[membership_event_id], StateFilter.from_types([key])
)
data = room_state[membership_event_id].get(key)
else:
# check_user_in_room_or_world_readable, if it doesn't raise an AuthError, should
# only ever return a Membership.JOIN/LEAVE object
#
# Safeguard in case it returned something else
logger.error(
"Attempted to retrieve data from a room for a user that has never been in it. "
"This should not have happened."
)
raise SynapseError(403, "User not in room", errcode=Codes.FORBIDDEN)
return data
async def get_state_events(
self,
user_id: str,
room_id: str,
state_filter: Optional[StateFilter] = None,
at_token: Optional[StreamToken] = None,
is_guest: bool = False,
) -> List[dict]:
"""Retrieve all state events for a given room. If the user is
joined to the room then return the current state. If the user has
left the room return the state events from when they left. If an explicit
'at' parameter is passed, return the state events as of that event, if
visible.
Args:
user_id: The user requesting state events.
room_id: The room ID to get all state events from.
state_filter: The state filter used to fetch state from the database.
at_token: the stream token at which we are requesting the state.
If the user is not allowed to view the state as of that
stream token, we raise a 403 SynapseError. If None, returns the current
state based on the current_state_events table.
is_guest: whether this user is a guest
Returns:
A list of dicts representing state events. [{}, {}, {}]
Raises:
NotFoundError (404) if the at token does not yield an event
AuthError (403) if the user doesn't have permission to view
members of this room.
"""
state_filter = state_filter or StateFilter.all()
if at_token:
# FIXME this claims to get the state at a stream position, but
# get_recent_events_for_room operates by topo ordering. This therefore
# does not reliably give you the state at the given stream position.
# (https://github.com/matrix-org/synapse/issues/3305)
last_events, _ = await self.store.get_recent_events_for_room(
room_id, end_token=at_token.room_key, limit=1
)
if not last_events:
raise NotFoundError("Can't find event for token %s" % (at_token,))
last_event = last_events[0]
# check whether the user is in the room at that time to determine
# whether they should be treated as peeking.
state_map = await self.state_store.get_state_for_event(
last_event.event_id,
StateFilter.from_types([(EventTypes.Member, user_id)]),
)
joined = False
membership_event = state_map.get((EventTypes.Member, user_id))
if membership_event:
joined = membership_event.membership == Membership.JOIN
is_peeking = not joined
visible_events = await filter_events_for_client(
self.storage,
user_id,
last_events,
filter_send_to_client=False,
is_peeking=is_peeking,
)
if visible_events:
room_state_events = await self.state_store.get_state_for_events(
[last_event.event_id], state_filter=state_filter
)
room_state: Mapping[Any, EventBase] = room_state_events[
last_event.event_id
]
else:
raise AuthError(
403,
"User %s not allowed to view events in room %s at token %s"
% (user_id, room_id, at_token),
)
else:
(
membership,
membership_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id, user_id, allow_departed_users=True
)
if membership == Membership.JOIN:
state_ids = await self.store.get_filtered_current_state_ids(
room_id, state_filter=state_filter
)
room_state = await self.store.get_events(state_ids.values())
elif membership == Membership.LEAVE:
# If the membership is not JOIN, then the event ID should exist.
assert (
membership_event_id is not None
), "check_user_in_room_or_world_readable returned invalid data"
room_state_events = await self.state_store.get_state_for_events(
[membership_event_id], state_filter=state_filter
)
room_state = room_state_events[membership_event_id]
now = self.clock.time_msec()
events = await self._event_serializer.serialize_events(
room_state.values(), now, bundle_aggregations=True
)
return events
async def get_joined_members(self, requester: Requester, room_id: str) -> dict:
"""Get all the joined members in the room and their profile information.
If the user has left the room return the state events from when they left.
Args:
requester: The user requesting state events.
room_id: The room ID to get all state events from.
Returns:
A dict of user_id to profile info
"""
user_id = requester.user.to_string()
if not requester.app_service:
# We check AS auth after fetching the room membership, as it
# requires us to pull out all joined members anyway.
membership, _ = await self.auth.check_user_in_room_or_world_readable(
room_id, user_id, allow_departed_users=True
)
if membership != Membership.JOIN:
raise NotImplementedError(
"Getting joined members after leaving is not implemented"
)
users_with_profile = await self.store.get_users_in_room_with_profiles(room_id)
# If this is an AS, double check that they are allowed to see the members.
# This can either be because the AS user is in the room or because there
# is a user in the room that the AS is "interested in"
if requester.app_service and user_id not in users_with_profile:
for uid in users_with_profile:
if requester.app_service.is_interested_in_user(uid):
break
else:
# Loop fell through, AS has no interested users in room
raise AuthError(403, "Appservice not in room")
return {
user_id: {
"avatar_url": profile.avatar_url,
"display_name": profile.display_name,
}
for user_id, profile in users_with_profile.items()
}
def maybe_schedule_expiry(self, event: EventBase) -> None:
"""Schedule the expiry of an event if there's not already one scheduled,
or if the one running is for an event that will expire after the provided
timestamp.
This function needs to invalidate the event cache, which is only possible on
the master process, and therefore needs to be run there.
Args:
event: The event to schedule the expiry of.
"""
expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
if not isinstance(expiry_ts, int) or event.is_state():
return
# _schedule_expiry_for_event won't actually schedule anything if there's already
# a task scheduled for a timestamp that's sooner than the provided one.
self._schedule_expiry_for_event(event.event_id, expiry_ts)
async def _schedule_next_expiry(self) -> None:
"""Retrieve the ID and the expiry timestamp of the next event to be expired,
and schedule an expiry task for it.
If there's no event left to expire, set _scheduled_expiry to None so that a
future call to save_expiry_ts can schedule a new expiry task.
"""
# Try to get the expiry timestamp of the next event to expire.
res = await self.store.get_next_event_to_expire()
if res:
event_id, expiry_ts = res
self._schedule_expiry_for_event(event_id, expiry_ts)
def _schedule_expiry_for_event(self, event_id: str, expiry_ts: int) -> None:
"""Schedule an expiry task for the provided event if there's not already one
scheduled at a timestamp that's sooner than the provided one.
Args:
event_id: The ID of the event to expire.
expiry_ts: The timestamp at which to expire the event.
"""
if self._scheduled_expiry:
# If the provided timestamp refers to a time before the scheduled time of the
# next expiry task, cancel that task and reschedule it for this timestamp.
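# IDelayedCall.getTime() returns seconds, so scale to milliseconds before comparing with expiry_ts.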
next_scheduled_expiry_ts = self._scheduled_expiry.getTime() * 1000
if expiry_ts < next_scheduled_expiry_ts:
self._scheduled_expiry.cancel()
else:
return
# Figure out how many seconds we need to wait before expiring the event.
now_ms = self.clock.time_msec()
delay = (expiry_ts - now_ms) / 1000
# callLater doesn't support negative delays, so trim the delay to 0 if we're
# in that case.
if delay < 0:
delay = 0
logger.info("Scheduling expiry for event %s in %.3fs", event_id, delay)
self._scheduled_expiry = self.clock.call_later(
delay,
run_as_background_process,
"_expire_event",
self._expire_event,
event_id,
)
async def _expire_event(self, event_id: str) -> None:
"""Retrieve and expire an event that needs to be expired from the database.
If the event doesn't exist in the database, log it and delete the expiry date
from the database (so that we don't try to expire it again).
"""
assert self._ephemeral_events_enabled
self._scheduled_expiry = None
logger.info("Expiring event %s", event_id)
try:
# Expire the event if we know about it. This function also deletes the expiry
# date from the database in the same database transaction.
await self.store.expire_event(event_id)
except Exception as e:
logger.error("Could not expire event %s: %r", event_id, e)
# Schedule the expiry of the next event to expire.
await self._schedule_next_expiry()
# The duration (in ms) after which rooms should be removed from
# `_rooms_to_exclude_from_dummy_event_insertion` (with the effect that we will try
# to generate a dummy event for them once more)
#
_DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY = 7 * 24 * 60 * 60 * 1000
class EventCreationHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self._event_auth_handler = hs.get_event_auth_handler()
self.store = hs.get_datastore()
self.storage = hs.get_storage()
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
self.profile_handler = hs.get_profile_handler()
self.event_builder_factory = hs.get_event_builder_factory()
self.server_name = hs.hostname
self.notifier = hs.get_notifier()
self.config = hs.config
self.require_membership_for_aliases = (
hs.config.server.require_membership_for_aliases
)
self._events_shard_config = self.config.worker.events_shard_config
self._instance_name = hs.get_instance_name()
self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state
self.membership_types_to_include_profile_data_in = {
Membership.JOIN,
Membership.KNOCK,
}
if self.hs.config.server.include_profile_data_on_invite:
self.membership_types_to_include_profile_data_in.add(Membership.INVITE)
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
self.request_ratelimiter = hs.get_request_ratelimiter()
# We arbitrarily limit concurrent event creation for a room to 5.
# This is to stop us from diverging history *too* much.
self.limiter = Linearizer(max_count=5, name="room_event_creation_limit")
self.action_generator = hs.get_action_generator()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules: "ThirdPartyEventRules" = (
self.hs.get_third_party_event_rules()
)
self._block_events_without_consent_error = (
self.config.consent.block_events_without_consent_error
)
# we need to construct a ConsentURIBuilder here, as it checks that the necessary
# config options are set, but *only* if we have a configuration for which we are
# going to need it.
if self._block_events_without_consent_error:
self._consent_uri_builder = ConsentURIBuilder(self.config)
# Rooms which should be excluded from dummy insertion. (For instance,
# those without local users who can send events into the room).
#
# map from room id to time-of-last-attempt.
#
self._rooms_to_exclude_from_dummy_event_insertion: Dict[str, int] = {}
# The number of forward extremities before a dummy event is sent.
self._dummy_events_threshold = hs.config.server.dummy_events_threshold
if (
self.config.worker.run_background_tasks
and self.config.server.cleanup_extremities_with_dummy_events
):
self.clock.looping_call(
lambda: run_as_background_process(
"send_dummy_events_to_fill_extremities",
self._send_dummy_events_to_fill_extremities,
),
5 * 60 * 1000,
)
self._message_handler = hs.get_message_handler()
self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages
self._external_cache = hs.get_external_cache()
# Stores the state groups we've recently added to the joined hosts
# external cache. Note that the timeout must be significantly less than
# the TTL on the external cache.
self._external_cache_joined_hosts_updates: Optional[ExpiringCache] = None
if self._external_cache.is_enabled():
self._external_cache_joined_hosts_updates = ExpiringCache(
"_external_cache_joined_hosts_updates",
self.clock,
expiry_ms=30 * 60 * 1000,
)
async def create_event(
self,
requester: Requester,
event_dict: dict,
txn_id: Optional[str] = None,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
allow_no_prev_events: bool = False,
depth: Optional[int] = None,
) -> Tuple[EventBase, EventContext]:
"""
Given a dict from a client, create a new event.
Creates a FrozenEvent object, filling out auth_events, prev_events,
etc.
Adds display names to Join membership events.
Args:
requester
event_dict: An entire event
txn_id
prev_event_ids:
the forward extremities to use as the prev_events for the
new event.
If None, they will be requested from the database.
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
If non-None, prev_event_ids must also be provided.
require_consent: Whether to check if the requester has
consented to the privacy policy.
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
Raises:
ResourceLimitError if the server is blocked due to some resource being
exceeded
Returns:
Tuple of created event, Context
"""
await self.auth.check_auth_blocking(requester=requester)
if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
room_version_id = event_dict["content"]["room_version"]
room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
if not room_version_obj:
# this can happen if support is withdrawn for a room version
raise UnsupportedRoomVersionError(room_version_id)
else:
try:
room_version_obj = await self.store.get_room_version(
event_dict["room_id"]
)
except NotFoundError:
raise AuthError(403, "Unknown room")
builder = self.event_builder_factory.for_room_version(
room_version_obj, event_dict
)
self.validator.validate_builder(builder)
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
target = UserID.from_string(builder.state_key)
if membership in self.membership_types_to_include_profile_data_in:
# If event doesn't include a display name, add one.
profile = self.profile_handler
content = builder.content
try:
if "displayname" not in content:
displayname = await profile.get_displayname(target)
if displayname is not None:
content["displayname"] = displayname
if "avatar_url" not in content:
avatar_url = await profile.get_avatar_url(target)
if avatar_url is not None:
content["avatar_url"] = avatar_url
except Exception as e:
logger.info(
"Failed to get profile information for %r: %s", target, e
)
is_exempt = await self._is_exempt_from_privacy_policy(builder, requester)
if require_consent and not is_exempt:
await self.assert_accepted_privacy_policy(requester)
if requester.access_token_id is not None:
builder.internal_metadata.token_id = requester.access_token_id
if txn_id is not None:
builder.internal_metadata.txn_id = txn_id
builder.internal_metadata.outlier = outlier
builder.internal_metadata.historical = historical
event, context = await self.create_new_client_event(
builder=builder,
requester=requester,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
depth=depth,
allow_no_prev_events=allow_no_prev_events,
)
# In an ideal world we wouldn't need the second part of this condition. However,
# this behaviour isn't spec'd yet, meaning we should be able to deactivate this
# behaviour. Another reason is that this code is also evaluated each time a new
# m.room.aliases event is created, which includes hitting a /directory route.
# Therefore not including this condition here would render the similar one in
# synapse.handlers.directory pointless.
if builder.type == EventTypes.Aliases and self.require_membership_for_aliases:
# Ideally we'd do the membership check in event_auth.check(), which
# describes a spec'd algorithm for authenticating events received over
# federation as well as those created locally. As of room v3, aliases events
# can be created by users that are not in the room, therefore we have to
# tolerate them in event_auth.check().
prev_state_ids = await context.get_prev_state_ids()
prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
prev_event = (
await self.store.get_event(prev_event_id, allow_none=True)
if prev_event_id
else None
)
if not prev_event or prev_event.membership != Membership.JOIN:
logger.warning(
(
"Attempt to send `m.room.aliases` in room %s by user %s but"
" membership is %s"
),
event.room_id,
event.sender,
prev_event.membership if prev_event else None,
)
raise AuthError(
403, "You must be in the room to create an alias for it"
)
self.validator.validate_new(event, self.config)
return event, context
async def _is_exempt_from_privacy_policy(
self, builder: EventBuilder, requester: Requester
) -> bool:
""" "Determine if an event to be sent is exempt from having to consent
to the privacy policy
Args:
builder: event being created
requester: user requesting this event
Returns:
true if the event can be sent without the user consenting
"""
# the only thing the user can do is join the server notices room.
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
if membership == Membership.JOIN:
return await self._is_server_notices_room(builder.room_id)
elif membership == Membership.LEAVE:
# the user is always allowed to leave (but not kick people)
return builder.state_key == requester.user.to_string()
return False
async def _is_server_notices_room(self, room_id: str) -> bool:
if self.config.servernotices.server_notices_mxid is None:
return False
user_ids = await self.store.get_users_in_room(room_id)
return self.config.servernotices.server_notices_mxid in user_ids
async def assert_accepted_privacy_policy(self, requester: Requester) -> None:
"""Check if a user has accepted the privacy policy
Called when the given user is about to do something that requires
privacy consent. We see if the user is exempt and otherwise check that
they have given consent. If they have not, a ConsentNotGiven error is
raised.
Args:
requester: The user making the request
Returns:
Returns normally if the user has consented or is exempt
Raises:
ConsentNotGivenError: if the user has not given consent yet
"""
if self._block_events_without_consent_error is None:
return
# exempt AS users from needing consent
if requester.app_service is not None:
return
user_id = requester.authenticated_entity
if not user_id.startswith("@"):
# The authenticated entity might not be a user, e.g. if it's the
# server puppetting the user.
return
user = UserID.from_string(user_id)
# exempt the system notices user
if (
self.config.servernotices.server_notices_mxid is not None
and user_id == self.config.servernotices.server_notices_mxid
):
return
u = await self.store.get_user_by_id(user_id)
assert u is not None
if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT):
# support and bot users are not required to consent
return
if u["appservice_id"] is not None:
# users registered by an appservice are exempt
return
if u["consent_version"] == self.config.consent.user_consent_version:
return
consent_uri = self._consent_uri_builder.build_user_consent_uri(user.localpart)
msg = self._block_events_without_consent_error % {"consent_uri": consent_uri}
raise ConsentNotGivenError(msg=msg, consent_uri=consent_uri)
async def deduplicate_state_event(
self, event: EventBase, context: EventContext
) -> Optional[EventBase]:
"""
Checks whether event is in the latest resolved state in context.
Args:
event: The event to check for duplication.
context: The event context.
Returns:
The previous version of the event is returned, if it is found in the
event context. Otherwise, None is returned.
"""
prev_state_ids = await context.get_prev_state_ids()
prev_event_id = prev_state_ids.get((event.type, event.state_key))
if not prev_event_id:
return None
prev_event = await self.store.get_event(prev_event_id, allow_none=True)
if not prev_event:
return None
if prev_event and event.user_id == prev_event.user_id:
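# Canonical JSON gives a deterministic byte encoding, so equal encodings imply identical content.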
prev_content = encode_canonical_json(prev_event.content)
next_content = encode_canonical_json(event.content)
if prev_content == next_content:
return prev_event
return None
async def create_and_send_nonmember_event(
self,
requester: Requester,
event_dict: dict,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
ratelimit: bool = True,
txn_id: Optional[str] = None,
ignore_shadow_ban: bool = False,
outlier: bool = False,
historical: bool = False,
depth: Optional[int] = None,
) -> Tuple[EventBase, int]:
"""
Creates an event, then sends it.
See self.create_event and self.handle_new_client_event.
Args:
requester: The requester sending the event.
event_dict: An entire event.
prev_event_ids:
The event IDs to use as the prev events.
Should normally be left as None to automatically request them
from the database.
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
If non-None, prev_event_ids must also be provided.
ratelimit: Whether to rate limit this send.
txn_id: The transaction ID.
ignore_shadow_ban: True if shadow-banned users should be allowed to
send this event.
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
Returns:
The event, and its stream ordering (if deduplication happened,
the previous, duplicate event).
Raises:
ShadowBanError if the requester has been shadow-banned.
"""
if event_dict["type"] == EventTypes.Member:
raise SynapseError(
500, "Tried to send member event through non-member codepath"
)
if not ignore_shadow_ban and requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
# We limit the number of concurrent event sends in a room so that we
# don't fork the DAG too much. If we don't limit then we can end up in
# a situation where event persistence can't keep up, causing
# extremities to pile up, which in turn leads to state resolution
# taking longer.
with (await self.limiter.queue(event_dict["room_id"])):
if txn_id and requester.access_token_id:
existing_event_id = await self.store.get_event_id_from_transaction_id(
event_dict["room_id"],
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
if existing_event_id:
event = await self.store.get_event(existing_event_id)
# we know it was persisted, so must have a stream ordering
assert event.internal_metadata.stream_ordering
return event, event.internal_metadata.stream_ordering
event, context = await self.create_event(
requester,
event_dict,
txn_id=txn_id,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
outlier=outlier,
historical=historical,
depth=depth,
)
assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
event.sender,
)
spam_error = await self.spam_checker.check_event_for_spam(event)
if spam_error:
if not isinstance(spam_error, str):
spam_error = "Spam is not permitted here"
raise SynapseError(403, spam_error, Codes.FORBIDDEN)
ev = await self.handle_new_client_event(
requester=requester,
event=event,
context=context,
ratelimit=ratelimit,
ignore_shadow_ban=ignore_shadow_ban,
)
# we know it was persisted, so must have a stream ordering
assert ev.internal_metadata.stream_ordering
return ev, ev.internal_metadata.stream_ordering
@measure_func("create_new_client_event")
async def create_new_client_event(
self,
builder: EventBuilder,
requester: Optional[Requester] = None,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
allow_no_prev_events: bool = False,
) -> Tuple[EventBase, EventContext]:
"""Create a new event for a local client
Args:
builder:
requester:
prev_event_ids:
the forward extremities to use as the prev_events for the
new event.
If None, they will be requested from the database.
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
Returns:
Tuple of created event, context
"""
# Strip down the auth_event_ids to only what we need to auth the event.
# For example, we don't need extra m.room.member events that don't match event.sender
full_state_ids_at_event = None
if auth_event_ids is not None:
# If auth events are provided, prev events must be also.
# prev_event_ids could be an empty array though.
assert prev_event_ids is not None
# Copy the full auth state before it is stripped down
full_state_ids_at_event = auth_event_ids.copy()
temp_event = await builder.build(
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
depth=depth,
)
auth_events = await self.store.get_events_as_list(auth_event_ids)
# Create a StateMap[str]
auth_event_state_map = {
(e.type, e.state_key): e.event_id for e in auth_events
}
# Actually strip down and use the necessary auth events
auth_event_ids = self._event_auth_handler.compute_auth_events(
event=temp_event,
current_state_ids=auth_event_state_map,
for_verification=False,
)
if prev_event_ids is not None:
assert (
len(prev_event_ids) <= 10
), "Attempting to create an event with %i prev_events" % (
len(prev_event_ids),
)
else:
prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
# Do a quick sanity check here, rather than waiting until we've created the
# event and then try to auth it (which fails with a somewhat confusing "No
# create event in auth events")
if allow_no_prev_events:
# We allow events with no `prev_events`, but they had better have some `auth_events`
assert (
builder.type == EventTypes.Create
# Allow an event to have empty list of prev_event_ids
# only if it has auth_event_ids.
or auth_event_ids
), "Attempting to create a non-m.room.create event with no prev_events or auth_event_ids"
else:
# we now ought to have some prev_events (unless it's a create event).
assert (
builder.type == EventTypes.Create or prev_event_ids
), "Attempting to create a non-m.room.create event with no prev_events"
event = await builder.build(
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
depth=depth,
)
# Pass on the outlier property from the builder to the event
# after it is created
if builder.internal_metadata.outlier:
event.internal_metadata.outlier = True
context = EventContext.for_outlier()
elif (
event.type == EventTypes.MSC2716_INSERTION
and full_state_ids_at_event
and builder.internal_metadata.is_historical()
):
old_state = await self.store.get_events_as_list(full_state_ids_at_event)
context = await self.state.compute_event_context(event, old_state=old_state)
else:
context = await self.state.compute_event_context(event)
if requester:
context.app_service = requester.app_service
res, new_content = await self.third_party_event_rules.check_event_allowed(
event, context
)
if res is False:
logger.info(
"Event %s forbidden by third-party rules",
event,
)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN
)
elif new_content is not None:
# the third-party rules want to replace the event. We'll need to build a new
# event.
event, context = await self._rebuild_event_after_third_party_rules(
new_content, event
)
self.validator.validate_new(event, self.config)
await self._validate_event_relation(event)
logger.debug("Created event %s", event.event_id)
return event, context
async def _validate_event_relation(self, event: EventBase) -> None:
"""
Ensure the relation data on a new event is not bogus.
Args:
event: The event being created.
Raises:
SynapseError if the event is invalid.
"""
relation = event.content.get("m.relates_to")
if not relation:
return
relation_type = relation.get("rel_type")
if not relation_type:
return
# Ensure the parent is real.
relates_to = relation.get("event_id")
if not relates_to:
return
parent_event = await self.store.get_event(relates_to, allow_none=True)
if parent_event:
# And in the same room.
if parent_event.room_id != event.room_id:
raise SynapseError(400, "Relations must be in the same room")
else:
# There must be some reason that the client knows the event exists,
# see if there are existing relations. If so, assume everything is fine.
if not await self.store.event_is_target_of_relation(relates_to):
# Otherwise, the client can't know about the parent event!
raise SynapseError(400, "Can't send relation to unknown event")
# If this event is an annotation then we check that the sender
# can't annotate the same way twice (e.g. stops users from liking an
# event multiple times).
if relation_type == RelationTypes.ANNOTATION:
aggregation_key = relation["key"]
already_exists = await self.store.has_user_annotated_event(
relates_to, event.type, aggregation_key, event.sender
)
if already_exists:
raise SynapseError(400, "Can't send same reaction twice")
# Don't attempt to start a thread if the parent event is a relation.
elif relation_type == RelationTypes.THREAD:
if await self.store.event_includes_relation(relates_to):
raise SynapseError(
400, "Cannot start threads from an event with a relation"
)
@measure_func("handle_new_client_event")
async def handle_new_client_event(
self,
requester: Requester,
event: EventBase,
context: EventContext,
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
ignore_shadow_ban: bool = False,
) -> EventBase:
"""Processes a new event.
This includes deduplicating, checking auth, persisting,
notifying users, sending to remote servers, etc.
If called from a worker will hit out to the master process for final
processing.
Args:
requester
event
context
ratelimit
extra_users: Any extra users to notify about event
ignore_shadow_ban: True if shadow-banned users should be allowed to
send this event.
Return:
If the event was deduplicated, the previous, duplicate, event. Otherwise,
`event`.
Raises:
ShadowBanError if the requester has been shadow-banned.
"""
extra_users = extra_users or []
# we don't apply shadow-banning to membership events here. Invites are blocked
# higher up the stack, and we allow shadow-banned users to send join and leave
# events as normal.
if (
event.type != EventTypes.Member
and not ignore_shadow_ban
and requester.shadow_banned
):
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
if event.is_state():
prev_event = await self.deduplicate_state_event(event, context)
if prev_event is not None:
logger.info(
"Not bothering to persist state event %s duplicated by %s",
event.event_id,
prev_event.event_id,
)
return prev_event
if event.is_state() and (event.type, event.state_key) == (
EventTypes.Create,
"",
):
room_version_id = event.content.get(
"room_version", RoomVersions.V1.identifier
)
room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
if not room_version_obj:
raise UnsupportedRoomVersionError(
"Attempt to create a room with unsupported room version %s"
% (room_version_id,)
)
else:
room_version_obj = await self.store.get_room_version(event.room_id)
if event.internal_metadata.is_out_of_band_membership():
# the only sort of out-of-band-membership events we expect to see here are
# invite rejections and rescinded knocks that we have generated ourselves.
assert event.type == EventTypes.Member
assert event.content["membership"] == Membership.LEAVE
else:
try:
validate_event_for_room_version(room_version_obj, event)
await self._event_auth_handler.check_auth_rules_from_context(
room_version_obj, event, context
)
except AuthError as err:
logger.warning("Denying new event %r because %s", event, err)
raise err
# Ensure that we can round trip before trying to persist in db
try:
dump = json_encoder.encode(event.content)
json_decoder.decode(dump)
except Exception:
logger.exception("Failed to encode content: %r", event.content)
raise
# We now persist the event (and update the cache in parallel, since we
# don't want to block on it).
result, _ = await make_deferred_yieldable(
gather_results(
(
run_in_background(
self._persist_event,
requester=requester,
event=event,
context=context,
ratelimit=ratelimit,
extra_users=extra_users,
),
run_in_background(
self.cache_joined_hosts_for_event, event, context
).addErrback(log_failure, "cache_joined_hosts_for_event failed"),
),
consumeErrors=True,
)
).addErrback(unwrapFirstError)
return result
async def _persist_event(
self,
requester: Requester,
event: EventBase,
context: EventContext,
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
) -> EventBase:
"""Actually persists the event. Should only be called by
`handle_new_client_event`, and see its docstring for documentation of
the arguments.
"""
# Skip push notification actions for historical messages
# because we don't want to notify people about old history back in time.
# The historical messages also do not have the proper `context.current_state_ids`
# and `state_groups` because they have `prev_events` that aren't persisted yet
# (historical messages persisted in reverse-chronological order).
if not event.internal_metadata.is_historical():
await self.action_generator.handle_push_actions_for_event(event, context)
try:
# If we're a worker we need to hit out to the master.
writer_instance = self._events_shard_config.get_instance(event.room_id)
if writer_instance != self._instance_name:
result = await self.send_event(
instance_name=writer_instance,
event_id=event.event_id,
store=self.store,
requester=requester,
event=event,
context=context,
ratelimit=ratelimit,
extra_users=extra_users,
)
stream_id = result["stream_id"]
event_id = result["event_id"]
if event_id != event.event_id:
# If we get a different event back then it means that it's
# been de-duplicated, so we replace the given event with the
# one already persisted.
event = await self.store.get_event(event_id)
else:
# If we newly persisted the event then we need to update its
# stream_ordering entry manually (as it was persisted on
# another worker).
event.internal_metadata.stream_ordering = stream_id
return event
event = await self.persist_and_notify_client_event(
requester, event, context, ratelimit=ratelimit, extra_users=extra_users
)
return event
except Exception:
# Ensure that we actually remove the entries in the push actions
# staging area, if we calculated them.
await self.store.remove_push_actions_from_staging(event.event_id)
raise
async def cache_joined_hosts_for_event(
self, event: EventBase, context: EventContext
) -> None:
"""Precalculate the joined hosts at the event, when using Redis, so that
external federation senders don't have to recalculate it themselves.
"""
if not self._external_cache.is_enabled():
return
# If external cache is enabled we should always have this.
assert self._external_cache_joined_hosts_updates is not None
# We actually store two mappings, event ID -> prev state group,
# state group -> joined hosts, which is much more space efficient
# than event ID -> joined hosts.
#
# Note: We have to cache event ID -> prev state group, as we don't
# store that in the DB.
#
# Note: We set the state group -> joined hosts cache if it hasn't been
# set for a while, so that the expiry time is reset.
state_entry = await self.state.resolve_state_groups_for_events(
event.room_id, event_ids=event.prev_event_ids()
)
if state_entry.state_group:
await self._external_cache.set(
"event_to_prev_state_group",
event.event_id,
state_entry.state_group,
expiry_ms=60 * 60 * 1000,
)
if state_entry.state_group in self._external_cache_joined_hosts_updates:
return
joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry)
# Note that the expiry times must be larger than the expiry time in
# _external_cache_joined_hosts_updates.
await self._external_cache.set(
"get_joined_hosts",
str(state_entry.state_group),
list(joined_hosts),
expiry_ms=60 * 60 * 1000,
)
self._external_cache_joined_hosts_updates[state_entry.state_group] = None
async def _validate_canonical_alias(
self,
directory_handler: DirectoryHandler,
room_alias_str: str,
expected_room_id: str,
) -> None:
"""
Ensure that the given room alias points to the expected room ID.
Args:
directory_handler: The directory handler object.
room_alias_str: The room alias to check.
expected_room_id: The room ID that the alias should point to.
"""
room_alias = RoomAlias.from_string(room_alias_str)
try:
mapping = await directory_handler.get_association(room_alias)
except SynapseError as e:
# Turn M_NOT_FOUND errors into M_BAD_ALIAS errors.
if e.errcode == Codes.NOT_FOUND:
raise SynapseError(
400,
"Room alias %s does not point to the room" % (room_alias_str,),
Codes.BAD_ALIAS,
)
raise
if mapping["room_id"] != expected_room_id:
raise SynapseError(
400,
"Room alias %s does not point to the room" % (room_alias_str,),
Codes.BAD_ALIAS,
)
async def persist_and_notify_client_event(
self,
requester: Requester,
event: EventBase,
context: EventContext,
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
) -> EventBase:
"""Called when we have fully built the event, have already
calculated the push actions for the event, and checked auth.
This should only be run on the instance in charge of persisting events.
Returns:
The persisted event. This may be different than the given event if
it was de-duplicated (e.g. because we had already persisted an
event with the same transaction ID.)
"""
extra_users = extra_users or []
assert self.storage.persistence is not None
assert self._events_shard_config.should_handle(
self._instance_name, event.room_id
)
if ratelimit:
# We check if this is a room admin redacting an event so that we
# can apply different ratelimiting. We do this by simply checking
# it's not a self-redaction (to avoid having to look up whether the
# user is actually admin or not).
is_admin_redaction = False
if event.type == EventTypes.Redaction:
assert event.redacts is not None
original_event = await self.store.get_event(
event.redacts,
redact_behaviour=EventRedactBehaviour.AS_IS,
get_prev_content=False,
allow_rejected=False,
allow_none=True,
)
is_admin_redaction = bool(
original_event and event.sender != original_event.sender
)
await self.request_ratelimiter.ratelimit(
requester, is_admin_redaction=is_admin_redaction
)
await self._maybe_kick_guest_users(event, context)
if event.type == EventTypes.CanonicalAlias:
# Validate a newly added alias or newly added alt_aliases.
original_alias = None
original_alt_aliases: List[str] = []
original_event_id = event.unsigned.get("replaces_state")
if original_event_id:
original_event = await self.store.get_event(original_event_id)
if original_event:
original_alias = original_event.content.get("alias", None)
original_alt_aliases = original_event.content.get("alt_aliases", [])
# Check the alias is currently valid (if it has changed).
room_alias_str = event.content.get("alias", None)
directory_handler = self.hs.get_directory_handler()
if room_alias_str and room_alias_str != original_alias:
await self._validate_canonical_alias(
directory_handler, room_alias_str, event.room_id
)
# Check that alt_aliases is the proper form.
alt_aliases = event.content.get("alt_aliases", [])
if not isinstance(alt_aliases, (list, tuple)):
raise SynapseError(
400, "The alt_aliases property must be a list.", Codes.INVALID_PARAM
)
# If the old version of alt_aliases is of an unknown form,
# completely replace it.
if not isinstance(original_alt_aliases, (list, tuple)):
original_alt_aliases = []
# Check that each alias is currently valid.
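# Only newly added aliases need validation; existing entries were checked when they were first added.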
new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
if new_alt_aliases:
for alias_str in new_alt_aliases:
await self._validate_canonical_alias(
directory_handler, alias_str, event.room_id
)
federation_handler = self.hs.get_federation_handler()
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.INVITE:
event.unsigned[
"invite_room_state"
] = await self.store.get_stripped_room_state_from_event_context(
context,
self.room_prejoin_state_types,
membership_user_id=event.sender,
)
invitee = UserID.from_string(event.state_key)
if not self.hs.is_mine(invitee):
# TODO: Can we add signature from remote server in a nicer
# way? If we have been invited by a remote server, we need
# to get them to sign the event.
returned_invite = await federation_handler.send_invite(
invitee.domain, event
)
event.unsigned.pop("room_state", None)
# TODO: Make sure the signatures actually are correct.
event.signatures.update(returned_invite.signatures)
if event.content["membership"] == Membership.KNOCK:
event.unsigned[
"knock_room_state"
] = await self.store.get_stripped_room_state_from_event_context(
context,
self.room_prejoin_state_types,
)
if event.type == EventTypes.Redaction:
assert event.redacts is not None
original_event = await self.store.get_event(
event.redacts,
redact_behaviour=EventRedactBehaviour.AS_IS,
get_prev_content=False,
allow_rejected=False,
allow_none=True,
)
room_version = await self.store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
# we can make some additional checks now if we have the original event.
if original_event:
if original_event.type == EventTypes.Create:
raise AuthError(403, "Redacting create events is not permitted")
if original_event.room_id != event.room_id:
raise SynapseError(400, "Cannot redact event from a different room")
if original_event.type == EventTypes.ServerACL:
raise AuthError(403, "Redacting server ACL events is not permitted")
# Add a little safety stop-gap to prevent people from trying to
# redact MSC2716 related events when they're in a room version
# which does not support it yet. We allow people to use MSC2716
# events in existing room versions but only from the room
# creator since it does not require any changes to the auth
# rules and, in effect, the redaction algorithm. In the
# supported room version, we add the `historical` power level to
# auth the MSC2716 related events and adjust the redaction
# algorithm to keep the `historical` field around (redacting an
# event should only strip fields which don't affect the
# structural protocol level).
is_msc2716_event = (
original_event.type == EventTypes.MSC2716_INSERTION
or original_event.type == EventTypes.MSC2716_BATCH
or original_event.type == EventTypes.MSC2716_MARKER
)
if not room_version_obj.msc2716_historical and is_msc2716_event:
raise AuthError(
403,
"Redacting MSC2716 events is not supported in this room version",
)
prev_state_ids = await context.get_prev_state_ids()
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=True
)
auth_events_map = await self.store.get_events(auth_events_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events_map.values()}
if event_auth.check_redaction(
room_version_obj, event, auth_events=auth_events
):
# this user doesn't have 'redact' rights, so we need to do some more
# checks on the original event. Let's start by checking the original
# event exists.
if not original_event:
raise NotFoundError("Could not find event %s" % (event.redacts,))
if event.user_id != original_event.user_id:
raise AuthError(403, "You don't have permission to redact events")
# all the checks are done.
event.internal_metadata.recheck_redaction = False
if event.type == EventTypes.Create:
prev_state_ids = await context.get_prev_state_ids()
if prev_state_ids:
raise AuthError(403, "Changing the room create event is forbidden")
if event.type == EventTypes.MSC2716_INSERTION:
room_version = await self.store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
create_event = await self.store.get_create_event_for_room(event.room_id)
room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
# Only check an insertion event if the room version
# supports it or the event is from the room creator.
if room_version_obj.msc2716_historical or (
self.config.experimental.msc2716_enabled
and event.sender == room_creator
):
next_batch_id = event.content.get(
EventContentFields.MSC2716_NEXT_BATCH_ID
)
conflicting_insertion_event_id = None
if next_batch_id:
conflicting_insertion_event_id = (
await self.store.get_insertion_event_id_by_batch_id(
event.room_id, next_batch_id
)
)
if conflicting_insertion_event_id is not None:
# The current insertion event that we're processing is invalid
# because an insertion event already exists in the room with the
# same next_batch_id. We can't allow multiple because the batch
# pointing will get weird, e.g. we can't determine which insertion
# event the batch event is pointing to.
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Another insertion event already exists with the same next_batch_id",
errcode=Codes.INVALID_PARAM,
)
# Mark any `m.historical` messages as backfilled so they don't appear
# in `/sync` and have the proper decrementing `stream_ordering` as we import
backfilled = False
if event.internal_metadata.is_historical():
backfilled = True
# Note that this returns the event that was persisted, which may not be
# the same as we passed in if it was deduplicated due to transaction IDs.
(
event,
event_pos,
max_stream_token,
) = await self.storage.persistence.persist_event(
event, context=context, backfilled=backfilled
)
if self._ephemeral_events_enabled:
# If there's an expiry timestamp on the event, schedule its expiry.
self._message_handler.maybe_schedule_expiry(event)
async def _notify() -> None:
try:
await self.notifier.on_new_room_event(
event, event_pos, max_stream_token, extra_users=extra_users
)
except Exception:
logger.exception(
"Error notifying about new room event %s",
event.event_id,
)
run_in_background(_notify)
if event.type == EventTypes.Message:
# We don't want to block sending messages on any presence code. This
# matters as sometimes presence code can take a while.
run_in_background(self._bump_active_time, requester.user)
return event
async def _maybe_kick_guest_users(
self, event: EventBase, context: EventContext
) -> None:
if event.type != EventTypes.GuestAccess:
return
guest_access = event.content.get(EventContentFields.GUEST_ACCESS)
if guest_access == GuestAccess.CAN_JOIN:
return
current_state_ids = await context.get_current_state_ids()
# since this is a client-generated event, it cannot be an outlier and we must
# therefore have the state ids.
assert current_state_ids is not None
current_state_dict = await self.store.get_events(
list(current_state_ids.values())
)
current_state = list(current_state_dict.values())
logger.info("maybe_kick_guest_users %r", current_state)
await self.hs.get_room_member_handler().kick_guest_users(current_state)
async def _bump_active_time(self, user: UserID) -> None:
try:
presence = self.hs.get_presence_handler()
await presence.bump_presence_active_time(user)
except Exception:
logger.exception("Error bumping presence active time")
async def _send_dummy_events_to_fill_extremities(self) -> None:
"""Background task to send dummy events into rooms that have a large
number of extremities
"""
self._expire_rooms_to_exclude_from_dummy_event_insertion()
room_ids = await self.store.get_rooms_with_many_extremities(
min_count=self._dummy_events_threshold,
limit=5,
room_id_filter=self._rooms_to_exclude_from_dummy_event_insertion.keys(),
)
for room_id in room_ids:
dummy_event_sent = await self._send_dummy_event_for_room(room_id)
if not dummy_event_sent:
# Did not find a valid user in the room, so remove from future attempts
# Exclusion is time limited, so the room will be rechecked in the future
# dependent on _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
logger.info(
"Failed to send dummy event into room %s. Will exclude it from "
"future attempts until cache expires" % (room_id,)
)
now = self.clock.time_msec()
self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now
async def _send_dummy_event_for_room(self, room_id: str) -> bool:
"""Attempt to send a dummy event for the given room.
Args:
room_id: room to try to send an event from
Returns:
True if a dummy event was successfully sent. False if no user was able
to send an event.
"""
# For each room we need to find a joined member we can use to send
# the dummy event with.
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
members = await self.state.get_current_users_in_room(
room_id, latest_event_ids=latest_event_ids
)
for user_id in members:
if not self.hs.is_mine_id(user_id):
continue
requester = create_requester(user_id, authenticated_entity=self.server_name)
try:
event, context = await self.create_event(
requester,
{
"type": EventTypes.Dummy,
"content": {},
"room_id": room_id,
"sender": user_id,
},
prev_event_ids=latest_event_ids,
)
event.internal_metadata.proactively_send = False
# Since this is a dummy-event it is OK if it is sent by a
# shadow-banned user.
await self.handle_new_client_event(
requester,
event,
context,
ratelimit=False,
ignore_shadow_ban=True,
)
return True
except AuthError:
logger.info(
"Failed to send dummy event into room %s for user %s due to "
"lack of power. Will try another user" % (room_id, user_id)
)
return False
def _expire_rooms_to_exclude_from_dummy_event_insertion(self) -> None:
expire_before = self.clock.time_msec() - _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
to_expire = set()
for room_id, time in self._rooms_to_exclude_from_dummy_event_insertion.items():
if time < expire_before:
to_expire.add(room_id)
for room_id in to_expire:
logger.debug(
"Expiring room id %s from dummy event insertion exclusion cache",
room_id,
)
del self._rooms_to_exclude_from_dummy_event_insertion[room_id]
async def _rebuild_event_after_third_party_rules(
self, third_party_result: dict, original_event: EventBase
) -> Tuple[EventBase, EventContext]:
# the third_party_event_rules want to replace the event.
# we do some basic checks, and then return the replacement event and context.
# Construct a new EventBuilder and validate it, which helps with the
# rest of these checks.
try:
builder = self.event_builder_factory.for_room_version(
original_event.room_version, third_party_result
)
self.validator.validate_builder(builder)
except SynapseError as e:
raise Exception(
"Third party rules module created an invalid event: " + e.msg,
)
immutable_fields = [
# changing the room is going to break things: we've already checked that the
# room exists, and are holding a concurrency limiter token for that room.
# Also, we might need to use a different room version.
"room_id",
# changing the type or state key might work, but we'd need to check that the
# calling functions aren't making assumptions about them.
"type",
"state_key",
]
for k in immutable_fields:
if getattr(builder, k, None) != original_event.get(k):
raise Exception(
"Third party rules module created an invalid event: "
"cannot change field " + k
)
# check that the new sender belongs to this HS
if not self.hs.is_mine_id(builder.sender):
raise Exception(
"Third party rules module created an invalid event: "
"invalid sender " + builder.sender
)
# copy over the original internal metadata
for k, v in original_event.internal_metadata.get_dict().items():
setattr(builder.internal_metadata, k, v)
# modules can send new state events, so we re-calculate the auth events just in
# case.
prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
event = await builder.build(
prev_event_ids=prev_event_ids,
auth_event_ids=None,
)
# we rebuild the event context, to be on the safe side. If nothing else,
# delta_ids might need an update.
context = await self.state.compute_event_context(event)
return event, context
|
the-stack_0_24708
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds parser models."""
import tensorflow as tf
from syntaxnet.ops import gen_parser_ops
from tensorflow.python.ops import control_flow_ops as cf
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
def BatchedSparseToDense(sparse_indices, output_size):
"""Batch compatible sparse to dense conversion.
This is useful for one-hot coded target labels.
Args:
sparse_indices: [batch_size] tensor containing one index per batch
output_size: needed in order to generate the correct dense output
Returns:
A [batch_size, output_size] dense tensor.
"""
eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
return tf.nn.embedding_lookup(eye, sparse_indices)
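# Illustrative sketch (TF1-style execution; indices and size chosen arbitrarily):
#
#   with tf.Session() as sess:
#     dense = sess.run(BatchedSparseToDense(tf.constant([1, 0]), output_size=3))
#     # dense == [[0., 1., 0.], [1., 0., 0.]]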
def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
"""Computes embeddings for each entry of sparse features sparse_features.
Args:
params: list of 2D tensors containing vector embeddings
sparse_features: 1D tensor of strings. Each entry is a string encoding of
dist_belief.SparseFeatures, and represents a variable length list of
feature ids, and optionally, corresponding weights values.
allow_weights: boolean to control whether the weights returned from the
SparseFeatures are used to multiply the embeddings.
Returns:
A tensor representing the combined embeddings for the sparse features.
For each entry s in sparse_features, the function looks up the embeddings
for each id and sums them into a single tensor weighing them by the
weight of each id. It returns a tensor with each entry of sparse_features
replaced by this combined embedding.
"""
if not isinstance(params, list):
params = [params]
# Lookup embeddings.
sparse_features = tf.convert_to_tensor(sparse_features)
indices, ids, weights = gen_parser_ops.unpack_syntax_net_sparse_features(
sparse_features)
embeddings = tf.nn.embedding_lookup(params, ids)
if allow_weights:
# Multiply by weights, reshaping to allow broadcast.
broadcast_weights_shape = tf.concat([tf.shape(weights), [1]], 0)
embeddings *= tf.reshape(weights, broadcast_weights_shape)
# Sum embeddings by index.
return tf.unsorted_segment_sum(embeddings, indices, tf.size(sparse_features))
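# Note: for a single entry whose SparseFeatures decode to ids [i, j] with weights
# [w_i, w_j] (and allow_weights=True), the corresponding output row is
# w_i * params[i] + w_j * params[j]; without weights it is the plain sum of the
# looked-up embedding vectors.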
class GreedyParser(object):
"""Builds a Chen & Manning style greedy neural net parser.
Builds a graph with an optional reader op connected at one end and
operations needed to train the network on the other. Supports multiple
network instantiations sharing the same parameters and network topology.
The following named nodes are added to the training and eval networks:
epochs: a tensor containing the current epoch number
cost: a tensor containing the current training step cost
gold_actions: a tensor containing actions from gold decoding
feature_endpoints: a list of sparse feature vectors
logits: output of the final layer before computing softmax
The training network also contains:
train_op: an op that executes a single training step
Typical usage:
parser = graph_builder.GreedyParser(num_actions, num_features,
num_feature_ids, embedding_sizes,
hidden_layer_sizes)
parser.AddTraining(task_context, batch_size=5)
with tf.Session('local') as sess:
# This works because the session uses the same default graph as the
# GraphBuilder did.
sess.run(parser.inits.values())
while True:
tf_epoch, _ = sess.run([parser.training['epoch'],
parser.training['train_op']])
if tf_epoch[0] > 0:
break
"""
def __init__(self,
num_actions,
num_features,
num_feature_ids,
embedding_sizes,
hidden_layer_sizes,
seed=None,
gate_gradients=False,
use_locking=False,
embedding_init=1.0,
relu_init=1e-4,
bias_init=0.2,
softmax_init=1e-4,
averaging_decay=0.9999,
use_averaging=True,
check_parameters=True,
check_every=1,
allow_feature_weights=False,
only_train='',
arg_prefix=None,
**unused_kwargs):
"""Initialize the graph builder with parameters defining the network.
Args:
num_actions: int size of the set of parser actions
num_features: int list of dimensions of the feature vectors
num_feature_ids: int list of same length as num_features corresponding to
the sizes of the input feature spaces
embedding_sizes: int list of same length as num_features of the desired
embedding layer sizes
hidden_layer_sizes: int list of desired relu layer sizes; may be empty
seed: optional random initializer seed to enable reproducibility
gate_gradients: if True, gradient updates are computed synchronously,
ensuring consistency and reproducibility
use_locking: if True, use locking to avoid read-write contention when
updating Variables
embedding_init: sets the std dev of normal initializer of embeddings to
embedding_init / embedding_size ** .5
relu_init: sets the std dev of normal initializer of relu weights
to relu_init
bias_init: sets constant initializer of relu bias to bias_init
softmax_init: sets the std dev of normal initializer of softmax init
to softmax_init
averaging_decay: decay for exponential moving average when computing
averaged parameters, set to 1 to do vanilla averaging
use_averaging: whether to use moving averages of parameters during evals
check_parameters: whether to check for NaN/Inf parameters during
training
check_every: checks numerics every check_every steps.
allow_feature_weights: whether feature weights are allowed.
only_train: the comma separated set of parameter names to train. If empty,
all model parameters will be trained.
arg_prefix: prefix for context parameters.
"""
self._num_actions = num_actions
self._num_features = num_features
self._num_feature_ids = num_feature_ids
self._embedding_sizes = embedding_sizes
self._hidden_layer_sizes = hidden_layer_sizes
self._seed = seed
self._gate_gradients = gate_gradients
self._use_locking = use_locking
self._use_averaging = use_averaging
self._check_parameters = check_parameters
self._check_every = check_every
self._allow_feature_weights = allow_feature_weights
self._only_train = set(only_train.split(',')) if only_train else None
self._feature_size = len(embedding_sizes)
self._embedding_init = embedding_init
self._relu_init = relu_init
self._softmax_init = softmax_init
self._arg_prefix = arg_prefix
# Parameters of the network with respect to which training is done.
self.params = {}
# Other variables, with respect to which no training is done, but which we
# nonetheless need to save in order to capture the state of the graph.
self.variables = {}
# Operations to initialize any nodes that require initialization.
self.inits = {}
# Training- and eval-related nodes.
self.training = {}
self.evaluation = {}
self.saver = None
# Nodes to compute moving averages of parameters, called every train step.
self._averaging = {}
self._averaging_decay = averaging_decay
# Pretrained embeddings that can be used instead of constant initializers.
self._pretrained_embeddings = {}
# After the following 'with' statement, we'll be able to re-enter the
# 'params' scope by re-using the self._param_scope member variable. See for
# instance _AddParam.
with tf.name_scope('params') as self._param_scope:
self._relu_bias_init = tf.constant_initializer(bias_init)
@property
def embedding_size(self):
size = 0
for i in range(self._feature_size):
size += self._num_features[i] * self._embedding_sizes[i]
return size
def _AddParam(self,
shape,
dtype,
name,
initializer=None,
return_average=False):
"""Add a model parameter w.r.t. we expect to compute gradients.
_AddParam creates both regular parameters (usually for training) and
averaged nodes (usually for inference). It returns one or the other based
on the 'return_average' arg.
Args:
shape: int list, tensor shape of the parameter to create
dtype: tf.DataType, data type of the parameter
name: string, name of the parameter in the TF graph
      initializer: optional initializer for the parameter
return_average: if False, return parameter otherwise return moving average
Returns:
parameter or averaged parameter
"""
if name not in self.params:
step = tf.cast(self.GetStep(), tf.float32)
# Put all parameters and their initializing ops in their own scope
# irrespective of the current scope (training or eval).
with tf.name_scope(self._param_scope):
self.params[name] = tf.get_variable(name, shape, dtype, initializer)
param = self.params[name]
if initializer is not None:
self.inits[name] = state_ops.init_variable(param, initializer)
if self._averaging_decay == 1:
logging.info('Using vanilla averaging of parameters.')
ema = tf.train.ExponentialMovingAverage(decay=(step / (step + 1.0)),
num_updates=None)
else:
ema = tf.train.ExponentialMovingAverage(decay=self._averaging_decay,
num_updates=step)
self._averaging[name + '_avg_update'] = ema.apply([param])
self.variables[name + '_avg_var'] = ema.average(param)
self.inits[name + '_avg_init'] = state_ops.init_variable(
ema.average(param), tf.zeros_initializer())
return (self.variables[name + '_avg_var'] if return_average else
self.params[name])
def GetStep(self):
def OnesInitializer(shape, dtype=tf.float32, partition_info=None):
return tf.ones(shape, dtype)
return self._AddVariable([], tf.int32, 'step', OnesInitializer)
def _AddVariable(self, shape, dtype, name, initializer=None):
if name in self.variables:
return self.variables[name]
self.variables[name] = tf.get_variable(name, shape, dtype, initializer)
if initializer is not None:
self.inits[name] = state_ops.init_variable(self.variables[name],
initializer)
return self.variables[name]
def _ReluWeightInitializer(self):
with tf.name_scope(self._param_scope):
return tf.random_normal_initializer(stddev=self._relu_init,
seed=self._seed)
def _EmbeddingMatrixInitializer(self, index, embedding_size):
if index in self._pretrained_embeddings:
return self._pretrained_embeddings[index]
else:
return tf.random_normal_initializer(
stddev=self._embedding_init / embedding_size**.5,
seed=self._seed)
def _AddEmbedding(self,
features,
num_features,
num_ids,
embedding_size,
index,
return_average=False):
"""Adds an embedding matrix and passes the `features` vector through it."""
embedding_matrix = self._AddParam(
[num_ids, embedding_size],
tf.float32,
'embedding_matrix_%d' % index,
self._EmbeddingMatrixInitializer(index, embedding_size),
return_average=return_average)
embedding = EmbeddingLookupFeatures(embedding_matrix,
tf.reshape(features,
[-1],
name='feature_%d' % index),
self._allow_feature_weights)
return tf.reshape(embedding, [-1, num_features * embedding_size])
def _BuildNetwork(self, feature_endpoints, return_average=False):
"""Builds a feed-forward part of the net given features as input.
The network topology is already defined in the constructor, so multiple
    calls to _BuildNetwork build multiple networks whose parameters are all
shared. It is the source of the input features and the use of the output
that distinguishes each network.
Args:
feature_endpoints: tensors with input features to the network
return_average: whether to use moving averages as model parameters
Returns:
logits: output of the final layer before computing softmax
"""
assert len(feature_endpoints) == self._feature_size
# Create embedding layer.
embeddings = []
for i in range(self._feature_size):
embeddings.append(self._AddEmbedding(feature_endpoints[i],
self._num_features[i],
self._num_feature_ids[i],
self._embedding_sizes[i],
i,
return_average=return_average))
last_layer = tf.concat(embeddings, 1)
last_layer_size = self.embedding_size
# Create ReLU layers.
for i, hidden_layer_size in enumerate(self._hidden_layer_sizes):
weights = self._AddParam(
[last_layer_size, hidden_layer_size],
tf.float32,
'weights_%d' % i,
self._ReluWeightInitializer(),
return_average=return_average)
bias = self._AddParam([hidden_layer_size],
tf.float32,
'bias_%d' % i,
self._relu_bias_init,
return_average=return_average)
last_layer = tf.nn.relu_layer(last_layer,
weights,
bias,
name='layer_%d' % i)
last_layer_size = hidden_layer_size
# Create softmax layer.
softmax_weight = self._AddParam(
[last_layer_size, self._num_actions],
tf.float32,
'softmax_weight',
tf.random_normal_initializer(stddev=self._softmax_init,
seed=self._seed),
return_average=return_average)
softmax_bias = self._AddParam(
[self._num_actions],
tf.float32,
'softmax_bias',
tf.zeros_initializer(),
return_average=return_average)
logits = tf.nn.xw_plus_b(last_layer,
softmax_weight,
softmax_bias,
name='logits')
return {'logits': logits}
def _AddGoldReader(self, task_context, batch_size, corpus_name):
features, epochs, gold_actions = (
gen_parser_ops.gold_parse_reader(task_context,
self._feature_size,
batch_size,
corpus_name=corpus_name,
arg_prefix=self._arg_prefix))
return {'gold_actions': tf.identity(gold_actions,
name='gold_actions'),
'epochs': tf.identity(epochs,
name='epochs'),
'feature_endpoints': features}
def _AddDecodedReader(self, task_context, batch_size, transition_scores,
corpus_name):
features, epochs, eval_metrics, documents = (
gen_parser_ops.decoded_parse_reader(transition_scores,
task_context,
self._feature_size,
batch_size,
corpus_name=corpus_name,
arg_prefix=self._arg_prefix))
return {'eval_metrics': eval_metrics,
'epochs': tf.identity(epochs,
name='epochs'),
'feature_endpoints': features,
'documents': documents}
def _AddCostFunction(self, batch_size, gold_actions, logits):
"""Cross entropy plus L2 loss on weights and biases of the hidden layers."""
dense_golden = BatchedSparseToDense(gold_actions, self._num_actions)
cross_entropy = tf.div(
tf.reduce_sum(
tf.nn.softmax_cross_entropy_with_logits(
labels=dense_golden, logits=logits)), batch_size)
regularized_params = [tf.nn.l2_loss(p)
for k, p in self.params.items()
if k.startswith('weights') or k.startswith('bias')]
l2_loss = 1e-4 * tf.add_n(regularized_params) if regularized_params else 0
return {'cost': tf.add(cross_entropy, l2_loss, name='cost')}
def AddEvaluation(self,
task_context,
batch_size,
evaluation_max_steps=300,
corpus_name='documents'):
"""Builds the forward network only without the training operation.
Args:
task_context: file path from which to read the task context.
batch_size: batch size to request from reader op.
evaluation_max_steps: max number of parsing actions during evaluation,
only used in beam parsing.
corpus_name: name of the task input to read parses from.
Returns:
Dictionary of named eval nodes.
"""
def _AssignTransitionScores():
return tf.assign(nodes['transition_scores'],
nodes['logits'], validate_shape=False)
def _Pass():
return tf.constant(-1.0)
unused_evaluation_max_steps = evaluation_max_steps
with tf.name_scope('evaluation'):
nodes = self.evaluation
nodes['transition_scores'] = self._AddVariable(
[batch_size, self._num_actions], tf.float32, 'transition_scores',
tf.constant_initializer(-1.0))
nodes.update(self._AddDecodedReader(task_context, batch_size, nodes[
'transition_scores'], corpus_name))
nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
return_average=self._use_averaging))
nodes['eval_metrics'] = cf.with_dependencies(
[tf.cond(tf.greater(tf.size(nodes['logits']), 0),
_AssignTransitionScores, _Pass)],
nodes['eval_metrics'], name='eval_metrics')
return nodes
def _IncrementCounter(self, counter):
return state_ops.assign_add(counter, 1, use_locking=True)
def _AddLearningRate(self, initial_learning_rate, decay_steps):
"""Returns a learning rate that decays by 0.96 every decay_steps.
Args:
initial_learning_rate: initial value of the learning rate
decay_steps: decay by 0.96 every this many steps
Returns:
learning rate variable.
"""
step = self.GetStep()
return cf.with_dependencies(
[self._IncrementCounter(step)],
tf.train.exponential_decay(initial_learning_rate,
step,
decay_steps,
0.96,
staircase=True))
def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):
"""Embeddings at the given index will be set to pretrained values."""
def _Initializer(shape, dtype=tf.float32, partition_info=None):
"""Variable initializer that loads pretrained embeddings."""
unused_dtype = dtype
seed1, seed2 = tf.get_seed(self._seed)
t = gen_parser_ops.word_embedding_initializer(
vectors=embeddings_path,
task_context=task_context,
embedding_init=self._embedding_init,
cache_vectors_locally=False,
seed=seed1,
seed2=seed2)
t.set_shape(shape)
return t
self._pretrained_embeddings[index] = _Initializer
def AddTraining(self,
task_context,
batch_size,
learning_rate=0.1,
decay_steps=4000,
momentum=0.9,
corpus_name='documents'):
"""Builds a trainer to minimize the cross entropy cost function.
Args:
task_context: file path from which to read the task context
batch_size: batch size to request from reader op
learning_rate: initial value of the learning rate
decay_steps: decay learning rate by 0.96 every this many steps
momentum: momentum parameter used when training with momentum
corpus_name: name of the task input to read parses from
Returns:
Dictionary of named training nodes.
"""
with tf.name_scope('training'):
nodes = self.training
nodes.update(self._AddGoldReader(task_context, batch_size, corpus_name))
nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
return_average=False))
nodes.update(self._AddCostFunction(batch_size, nodes['gold_actions'],
nodes['logits']))
# Add the optimizer
if self._only_train:
trainable_params = [v
                            for k, v in self.params.items()
if k in self._only_train]
else:
trainable_params = self.params.values()
lr = self._AddLearningRate(learning_rate, decay_steps)
optimizer = tf.train.MomentumOptimizer(lr,
momentum,
use_locking=self._use_locking)
train_op = optimizer.minimize(nodes['cost'], var_list=trainable_params)
for param in trainable_params:
slot = optimizer.get_slot(param, 'momentum')
self.inits[slot.name] = state_ops.init_variable(slot,
tf.zeros_initializer())
self.variables[slot.name] = slot
numerical_checks = [
tf.check_numerics(param,
message='Parameter is not finite.')
for param in trainable_params
if param.dtype.base_dtype in [tf.float32, tf.float64]
]
check_op = tf.group(*numerical_checks)
avg_update_op = tf.group(*self._averaging.values())
train_ops = [train_op]
if self._check_parameters:
train_ops.append(check_op)
if self._use_averaging:
train_ops.append(avg_update_op)
nodes['train_op'] = tf.group(*train_ops, name='train_op')
return nodes
def AddSaver(self, slim_model=False):
"""Adds ops to save and restore model parameters.
Args:
slim_model: whether only averaged variables are saved.
Returns:
the saver object.
"""
# We have to put the save op in the root scope otherwise running
# "save/restore_all" won't find the "save/Const" node it expects.
with tf.name_scope(None):
variables_to_save = self.params.copy()
variables_to_save.update(self.variables)
if slim_model:
        for key in list(variables_to_save.keys()):
          if not key.endswith('avg_var'):
            del variables_to_save[key]
self.saver = tf.train.Saver(
variables_to_save, builder=tf_saver.BaseSaverBuilder())
return self.saver
|
the-stack_0_24709
|
#!/usr/bin/env python
import importlib
import os
from setuptools import find_packages, setup
HERE = os.path.abspath(os.path.dirname(__file__))
version = {}
with open(os.path.join(HERE, 'flamingo', '__version__.py')) as f:
exec(f.read(), version)
with open('README.rst') as readme_file:
README = readme_file.read()
try:
importlib.import_module("rdkit")
except ModuleNotFoundError:
exc = ModuleNotFoundError(
"'flamingo' requires the 'rdkit' package: https://anaconda.org/conda-forge/rdkit"
)
exc.__cause__ = None
raise exc
setup(
name='nlesc-flamingo',
version=version['__version__'],
description="Compute and filter molecular properties",
long_description=README + '\n\n',
long_description_content_type='text/x-rst',
author="Felipe Zapata",
author_email='[email protected]',
    url='https://github.com/nlesc-nano/flamingo',
packages=find_packages(),
include_package_data=True,
license="Apache Software License 2.0",
zip_safe=False,
keywords='flamingo',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Typing :: Typed',
],
python_requires='>=3.7',
install_requires=[
'nlesc-CAT>=0.10.0',
'nano-CAT>=0.7.0',
'data-CAT>=0.7.0',
'plams>=1.5.1',
'more_itertools',
'numpy',
'pandas',
'pyyaml>=5.1.1',
'schema!=0.7.5',
'typing_extensions',
'h5py',
],
entry_points={
'console_scripts': [
'smiles_screener=flamingo.screen:main'
]
},
package_data={
'flamingo': [
'data/scscore/full_reaxys_model_1024bool/model.ckpt-10654.as_numpy.json.gz',
'data/scscore/full_reaxys_model_2048bool/model.ckpt-10654.as_numpy.json.gz',
'py.typed',
]
},
data_files=[('citation/flamingo', ['CITATION.cff'])],
extras_require={
'test': ['coverage', 'mypy', 'pycodestyle', 'pytest>=3.9', 'pytest-cov',
'pytest-mock'],
'doc': ['sphinx', 'sphinx-autodoc-typehints', 'sphinx_rtd_theme',
'nbsphinx']
}
)
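# Installation sketch (package names as declared above; exact commands are assumptions):
#   conda install -c conda-forge rdkit
#   pip install nlesc-flamingo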
|
the-stack_0_24712
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSnprelate(RPackage):
"""Parallel Computing Toolset for Relatedness and Principal Component
Analysis of SNP Data.
Genome-wide association studies (GWAS) are widely used to investigate
the genetic basis of diseases and traits, but they pose many
computational challenges. We developed an R package SNPRelate to provide
a binary format for single-nucleotide polymorphism (SNP) data in GWAS
utilizing CoreArray Genomic Data Structure (GDS) data files. The GDS
format offers the efficient operations specifically designed for
integers with two bits, since a SNP could occupy only two bits.
SNPRelate is also designed to accelerate two key computations on SNP
data using parallel computing for multi-core symmetric multiprocessing
computer architectures: Principal Component Analysis (PCA) and
relatedness analysis using Identity-By-Descent measures. The SNP GDS
format is also used by the GWASTools package with the support of S4
classes and generic functions. The extended GDS format is implemented in
the SeqArray package to support the storage of single nucleotide
variations (SNVs), insertion/deletion polymorphism (indel) and
structural variation calls."""
homepage = "https://bioconductor.org/packages/SNPRelate"
git = "https://git.bioconductor.org/packages/SNPRelate.git"
version('1.18.1', commit='81c581bf76392efdc8ba237ca2e42ca1dba788ca')
version('1.16.0', commit='0e38e8df4af87dff6c27a23af2867661998c0d85')
version('1.14.0', commit='9501cbfc411aa320e58654a865fda2e9077977af')
version('1.12.2', commit='dce2e2b6f36483a9f905bb5df6ae834a9f1136fe')
version('1.10.2', commit='3f5c4010871df742e7a460586b38ad0c2fd37aeb')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
|
the-stack_0_24714
|
import random
import csv
class Chance:
__messages = []
__money = []
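    # Note: these are class-level lists, so every Chance instance shares the same
    # loaded messages and amounts.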
    # Read the CSV file
def choice(self):
with open('chance.csv', newline='', encoding='utf-8') as csvfile:
rows = csv.DictReader(csvfile)
for row in rows:
self.__messages.append(row['機會訊息'])
self.__money.append(row['金額'])
        # Randomly draw one entry
nums = random.randint(0,len(self.__messages)-1)
return (self.__messages[nums],self.__money[nums])
if __name__ == "__main__":
myChance = Chance()
print(myChance.choice())
|
the-stack_0_24715
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [["-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
def run_test(self):
raise SkipTest("GreenRightCoin doesn't support RBF.")
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
self.start_node(1)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 outputs of 0.1 each (10,000,000 satoshis apiece)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.1)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("2.5"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.1"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.09"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.05"),
rbf_node.getrawchangeaddress(): Decimal("0.03")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.1")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.020000})
tx = rbf_node.signrawtransaction(tx)
txid = rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 5000000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 5000001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
    # the bumped tx requests totalFee=4,990,000, but the dust change is folded into the fee, giving 0.05
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 4990000})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.050000"))
assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  # change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["size"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 200000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 300000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 300000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 1000000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 2000000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then call abandon to make sure the wallet doesn't attempt to resubmit the
# bump tx, then invalidate the block so the rbf tx will be put back in the
# mempool. this makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
rbf_node.abandontransaction(bumpid)
rbf_node.invalidateblock(block.hash)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.050000"),
node.getrawchangeaddress(): Decimal("0.049000")})
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
|
the-stack_0_24720
|
# Load libraries
import json
import os
import sys
import time
i = 0
i1 = 0
i2 = 3
i3 = 5
i4 = 7
i5 = 9
i6 = 1
i7 = 4
i8 = 6
i9 = 8
while i < 1000:
i1 = i1+1
i2 = i2 +1
i3 = i3 +1
i4 = i4 +1
i5 = i5 +1
i6 = i6 +1
i7 = i7 +1
i8 = i8 +1
i9 = i9 +1
j = 0
print("LIve data for Graph %d " %i)
jsonObject = {}
#jsonObject['EngineID'] = "df_solution['Engine_ID'].values.tolist()"
jsonObject['Predicted'] = [i1, i2, i3, i4, i5, i6, i7, i8, i9, i1, i2, i3, i4, i5, i6, i7, i8, i9]
jsonObject['time'] = [j+1, j+2, j+3, j+4, j+5, j+6, j+7, j+8, j+9, j+10, j+11, j+12, j+13, j+14, j+15, j+16, j+17, j+18 ]
#jsonObject['Actual_RUL'] = df_solution['Actual_RUL'].values.tolist()
#jsonName = str("../Output/") + aName + str("_") + cleanApproach + str("_Predicted.json")
with open('./assets/json/dummy.json', 'w') as outfile:
json.dump(jsonObject, outfile)
print(jsonObject)
i = i+1
j = j+1
if i == 1000:
i = 0
i1 = 0
i2 = 3
i3 = 5
i4 = 7
i5 = 9
i6 = 1
i7 = 4
i8 = 6
i9 = 8
time.sleep(3)
|
the-stack_0_24722
|
"""
Created by Fanghl on 2020/9/10 11:53
"""
from .app import Flask
def register_blueprints(app):
from app.api.v1 import create_blueprint_v1
app.register_blueprint(create_blueprint_v1(), url_prefix='/v1')
def register_plugin(app):
from app.models.base import db
db.init_app(app)
with app.app_context():
db.create_all()
def create_app():
app = Flask(__name__)
app.config.from_object('app.config.secure')
app.config.from_object('app.config.setting')
register_blueprints(app)
register_plugin(app)
return app
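# Usage sketch (entry-point module and options are assumptions):
#
#   from app import create_app
#
#   app = create_app()
#   if __name__ == '__main__':
#       app.run(debug=True)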
|
the-stack_0_24726
|
# Naver Search Workflow for Alfred 2
# Copyright (c) 2021 Jinuk Baek
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from workflow import web, Workflow
def get_dictionary_data(word):
url = 'https://ac-dict.naver.com/koko/ac'
params = dict(frm='stdkrdic', oe='utf8', m=0, r=1, st=111, r_lt=111, q=word)
r = web.get(url, params)
r.raise_for_status()
return r.json()
def main(wf):
args = wf.args[0]
wf.add_item(title = 'Search Naver Krdic for \'%s\'' % args,
autocomplete=args,
arg=args,
valid=True)
def wrapper():
return get_dictionary_data(args)
res_json = wf.cached_data("kr_%s" % args, wrapper, max_age=600)
for items in res_json['items']:
for ltxt in items:
if len(ltxt) > 0:
txt = ltxt[0][0]
wf.add_item(title = u"%s" % txt ,
subtitle = 'Search Naver Krdic for \'%s\'' % txt,
autocomplete=txt,
arg=txt,
valid=True);
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
the-stack_0_24727
|
# Advent of Code 2018, Day 4
# (c) blu3r4y
import datetime
import numpy as np
from parse import parse
from functools import partial
from collections import namedtuple, defaultdict
Guard = namedtuple("Guard", ["id", "shift", "asleep"])
def part1(guards):
return solve(guards)[0]
def part2(guards):
return solve(guards)[1]
def solve(guards):
# fill a matrix with the slept minutes, per guard
minutes = defaultdict(partial(np.zeros, 60))
for guard in guards:
for asleep in guard.asleep:
t = asleep[0]
while t < asleep[1]:
minutes[guard.id][t.minute] += 1
t += datetime.timedelta(minutes=1)
# strategy 1: most slept minute of the most sleepy guard
guard1 = max(minutes.keys(), key=lambda guard: np.sum(minutes[guard]))
# strategy 2: most slept minute of all guards
guard2 = max(minutes.keys(), key=lambda guard: np.max(minutes[guard]))
return guard1 * np.argmax(minutes[guard1]), guard2 * np.argmax(minutes[guard2])
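# Both strategies answer the puzzle in the "guard ID multiplied by the chosen
# minute" format, which is exactly what the two products above return.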
def _parse(lines):
guards = []
for line in sorted(lines, key=lambda line: parse("[{time:ti}] {}", line)['time']):
begin_ = parse("[{time:ti}] Guard #{id:d} begins shift", line)
asleep_ = parse("[{time:ti}] falls asleep", line)
wakeup_ = parse("[{time:ti}] wakes up", line)
if begin_: guards.append(Guard(id=begin_['id'], shift=begin_['time'], asleep=[]))
if asleep_: guards[-1].asleep.append([asleep_['time'], None])
if wakeup_: guards[-1].asleep[-1][1] = wakeup_['time']
return guards
if __name__ == "__main__":
print(part1(_parse(open(r"../assets/day4_demo.txt").readlines())))
print(part1(_parse(open(r"../assets/day4.txt").readlines())))
print(part2(_parse(open(r"../assets/day4_demo.txt").readlines())))
print(part2(_parse(open(r"../assets/day4.txt").readlines())))
|
the-stack_0_24729
|
# #from numpy import array, zeros
# from array import *
import numpy as np
#from numpy import fabs
'''
a=np.array([[25,5,1],
[64,8,1],
[144,12,1]],float)
b=[106.8,177,279.2]
n= len(b)
x= np.zeros(n,float)
# Elimination
for k in range(n - 1):
for i in range(k + 1, n):
if a[i, k] == 0:
continue
factor = a[k, k] / a[i, k]
for j in range(k, n):
a[i, j] = a[k, j] - a[i, j] * factor
b[i] = b[k] - b[i] * factor
print(a)
print(b)
# Back-Substitution
x[n - 1] = b[n - 1] / a[n - 1, n - 1]
for i in range(n - 2, -1, -1):
sum_ax = 0
for j in range(i + 1, n):
sum_ax += a[i, j] * x[j]
x[i] = (b[i] - sum_ax) / a[i, i]
print(x)
'''
### Gauss Elimination Method with Partial Pivoting
### Defining the arrays
## Coefficient Matrix
'''
a= np.array([[0,7,-1,3,1],
[0,3,4,1,7],
[6,2,0,2,-1],
[2,1,2,0,2],
[3,4,1,-2,1]],float)
## Constant vector
b = np.array([5,7,2,3,4],float)
'''
'''
a=np.array([[25,5,1],
[64,8,1],
[144,12,1]],float)
b=np.array([106.8,177,279.2],float)
'''
a=np.array([[20,15,10],
[-3,-2.249,7],
[5,1,3]],float)
b=np.array([45,1.751,9],float)
## length of the vector
n= len(b)
# defining zeros to fill the entries of x
x= np.zeros(n,float)
##
## --------------Partial Pivoting---------------
## If there is a zero on the main diagonal, interchange that row with a later row whose entry in the pivot column is nonzero.
for k in range(n-1):
if abs(a[k,k])<1.0e-10:
        for i in range(k + 1, n):  # ranging up to n (not n - 1) also allows a swap with the last row
            if abs(a[i, k]) > abs(a[k, k]):  # could equivalently compare against 1.0e-10
                a[[i, k]] = a[[k, i]]
                b[[k, i]] = b[[i, k]]
                break
# Elimination---->
#for k in range(n-1):
for i in range(k+1,n):
if a[i,k]==0:
continue
factor= a[k,k]/a[i,k]
for j in range(k,n):
a[i,j]= a[k,j]- a[i,j]*factor
b[i]=b[k]-b[i]*factor
print("the upper triangular matrix ")
U=a
print(np.round(U,1))
## back-Substitution---->
x[n-1]= b[n-1]/a[n-1,n-1]
for i in range(n-2,-1,-1):
sum_ax=0
for j in range(i+1,n):
sum_ax+=a[i,j]*x[j]
x[i]= (b[i]-sum_ax)/a[i,i]
print("The solution vector is: ")
print(np.round(x,1))
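## Verification sketch: the elimination above overwrites a and b in place, so a
## residual check needs copies taken before the elimination loop, e.g.
##   A0, b0 = a.copy(), b.copy()
## and afterwards
##   print(np.allclose(A0 @ x, b0))   # expect True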
|
the-stack_0_24730
|
from __future__ import print_function
import base64
from datetime import datetime
import time
import json
from redis import Redis
r = Redis(host='trip-cache.w0fmsk.0001.use1.cache.amazonaws.com', port=6379)
def lambda_handler(event, context):
print('Loading function')
recordNum = 1
okRecords = 0
processingFailedRecords = 0
output = []
for record in event['records']:
payload = json.loads(base64.b64decode(record['data']))
jsData = json.dumps(payload)
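        # Drop any earlier copy of this exact record from the geo set, then
        # re-index it by its pickup coordinates (GEOADD stores members in a
        # sorted set keyed by geohash, so ZREM is enough to deduplicate).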
r.zrem("pickups", jsData)
r.geoadd("pickups", payload['pickup_longitude'], payload['pickup_latitude'], jsData)
pos = r.geopos("pickups", jsData)
result = "Ok"
okRecords += 1
recordNum += 1
output_record = {
'recordId': record['recordId'],
'result': result,
'data': base64.b64encode(jsData + "\n")
}
output.append(output_record)
query = r.georadius("pickups", longitude=-74.286875, latitude=40.7625169, radius=100, unit="km", withdist=True)
    print('result is {}'.format(query))
print('Successfully processed {} record(s).'.format(okRecords))
print('Failed to process {} record(s).'.format(processingFailedRecords))
print('Records Received {} record(s).'.format(len(event['records'])))
return {'records': output}
|
the-stack_0_24731
|
"""Store temporal simulation results for a single mesh with HDF5 and Xdmf.
This process:
- removes existing .h5 files at the start of the simulation.
- stores the initial model part in an .h5 file.
- stores historical and non-historical results in one .h5 file per output step.
- stores Xdmf metadata for post-processing (e.g., Paraview or VisIt).
This process works with or without MPI.
license: HDF5Application/license.txt
Main authors:
Philipp Bucher
Michael Andre
"""
__all__ = ["Factory"]
import KratosMultiphysics
from KratosMultiphysics.HDF5Application import core
from KratosMultiphysics.HDF5Application.utils import ParametersWrapper
from KratosMultiphysics.HDF5Application.utils import IsDistributed
from KratosMultiphysics.HDF5Application.utils import CreateOperationSettings
def Factory(settings, Model):
"""Return a process for single mesh temporal results output with Xdmf and HDF5.
The input settings are given in the following table:
+-------------------------------------+------------+---------------------------------+
| Setting | Type | Default Value |
+-------------------------------------+------------+---------------------------------+
| "model_part_name" | String | "" |
+-------------------------------------+------------+---------------------------------+
| "file_settings" | Parameters | "file_name": "<model_part_name>"|
| | | "time_format": "0.4f" |
| | | "file_access_mode": "truncate" |
| | | "max_files_to_keep": "unlimited"|
| | | "echo_level": 0 |
+-------------------------------------+------------+---------------------------------+
| "output_time_settings" | Parameters | "time_frequency": 1.0 |
| | | "step_frequency": 1 |
+-------------------------------------+------------+---------------------------------+
| "model_part_output_settings" | Parameters | "prefix": "/ModelData" |
+-------------------------------------+------------+---------------------------------+
| "nodal_solution_step_data_settings" | Parameters | "prefix": "/ResultsData" |
| | | "list_of_variables": [] |
+-------------------------------------+------------+---------------------------------+
| "nodal_data_value_settings" | Parameters | "prefix": "/ResultsData" |
| | | "list_of_variables": [] |
+-------------------------------------+------------+---------------------------------+
| "element_data_value_settings" | Parameters | "prefix": "/ResultsData" |
| | | "list_of_variables": [] |
+-------------------------------------+------------+---------------------------------+
| "nodal_flag_value_settings" | Parameters | "prefix": "/ResultsData" |
| | | "list_of_variables": [] |
+-------------------------------------+------------+---------------------------------+
| "element_flag_value_settings" | Parameters | "prefix": "/ResultsData" |
| | | "list_of_variables": [] |
+-------------------------------------+------------+---------------------------------+
| "condition_flag_value_settings" | Parameters | "prefix": "/ResultsData" |
| | | "list_of_variables": [] |
+-------------------------------------+------------+---------------------------------+
| "condition_data_value_settings" | Parameters | "prefix": "/ResultsData" |
| | | "list_of_variables": [] |
+-------------------------------------+------------+---------------------------------+
"""
core_settings = CreateCoreSettings(settings["Parameters"])
return SingleMeshXdmfOutputProcessFactory(core_settings, Model)
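# A minimal "Parameters" block for this factory might look like the sketch below
# (field names follow the table in the docstring; the values are illustrative only):
#
#   {
#       "model_part_name": "MainModelPart",
#       "file_settings": {"file_name": "results/<model_part_name>"},
#       "output_time_settings": {"step_frequency": 10},
#       "nodal_solution_step_data_settings": {"list_of_variables": ["DISPLACEMENT"]}
#   }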
def SingleMeshXdmfOutputProcessFactory(core_settings, Model):
return core.Factory(core_settings, Model)
def CreateCoreSettings(user_settings):
"""Return the core settings.
The core setting "io_type" cannot be overwritten by the user. It is
automatically set depending on whether or not MPI is used.
"""
# Configure the defaults:
core_settings = ParametersWrapper("""
[{
"model_part_name" : "",
"process_step": "initialize",
"io_settings": {
"io_type": "mock_hdf5_file_io",
"file_name": "<model_part_name>.h5"
},
"list_of_operations": [{
"module_name": "operations.system",
"operation_type": "delete_old_h5_files"
}]
},{
"model_part_name": "",
"process_step": "before_solution_loop",
"io_settings": {
"io_type": "serial_hdf5_file_io",
"file_name": "<model_part_name>.h5",
"file_access_mode": "truncate"
},
"list_of_operations": []
},{
"model_part_name" : "",
"process_step": "finalize_solution_step",
"controller_settings": {
"controller_type": "temporal_controller"
},
"io_settings": {
"io_type": "serial_hdf5_file_io",
"file_name": "<model_part_name>-<time>.h5",
"file_access_mode": "truncate"
},
"list_of_operations": []
},{
"model_part_name" : "",
"process_step": "finalize_solution_step",
"controller_settings": {
"controller_type": "temporal_controller"
},
"io_settings": {
"io_type": "mock_hdf5_file_io",
"file_name": "<model_part_name>.h5"
},
"list_of_operations": [{
"module_name": "operations.xdmf",
"operation_type": "xdmf_output"
}]
}]
""")
# Apply the user settings:
user_settings.ValidateAndAssignDefaults(
KratosMultiphysics.Parameters("""
{
"model_part_name" : "MainModelPart",
"file_settings" : {},
"output_time_settings" : {},
"model_part_output_settings" : {},
"nodal_solution_step_data_settings" : {},
"nodal_data_value_settings": {},
"element_data_value_settings" : {},
"nodal_flag_value_settings": {},
"element_flag_value_settings" : {},
"condition_data_value_settings" : {},
"condition_flag_value_settings" : {}
}
"""))
user_settings = ParametersWrapper(user_settings)
for i in core_settings:
core_settings[i]["model_part_name"] = user_settings["model_part_name"]
for key in user_settings["file_settings"]:
core_settings[i]["io_settings"][key] = user_settings["file_settings"][key]
core_settings[0]["io_settings"]["io_type"] = "mock_hdf5_file_io"
core_settings[3]["io_settings"]["io_type"] = "mock_hdf5_file_io"
if IsDistributed():
model_part_output_type = "partitioned_model_part_output"
core_settings[1]["io_settings"]["io_type"] = "parallel_hdf5_file_io"
core_settings[2]["io_settings"]["io_type"] = "parallel_hdf5_file_io"
else:
model_part_output_type = "model_part_output"
core_settings[1]["io_settings"]["io_type"] = "serial_hdf5_file_io"
core_settings[2]["io_settings"]["io_type"] = "serial_hdf5_file_io"
core_settings[1]["list_of_operations"] = [
CreateOperationSettings(model_part_output_type,
user_settings["model_part_output_settings"]),
CreateOperationSettings("nodal_solution_step_data_output",
user_settings["nodal_solution_step_data_settings"]),
CreateOperationSettings("nodal_data_value_output",
user_settings["nodal_data_value_settings"]),
CreateOperationSettings("element_data_value_output",
user_settings["element_data_value_settings"]),
CreateOperationSettings("nodal_flag_value_output",
user_settings["nodal_flag_value_settings"]),
CreateOperationSettings("element_flag_value_output",
user_settings["element_flag_value_settings"]),
CreateOperationSettings("condition_flag_value_output",
user_settings["condition_flag_value_settings"]),
CreateOperationSettings("condition_data_value_output",
user_settings["condition_data_value_settings"])
]
core_settings[2]["list_of_operations"] = [
CreateOperationSettings("nodal_solution_step_data_output",
user_settings["nodal_solution_step_data_settings"]),
CreateOperationSettings("nodal_data_value_output",
user_settings["nodal_data_value_settings"]),
CreateOperationSettings("element_data_value_output",
user_settings["element_data_value_settings"]),
CreateOperationSettings("nodal_flag_value_output",
user_settings["nodal_flag_value_settings"]),
CreateOperationSettings("element_flag_value_output",
user_settings["element_flag_value_settings"]),
CreateOperationSettings("condition_flag_value_output",
user_settings["condition_flag_value_settings"]),
CreateOperationSettings("condition_data_value_output",
user_settings["condition_data_value_settings"])
]
for key in user_settings["output_time_settings"]:
core_settings[2]["controller_settings"][key] = user_settings["output_time_settings"][key]
core_settings[3]["controller_settings"][key] = user_settings["output_time_settings"][key]
return core_settings
|
the-stack_0_24732
|
from __future__ import print_function
import tensorflow as tf
import argparse
import os
from six.moves import cPickle
from char_rnn.model import Model
from six import text_type
# def main():
# parser = argparse.ArgumentParser(
# formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument('--save_dir', type=str, default='save',
# help='model directory to store checkpointed models')
# parser.add_argument('-n', type=int, default=500,
# help='number of characters to sample')
# parser.add_argument('--prime', type=text_type, default=u' ',
# help='prime text')
# parser.add_argument('--sample', type=int, default=1,
# help='0 to use max at each timestep, 1 to sample at '
# 'each timestep, 2 to sample on spaces')
# args = parser.parse_args()
# sample(args)
def load_model_to_memory(args):
print("Loading RNN model into Memory")
with open(os.path.join(args["save_dir"], 'config.pkl'), 'rb') as f:
saved_args = cPickle.load(f)
with open(os.path.join(args["save_dir"], 'chars_vocab.pkl'), 'rb') as f:
chars, vocab = cPickle.load(f)
model = Model(saved_args, training=False)
sess = tf.Session()
tf.global_variables_initializer().run(session=sess)
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(args["save_dir"])
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("RNN model loading complete.")
return (sess, chars, vocab, model)
else:
print("Model not available")
        return None
# print(model.sample(sess, chars, vocab, args.n, args.prime,
# args.sample).encode('utf-8'))
# def sample(args):
# with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
# saved_args = cPickle.load(f)
# with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
# chars, vocab = cPickle.load(f)
# model = Model(saved_args, training=False)
# with tf.Session() as sess:
# tf.global_variables_initializer().run()
# saver = tf.train.Saver(tf.global_variables())
# ckpt = tf.train.get_checkpoint_state(args.save_dir)
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(sess, ckpt.model_checkpoint_path)
# print(model.sample(sess, chars, vocab, args.n, args.prime,
# args.sample).encode('utf-8'))
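# Minimal usage sketch (not part of the original script): load the checkpoint directory and
# draw one sample. This assumes the same Model.sample() signature as the commented-out code
# above and a './save' directory produced by training.
if __name__ == '__main__':
    loaded = load_model_to_memory({"save_dir": "save"})
    if loaded is not None:
        sess, chars, vocab, model = loaded
        # 500 characters, primed with a single space, sampling at every timestep.
        print(model.sample(sess, chars, vocab, 500, u' ', 1).encode('utf-8'))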
|
the-stack_0_24733
|
"""SQL formulas (formatted strings)."""
from typing import List, Union
from .. import END_OF_TIME, HASH_DELIMITER, METADATA_FIELDS, UNKNOWN
from ..driving_key_field import DrivingKeyField
from ..field import Field
# Formula used to COALESCE each business key to be included in staging table SELECT
# statement.
ALIASED_BUSINESS_KEY_SQL_TEMPLATE = (
f"COALESCE({{business_key}}, '{UNKNOWN}') AS {{business_key}}"
)
# Field with alias prepended.
ALIASED_FIELD_SQL_TEMPLATE = "{table_alias}.{field_name}"
# SQL expression that should be used to represent the end of times (9999-12-31)
# in SQL query filters.
END_OF_TIME_SQL_TEMPLATE = (
f"CAST('{END_OF_TIME.strftime('%Y-%m-%dT%H:%M:%S.%fZ')}' AS TIMESTAMP)"
)
# Formula used to calculate HASHDIFF fields. {hashdiff_expression} is the concatenation
# between all business keys plus descriptive_field delimited by HASH_DELIMITER.
# The REGEXP_REPLACE function is needed in order to avoid changes in hashdiffs
# when a new field is added to a satellite.
_HASH_DELIMITER_ESCAPED = HASH_DELIMITER.replace("|", "\\\\|")
HASHDIFF_SQL_TEMPLATE = (
f"MD5(REGEXP_REPLACE({{hashdiff_expression}}, "
f"'({_HASH_DELIMITER_ESCAPED})+$', '')) "
f"AS {{hashdiff}}"
)
# Formula used to calculate hashkeys. The {hashkey_expression} is the concatenation of
# all business keys plus child keys (if they exist) delimited by HASH_DELIMITER.
HASHKEY_SQL_TEMPLATE = "MD5({hashkey_expression}) AS {hashkey}"
# JOIN condition template SQL.
JOIN_CONDITION_SQL_TEMPLATE = (
"{table_1_alias}.{field_name} = {table_2_alias}.{field_name}"
)
# Formula used to calculate r_timestamp_end while populating satellites.
# If we find an existing version for a business key that we are going to INSERT,
# this previous version have to be "closed", with the new timestamp (this execution's
# extraction start timestamp) minus 1 millisecond.
RECORD_END_TIMESTAMP_SQL_TEMPLATE = (
f"LEAD(DATEADD(milliseconds, - 1, {METADATA_FIELDS['record_start_timestamp']}), 1, "
f"{END_OF_TIME_SQL_TEMPLATE}) OVER (PARTITION BY {{key_fields}} "
f"ORDER BY {METADATA_FIELDS['record_start_timestamp']}) AS "
f"{METADATA_FIELDS['record_end_timestamp']}"
)
# Formula used to create the record timestamp in staging table.
# This field will always be equivalent to the start of extraction process.
RECORD_START_TIMESTAMP_SQL_TEMPLATE = (
f"CAST('{{extract_start_timestamp}}' AS TIMESTAMP) AS "
f"{METADATA_FIELDS['record_start_timestamp']}"
)
# Formula used to create the record source field in staging table. A simple SQL constant
# aliased.
SOURCE_SQL_TEMPLATE = f"'{{source}}' AS {METADATA_FIELDS['record_source']}"
def format_fields_for_join(
fields: List[Union[Field, DrivingKeyField]],
table_1_alias: str,
table_2_alias: str,
) -> List[str]:
"""Get formatted list of field names for SQL JOIN condition.
Args:
fields: Fields to be formatted.
table_1_alias: Alias that should be used in the field on the left side of the
equality sign.
table_2_alias: alias that should be used in the field on the right side of the
equality sign.
Returns:
Fields list formatted for an SQL JOIN condition.
"""
return [
JOIN_CONDITION_SQL_TEMPLATE.format(
field_name=field.name,
table_1_alias=table_1_alias,
table_2_alias=table_2_alias,
)
for field in fields
]
def format_fields_for_select(
fields: List[Union[Field, DrivingKeyField]], table_alias: str = None
) -> List[str]:
"""Get formatted list of field names for SQL SELECT statement.
Args:
fields: Fields to be formatted.
table_alias: Alias that should be used in each field.
Returns:
Fields list formatted for SQL SELECT clause.
"""
if table_alias is not None:
return [
ALIASED_FIELD_SQL_TEMPLATE.format(
field_name=field.name, table_alias=table_alias
)
for field in fields
]
return [field.name for field in fields]
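# Minimal usage sketch (not part of the original module): a namedtuple stands in for
# Field/DrivingKeyField, since these helpers only read the ``name`` attribute. The relative
# imports above mean this only runs within the package context.
if __name__ == "__main__":
    from collections import namedtuple

    _FakeField = namedtuple("_FakeField", "name")
    _fields = [_FakeField("customer_id"), _FakeField("order_id")]

    # ['l.customer_id = r.customer_id', 'l.order_id = r.order_id']
    print(format_fields_for_join(_fields, table_1_alias="l", table_2_alias="r"))

    # ['staging.customer_id', 'staging.order_id']
    print(format_fields_for_select(_fields, table_alias="staging"))

    # ['customer_id', 'order_id']
    print(format_fields_for_select(_fields))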
|
the-stack_0_24734
|
with open("./data/input02.txt") as inputFile:
input = inputFile.readlines()
valid_count1 = 0
valid_count2 = 0
for line in input:
items = line.split(' ')
numbers = items[0].split('-')
first = int(numbers[0])
second = int(numbers[1])
char = items[1].split(':')[0]
sample = items[2]
char_count = sample.count(char)
if char_count >= first and char_count <= second:
valid_count1 += 1
if sample[first-1] == char and sample[second-1] != char or sample[first-1] != char and sample[second-1] == char:
valid_count2 += 1
# Part One
print(valid_count1)
# Part Two
print(valid_count2)
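# Worked example of the parsing above for one hypothetical line, "1-3 a: abbde":
#   items         -> ['1-3', 'a:', 'abbde\n']
#   first, second -> 1, 3; char -> 'a'; char_count -> 1
#   Part one: 1 <= char_count <= 3                   -> counted as valid
#   Part two: sample[0] == 'a' XOR sample[2] == 'a'  -> counted as valid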
|
the-stack_0_24735
|
import numpy as np
import matplotlib.pyplot as plt
plt.figure(1)  # create figure 1
plt.figure(2)  # create figure 2
ax1 = plt.subplot(211)  # create subplot 1 in figure 2
ax2 = plt.subplot(212)  # create subplot 2 in figure 2
x = np.linspace(0, 3, 100)
for i in range(5):
    plt.figure(1)  # ❶ select figure 1
    plt.plot(x, np.exp(i * x / 3))
    plt.sca(ax1)  # ❷ select subplot 1 of figure 2
    plt.plot(x, np.sin(i * x))
    plt.sca(ax2)  # select subplot 2 of figure 2
    plt.plot(x, np.cos(i * x))
plt.show()
|
the-stack_0_24736
|
# -*- coding: utf-8 -*-
'''
unit.loader
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test Salt's loader
'''
# Import Python libs
from __future__ import absolute_import
import inspect
import logging
import tempfile
import shutil
import os
import collections
import sys
import imp
import copy
# Import Salt Testing libs
from tests.support.unit import TestCase
from tests.support.mock import patch
from tests.support.paths import TMP
# Import Salt libs
import salt.config
import salt.utils
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
from salt.loader import LazyLoader, _module_dirs, grains, utils, proxy, minion_mods
log = logging.getLogger(__name__)
def remove_bytecode(module_path):
paths = [module_path + 'c']
if hasattr(imp, 'get_tag'):
modname, ext = os.path.splitext(module_path.split(os.sep)[-1])
paths.append(
os.path.join(os.path.dirname(module_path),
'__pycache__',
'{}.{}.pyc'.format(modname, imp.get_tag())))
for path in paths:
if os.path.exists(path):
os.unlink(path)
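# Example for remove_bytecode() above: remove_bytecode('/tmp/foo/mymod.py') deletes
# '/tmp/foo/mymod.pyc' and, on interpreters providing imp.get_tag() (Python 3, e.g. tag
# 'cpython-36'), also '/tmp/foo/__pycache__/mymod.cpython-36.pyc', if those files exist.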
loader_template = '''
import os
from salt.utils.decorators import depends
@depends('os')
def loaded():
return True
@depends('non_existantmodulename')
def not_loaded():
return True
'''
class LazyLoaderTest(TestCase):
'''
Test the loader
'''
module_name = 'lazyloadertest'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
# Setup the module
self.module_dir = tempfile.mkdtemp(dir=TMP)
self.module_file = os.path.join(self.module_dir,
'{0}.py'.format(self.module_name))
with salt.utils.fopen(self.module_file, 'w') as fh:
fh.write(loader_template)
fh.flush()
os.fsync(fh.fileno())
# Invoke the loader
self.loader = LazyLoader([self.module_dir], copy.deepcopy(self.opts), tag='module')
def tearDown(self):
shutil.rmtree(self.module_dir)
if os.path.isdir(self.module_dir):
shutil.rmtree(self.module_dir)
del self.module_dir
del self.module_file
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def test_depends(self):
'''
Test that the depends decorator works properly
'''
# Make sure depends correctly allowed a function to load. If this
# results in a KeyError, the decorator is broken.
self.assertTrue(
inspect.isfunction(
self.loader[self.module_name + '.loaded']
)
)
# Make sure depends correctly kept a function from loading
self.assertTrue(self.module_name + '.not_loaded' not in self.loader)
class LazyLoaderVirtualEnabledTest(TestCase):
'''
Test the base loader of salt.
'''
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['disable_modules'] = ['pillar']
cls.opts['grains'] = grains(cls.opts)
def setUp(self):
self.loader = LazyLoader(_module_dirs(copy.deepcopy(self.opts), 'modules', 'module'),
copy.deepcopy(self.opts),
tag='module')
def tearDown(self):
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def test_basic(self):
'''
Ensure that it only loads stuff when needed
'''
# make sure it starts empty
self.assertEqual(self.loader._dict, {})
# get something, and make sure its a func
self.assertTrue(inspect.isfunction(self.loader['test.ping']))
# make sure we only loaded "test" functions
for key, val in six.iteritems(self.loader._dict):
self.assertEqual(key.split('.', 1)[0], 'test')
# make sure the depends thing worked (double check of the depends testing,
# since the loader does the calling magically
self.assertFalse('test.missing_func' in self.loader._dict)
def test_badkey(self):
with self.assertRaises(KeyError):
self.loader[None] # pylint: disable=W0104
with self.assertRaises(KeyError):
self.loader[1] # pylint: disable=W0104
def test_disable(self):
self.assertNotIn('pillar.items', self.loader)
def test_len_load(self):
'''
Since LazyLoader is a MutableMapping, if someone asks for len() we have
to load all
'''
self.assertEqual(self.loader._dict, {})
len(self.loader) # force a load all
self.assertNotEqual(self.loader._dict, {})
def test_iter_load(self):
'''
Since LazyLoader is a MutableMapping, if someone asks to iterate we have
to load all
'''
self.assertEqual(self.loader._dict, {})
# force a load all
for key, func in six.iteritems(self.loader):
break
self.assertNotEqual(self.loader._dict, {})
def test_context(self):
'''
Make sure context is shared across modules
'''
# make sure it starts empty
self.assertEqual(self.loader._dict, {})
# get something, and make sure its a func
func = self.loader['test.ping']
with patch.dict(func.__globals__['__context__'], {'foo': 'bar'}):
self.assertEqual(self.loader['test.echo'].__globals__['__context__']['foo'], 'bar')
self.assertEqual(self.loader['grains.get'].__globals__['__context__']['foo'], 'bar')
def test_globals(self):
func_globals = self.loader['test.ping'].__globals__
self.assertEqual(func_globals['__grains__'], self.opts.get('grains', {}))
self.assertEqual(func_globals['__pillar__'], self.opts.get('pillar', {}))
# the opts passed into modules is at least a subset of the whole opts
for key, val in six.iteritems(func_globals['__opts__']):
if key in salt.config.DEFAULT_MASTER_OPTS and key not in salt.config.DEFAULT_MINION_OPTS:
# We loaded the minion opts, but somewhere in the code, the master options got pulled in
# Let's just not check for equality since the option won't even exist in the loaded
# minion options
continue
if key not in salt.config.DEFAULT_MASTER_OPTS and key not in salt.config.DEFAULT_MINION_OPTS:
# This isn't even a default configuration setting, lets carry on
continue
self.assertEqual(self.opts[key], val)
def test_pack(self):
self.loader.pack['__foo__'] = 'bar'
func_globals = self.loader['test.ping'].__globals__
self.assertEqual(func_globals['__foo__'], 'bar')
def test_virtual(self):
self.assertNotIn('test_virtual.ping', self.loader)
class LazyLoaderVirtualDisabledTest(TestCase):
'''
Test the loader of salt without __virtual__
'''
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = grains(cls.opts)
def setUp(self):
self.loader = LazyLoader(_module_dirs(copy.deepcopy(self.opts), 'modules', 'module'),
copy.deepcopy(self.opts),
tag='module',
virtual_enable=False)
def tearDown(self):
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def test_virtual(self):
self.assertTrue(inspect.isfunction(self.loader['test_virtual.ping']))
class LazyLoaderWhitelistTest(TestCase):
'''
Test the loader of salt with a whitelist
'''
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = grains(cls.opts)
def setUp(self):
self.loader = LazyLoader(_module_dirs(copy.deepcopy(self.opts), 'modules', 'module'),
copy.deepcopy(self.opts),
tag='module',
whitelist=['test', 'pillar'])
def tearDown(self):
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def test_whitelist(self):
self.assertTrue(inspect.isfunction(self.loader['test.ping']))
self.assertTrue(inspect.isfunction(self.loader['pillar.get']))
self.assertNotIn('grains.get', self.loader)
module_template = '''
__load__ = ['test', 'test_alias']
__func_alias__ = dict(test_alias='working_alias')
from salt.utils.decorators import depends
def test():
return {count}
def test_alias():
return True
def test2():
return True
@depends('non_existantmodulename')
def test3():
return True
@depends('non_existantmodulename', fallback_function=test)
def test4():
return True
'''
class LazyLoaderReloadingTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertest'
module_key = 'loadertest.test'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
self.count = 0
opts = copy.deepcopy(self.opts)
dirs = _module_dirs(opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.utils = utils(opts)
self.proxy = proxy(opts)
self.minion_mods = minion_mods(opts)
self.loader = LazyLoader(dirs,
opts,
tag='module',
pack={'__utils__': self.utils,
'__proxy__': self.proxy,
'__salt__': self.minion_mods})
def tearDown(self):
shutil.rmtree(self.tmp_dir)
for attrname in ('tmp_dir', 'utils', 'proxy', 'loader', 'minion_mods', 'utils'):
try:
delattr(self, attrname)
except AttributeError:
continue
@classmethod
def tearDownClass(cls):
del cls.opts
def update_module(self):
self.count += 1
with salt.utils.fopen(self.module_path, 'wb') as fh:
fh.write(
salt.utils.to_bytes(
module_template.format(count=self.count)
)
)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(self.module_path)
def rm_module(self):
os.unlink(self.module_path)
remove_bytecode(self.module_path)
@property
def module_path(self):
return os.path.join(self.tmp_dir, '{0}.py'.format(self.module_name))
def test_alias(self):
'''
Make sure that you can access alias-d modules
'''
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.assertNotIn('{0}.test_alias'.format(self.module_name), self.loader)
self.assertTrue(inspect.isfunction(self.loader['{0}.working_alias'.format(self.module_name)]))
def test_clear(self):
self.assertTrue(inspect.isfunction(self.loader['test.ping']))
self.update_module() # write out out custom module
self.loader.clear() # clear the loader dict
# force a load of our module
self.assertTrue(inspect.isfunction(self.loader[self.module_key]))
# make sure we only loaded our custom module
# which means that we did correctly refresh the file mapping
for k, v in six.iteritems(self.loader._dict):
self.assertTrue(k.startswith(self.module_name))
def test_load(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.assertTrue(inspect.isfunction(self.loader[self.module_key]))
def test__load__(self):
'''
If a module specifies __load__ we should only load/expose those modules
'''
self.update_module()
# ensure it doesn't exist
self.assertNotIn(self.module_key + '2', self.loader)
def test__load__and_depends(self):
'''
If a module specifies __load__ we should only load/expose those modules
'''
self.update_module()
# ensure it doesn't exist
self.assertNotIn(self.module_key + '3', self.loader)
self.assertNotIn(self.module_key + '4', self.loader)
def test_reload(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
# make sure it updates correctly
for x in range(1, 3):
self.update_module()
self.loader.clear()
self.assertEqual(self.loader[self.module_key](), self.count)
self.rm_module()
# make sure that even if we remove the module, its still loaded until a clear
self.assertEqual(self.loader[self.module_key](), self.count)
self.loader.clear()
self.assertNotIn(self.module_key, self.loader)
virtual_aliases = ('loadertest2', 'loadertest3')
virtual_alias_module_template = '''
__virtual_aliases__ = {0}
def test():
return True
'''.format(virtual_aliases)
class LazyLoaderVirtualAliasTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertest'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
opts = copy.deepcopy(self.opts)
dirs = _module_dirs(opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.utils = utils(opts)
self.proxy = proxy(opts)
self.minion_mods = minion_mods(opts)
self.loader = LazyLoader(dirs,
opts,
tag='module',
pack={'__utils__': self.utils,
'__proxy__': self.proxy,
'__salt__': self.minion_mods})
def tearDown(self):
del self.tmp_dir
del self.utils
del self.proxy
del self.minion_mods
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def update_module(self):
with salt.utils.fopen(self.module_path, 'wb') as fh:
fh.write(salt.utils.to_bytes(virtual_alias_module_template))
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(self.module_path)
@property
def module_path(self):
return os.path.join(self.tmp_dir, '{0}.py'.format(self.module_name))
def test_virtual_alias(self):
'''
Test the __virtual_alias__ feature
'''
self.update_module()
mod_names = [self.module_name] + list(virtual_aliases)
for mod_name in mod_names:
func_name = '.'.join((mod_name, 'test'))
log.debug('Running %s (dict attribute)', func_name)
self.assertTrue(self.loader[func_name]())
log.debug('Running %s (loader attribute)', func_name)
self.assertTrue(getattr(self.loader, mod_name).test())
submodule_template = '''
from __future__ import absolute_import
import {0}.lib
def test():
return ({count}, {0}.lib.test())
'''
submodule_lib_template = '''
def test():
return {count}
'''
class LazyLoaderSubmodReloadingTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertestsubmod'
module_key = 'loadertestsubmod.test'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
os.makedirs(self.module_dir)
self.count = 0
self.lib_count = 0
opts = copy.deepcopy(self.opts)
dirs = _module_dirs(opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.utils = utils(opts)
self.proxy = proxy(opts)
self.minion_mods = minion_mods(opts)
self.loader = LazyLoader(dirs,
opts,
tag='module',
pack={'__utils__': self.utils,
'__proxy__': self.proxy,
'__salt__': self.minion_mods}
)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
del self.tmp_dir
del self.utils
del self.proxy
del self.minion_mods
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def update_module(self):
self.count += 1
with salt.utils.fopen(self.module_path, 'wb') as fh:
fh.write(
salt.utils.to_bytes(
submodule_template.format(self.module_name, count=self.count)
)
)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(self.module_path)
def rm_module(self):
os.unlink(self.module_path)
remove_bytecode(self.module_path)
def update_lib(self):
self.lib_count += 1
for modname in list(sys.modules):
if modname.startswith(self.module_name):
del sys.modules[modname]
with salt.utils.fopen(self.lib_path, 'wb') as fh:
fh.write(
salt.utils.to_bytes(
submodule_lib_template.format(count=self.lib_count)
)
)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(self.lib_path)
def rm_lib(self):
for modname in list(sys.modules):
if modname.startswith(self.module_name):
del sys.modules[modname]
os.unlink(self.lib_path)
remove_bytecode(self.lib_path)
@property
def module_dir(self):
return os.path.join(self.tmp_dir, self.module_name)
@property
def module_path(self):
return os.path.join(self.module_dir, '__init__.py')
@property
def lib_path(self):
return os.path.join(self.module_dir, 'lib.py')
def test_basic(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.update_lib()
self.loader.clear()
self.assertIn(self.module_key, self.loader)
def test_reload(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
# update both the module and the lib
for x in range(1, 3):
self.update_lib()
self.update_module()
self.loader.clear()
self.assertNotIn(self.module_key, self.loader._dict)
self.assertIn(self.module_key, self.loader)
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
# update just the module
for x in range(1, 3):
self.update_module()
self.loader.clear()
self.assertNotIn(self.module_key, self.loader._dict)
self.assertIn(self.module_key, self.loader)
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
# update just the lib
for x in range(1, 3):
self.update_lib()
self.loader.clear()
self.assertNotIn(self.module_key, self.loader._dict)
self.assertIn(self.module_key, self.loader)
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
self.rm_module()
# make sure that even if we remove the module, its still loaded until a clear
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
self.loader.clear()
self.assertNotIn(self.module_key, self.loader)
def test_reload_missing_lib(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
# update both the module and the lib
self.update_module()
self.update_lib()
self.loader.clear()
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
# remove the lib, this means we should fail to load the module next time
self.rm_lib()
self.loader.clear()
self.assertNotIn(self.module_key, self.loader)
mod_template = '''
def test():
return ({val})
'''
class LazyLoaderModulePackageTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertestmodpkg'
module_key = 'loadertestmodpkg.test'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
dirs = _module_dirs(copy.deepcopy(self.opts), 'modules', 'module')
dirs.append(self.tmp_dir)
self.loader = LazyLoader(dirs,
copy.deepcopy(self.opts),
tag='module')
def tearDown(self):
shutil.rmtree(self.tmp_dir)
del self.tmp_dir
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def update_pyfile(self, pyfile, contents):
dirname = os.path.dirname(pyfile)
if not os.path.exists(dirname):
os.makedirs(dirname)
with salt.utils.fopen(pyfile, 'wb') as fh:
fh.write(salt.utils.to_bytes(contents))
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(pyfile)
def rm_pyfile(self, pyfile):
os.unlink(pyfile)
remove_bytecode(pyfile)
def update_module(self, relative_path, contents):
self.update_pyfile(os.path.join(self.tmp_dir, relative_path), contents)
def rm_module(self, relative_path):
self.rm_pyfile(os.path.join(self.tmp_dir, relative_path))
def test_module(self):
# ensure it doesn't exist
self.assertNotIn('foo', self.loader)
self.assertNotIn('foo.test', self.loader)
self.update_module('foo.py', mod_template.format(val=1))
self.loader.clear()
self.assertIn('foo.test', self.loader)
self.assertEqual(self.loader['foo.test'](), 1)
def test_package(self):
# ensure it doesn't exist
self.assertNotIn('foo', self.loader)
self.assertNotIn('foo.test', self.loader)
self.update_module('foo/__init__.py', mod_template.format(val=2))
self.loader.clear()
self.assertIn('foo.test', self.loader)
self.assertEqual(self.loader['foo.test'](), 2)
def test_module_package_collision(self):
# ensure it doesn't exist
self.assertNotIn('foo', self.loader)
self.assertNotIn('foo.test', self.loader)
self.update_module('foo.py', mod_template.format(val=3))
self.loader.clear()
self.assertIn('foo.test', self.loader)
self.assertEqual(self.loader['foo.test'](), 3)
self.update_module('foo/__init__.py', mod_template.format(val=4))
self.loader.clear()
self.assertIn('foo.test', self.loader)
self.assertEqual(self.loader['foo.test'](), 4)
deep_init_base = '''
from __future__ import absolute_import
import {0}.top_lib
import {0}.top_lib.mid_lib
import {0}.top_lib.mid_lib.bot_lib
def top():
return {0}.top_lib.test()
def mid():
return {0}.top_lib.mid_lib.test()
def bot():
return {0}.top_lib.mid_lib.bot_lib.test()
'''
class LazyLoaderDeepSubmodReloadingTest(TestCase):
module_name = 'loadertestsubmoddeep'
libs = ('top_lib', 'mid_lib', 'bot_lib')
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
os.makedirs(self.module_dir)
self.lib_count = collections.defaultdict(int) # mapping of path -> count
# bootstrap libs
with salt.utils.fopen(os.path.join(self.module_dir, '__init__.py'), 'w') as fh:
# No .decode() needed here as deep_init_base is defined as str and
# not bytes.
fh.write(deep_init_base.format(self.module_name))
fh.flush()
os.fsync(fh.fileno()) # flush to disk
self.lib_paths = {}
dir_path = self.module_dir
for lib_name in self.libs:
dir_path = os.path.join(dir_path, lib_name)
self.lib_paths[lib_name] = dir_path
os.makedirs(dir_path)
self.update_lib(lib_name)
opts = copy.deepcopy(self.opts)
dirs = _module_dirs(opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.utils = utils(opts)
self.proxy = proxy(opts)
self.minion_mods = minion_mods(opts)
self.loader = LazyLoader(dirs,
copy.deepcopy(opts),
tag='module',
pack={'__utils__': self.utils,
'__proxy__': self.proxy,
'__salt__': self.minion_mods}
)
self.assertIn('{0}.top'.format(self.module_name), self.loader)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
del self.tmp_dir
del self.lib_paths
del self.utils
del self.proxy
del self.minion_mods
del self.loader
del self.lib_count
@classmethod
def tearDownClass(cls):
del cls.opts
@property
def module_dir(self):
return os.path.join(self.tmp_dir, self.module_name)
def update_lib(self, lib_name):
for modname in list(sys.modules):
if modname.startswith(self.module_name):
del sys.modules[modname]
path = os.path.join(self.lib_paths[lib_name], '__init__.py')
self.lib_count[lib_name] += 1
with salt.utils.fopen(path, 'wb') as fh:
fh.write(
salt.utils.to_bytes(
submodule_lib_template.format(count=self.lib_count[lib_name])
)
)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(path)
def test_basic(self):
self.assertIn('{0}.top'.format(self.module_name), self.loader)
def _verify_libs(self):
for lib in self.libs:
self.assertEqual(self.loader['{0}.{1}'.format(self.module_name, lib.replace('_lib', ''))](),
self.lib_count[lib])
def test_reload(self):
'''
Make sure that we can reload all libraries of arbitrary depth
'''
self._verify_libs()
# update them all
for lib in self.libs:
for x in range(5):
self.update_lib(lib)
self.loader.clear()
self._verify_libs()
|
the-stack_0_24737
|
import autofit as af
import autolens as al
from test_autolens.integration.tests.imaging import runner
test_type = "lens__source_inversion"
test_name = "lens_mass__source_adaptive_brightness"
data_type = "lens_sie__source_smooth"
data_resolution = "lsst"
def make_pipeline(name, phase_folders, non_linear_class=af.MultiNest):
phase1 = al.PhaseImaging(
phase_name="phase_1",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, mass=al.mp.EllipticalIsothermal),
source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
),
non_linear_class=non_linear_class,
)
phase1.optimizer.const_efficiency_mode = True
phase1.optimizer.n_live_points = 40
phase1.optimizer.sampling_efficiency = 0.8
phase1.optimizer.evidence_tolerance = 10.0
phase2 = al.PhaseImaging(
phase_name="phase_2",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(
redshift=0.5, mass=phase1.result.instance.galaxies.lens.mass
),
source=al.GalaxyModel(
redshift=1.0,
pixelization=al.pix.VoronoiBrightnessImage,
regularization=al.reg.AdaptiveBrightness,
),
),
non_linear_class=non_linear_class,
)
phase2.optimizer.const_efficiency_mode = True
phase2.optimizer.n_live_points = 40
phase2.optimizer.sampling_efficiency = 0.8
phase2.optimizer.evidence_tolerance = 10.0
phase3 = al.PhaseImaging(
phase_name="phase_3",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, mass=phase1.model.galaxies.lens.mass),
source=al.GalaxyModel(
redshift=1.0,
pixelization=phase2.result.instance.galaxies.source.pixelization,
regularization=phase2.result.instance.galaxies.source.regularization,
),
),
non_linear_class=non_linear_class,
)
phase3.optimizer.const_efficiency_mode = True
phase3.optimizer.n_live_points = 40
phase3.optimizer.sampling_efficiency = 0.8
phase3.optimizer.evidence_tolerance = 10.0
phase4 = al.PhaseImaging(
phase_name="phase_4_weighted_regularization",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(
redshift=0.5, mass=phase3.result.instance.galaxies.lens.mass
),
source=al.GalaxyModel(
redshift=1.0,
pixelization=phase2.model.galaxies.source.pixelization,
                regularization=phase2.model.galaxies.source.regularization,
),
),
non_linear_class=non_linear_class,
)
phase4.optimizer.const_efficiency_mode = True
phase4.optimizer.n_live_points = 40
phase4.optimizer.sampling_efficiency = 0.8
phase4.optimizer.evidence_tolerance = 10.0
return al.PipelineDataset(name, phase1, phase2, phase3, phase4)
if __name__ == "__main__":
import sys
runner.run(sys.modules[__name__])
|
the-stack_0_24744
|
import datetime
import hashlib
from mimetypes import MimeTypes
import pytz
import requests
from celery.task import task
from django.conf import settings
from django.core.mail import EmailMessage
from django.shortcuts import reverse
from django.template import Context, Template
from common.utils import convert_to_custom_timezone
from marketing.models import (Campaign, CampaignCompleted, CampaignLog,
Contact, ContactEmailCampaign, ContactList,
FailedContact, DuplicateContacts)
@task
def campaign_sechedule(request):
pass
@task
def campaign_open(request):
pass
@task
def campaign_click(request):
pass
@task
def upload_csv_file(data, invalid_data, user, contact_lists):
for each in data:
contact = Contact.objects.filter(email=each['email']).first()
if not contact:
contact = Contact.objects.create(
email=each['email'], created_by_id=user,
name=each['first name'])
if each.get('company name', None):
contact.company_name = each['company name']
if each.get('last name', None):
contact.last_name = each['last name']
if each.get('city', None):
contact.city = each['city']
if each.get("state", None):
contact.state = each['state']
contact.save()
else:
if not DuplicateContacts.objects.filter(
contacts=contact,
contact_list=ContactList.objects.get(id=int(contact_lists[0]))).exists():
DuplicateContacts.objects.create(
contacts=contact,
contact_list=ContactList.objects.get(id=int(contact_lists[0])))
for contact_list in contact_lists:
contact.contact_list.add(
ContactList.objects.get(id=int(contact_list)))
for each in invalid_data:
contact = FailedContact.objects.filter(email=each['email']).first()
if not contact:
contact = FailedContact.objects.create(
email=each['email'], created_by_id=user,
name=each['first name'])
if each.get('company name', None):
contact.company_name = each['company name']
if each.get('last name', None):
contact.last_name = each['last name']
if each.get('city', None):
contact.city = each['city']
if each.get("state", None):
contact.state = each['state']
contact.save()
for contact_list in contact_lists:
contact.contact_list.add(
ContactList.objects.get(id=int(contact_list)))
def send_campaign_mail(subject, content, from_email, to_email, bcc, reply_to, attachments):
msg = EmailMessage(
subject,
content,
from_email,
to_email,
bcc,
reply_to=reply_to,
)
for attachment in attachments:
msg.attach(*attachment)
msg.content_subtype = "html"
res = msg.send()
print(res)
def get_campaign_message_id(campaign):
hash_ = hashlib.md5()
hash_.update(
str(str(campaign.id) + str(campaign.campaign.created_by.id)).encode('utf-8') +
str(datetime.datetime.now()).encode('utf-8')
)
file_hash = hash_.hexdigest()
return file_hash
@task
def run_campaign(campaign, domain='demo.django-crm.io', protocol='https'):
try:
campaign = Campaign.objects.get(id=campaign)
attachments = []
if campaign.attachment:
file_path = campaign.attachment.path
file_name = file_path.split("/")[-1]
            with open(file_path, 'rb') as attachment_file:
                content = attachment_file.read()
mime = MimeTypes()
mime_type = mime.guess_type(file_path)
attachments.append((file_name, content, mime_type[0]))
subject = campaign.subject
contacts = Contact.objects.filter(
contact_list__in=[each_list for each_list in campaign.contact_lists.all()])
default_html = campaign.html_processed
for each_contact in contacts:
html = default_html
campaign_log = CampaignLog.objects.create(contact=each_contact,
campaign=campaign)
if campaign.reply_to_email:
reply_to_email = campaign.reply_to_email
else:
message_id = get_campaign_message_id(campaign_log)
campaign_log.message_id = message_id
campaign_log.save()
domain_name = 'django-crm.com'
if campaign.from_email is not None:
from_email = campaign.from_email
else:
from_email = campaign.created_by.email
reply_to_email = str(from_email) + ' <' + \
str(message_id + '@' + domain_name + '') + '>'
if not (each_contact.is_bounced or each_contact.is_unsubscribed):
# domain_url = settings.URL_FOR_LINKS
domain_url = protocol + '://' + domain
img_src_url = domain_url + reverse('marketing:campaign_open', kwargs={
'campaign_log_id': campaign_log.id, 'email_id': each_contact.id})
# images can only be accessed over https
link = '<img src={img_src_url} alt="company_logo" title="company_logo" height="1" width="1" />'.format(
img_src_url=img_src_url)
# link = '<img src="' + domain_url + '/m/cm/track-email/' + \
# str(campaign_log.id) + '/contact/' + \
# str(each_contact.id) + '/" height="1" width="1" alt="company_logo" + \
# title="company_logo"/>'
unsubscribe_from_campaign_url = reverse(
'marketing:unsubscribe_from_campaign', kwargs={'contact_id': each_contact.id,
'campaign_id': campaign.id})
unsubscribe_from_campaign_html = "<br><br/><a href={}>Unsubscribe</a>".format(
domain_url + unsubscribe_from_campaign_url)
names_dict = {'company_name': each_contact.company_name if each_contact.company_name else '',
'last_name': each_contact.last_name if each_contact.last_name else '',
'city': each_contact.city if each_contact.city else '',
'state': each_contact.state if each_contact.state else '',
'first_name': each_contact.name,
'email': each_contact.email, 'email_id': each_contact.id,
'name': each_contact.name + ' ' + each_contact.last_name if each_contact.last_name else '',
'unsubscribe_from_campaign_url': unsubscribe_from_campaign_url}
html = Template(html).render(Context(names_dict))
mail_html = html + link + unsubscribe_from_campaign_html
from_email = str(campaign.from_name) + "<" + \
str(campaign.from_email) + '>'
to_email = [each_contact.email]
send_campaign_mail(
subject, mail_html, from_email, to_email, [], [reply_to_email], attachments)
except Exception as e:
print(e)
pass
@task
def run_all_campaigns():
start_date = datetime.date.today()
campaigns = Campaign.objects.filter(schedule_date_time__date=start_date)
for each in campaigns:
run_campaign(each.id)
@task
def list_all_bounces_unsubscribes():
bounces = requests.get('https://api.sendgrid.com/api/bounces.get.json?api_user=' +
settings.EMAIL_HOST_USER + '&api_key=' + settings.EMAIL_HOST_PASSWORD)
for each in bounces.json():
if type(each) == dict:
contact = Contact.objects.filter(email=each.get('email')).first()
if contact:
contact.is_bounced = True
contact.save()
bounces = requests.get('https://api.sendgrid.com/api/unsubscribes.get.json?api_user=' +
settings.EMAIL_HOST_USER + '&api_key=' + settings.EMAIL_HOST_PASSWORD)
for each in bounces.json():
if type(each) == dict:
contact = Contact.objects.filter(email=each.get('email')).first()
if contact:
contact.is_unsubscribed = True
contact.save()
@task
def send_scheduled_campaigns():
from datetime import datetime
campaigns = Campaign.objects.filter(schedule_date_time__isnull=False)
for each in campaigns:
completed = CampaignCompleted.objects.filter(
is_completed=True).values_list('campaign_id', flat=True)
if each.id not in completed:
schedule_date_time = each.schedule_date_time
sent_time = datetime.now().strftime('%Y-%m-%d %H:%M')
sent_time = datetime.strptime(sent_time, '%Y-%m-%d %H:%M')
local_tz = pytz.timezone(settings.TIME_ZONE)
sent_time = local_tz.localize(sent_time)
sent_time = convert_to_custom_timezone(
sent_time, each.timezone, to_utc=True)
if (
str(each.schedule_date_time.date()) == str(sent_time.date()) and
str(schedule_date_time.hour) == str(sent_time.hour)
):
run_campaign.delay(each.id)
CampaignCompleted.objects.create(
campaign=each, is_completed=True)
@task
def delete_multiple_contacts_tasks(contact_list_id, bounced=True):
""" this method is used to remove all contacts from a contact list based on bounced kwarg """
contacts_list_obj = ContactList.objects.filter(id=contact_list_id).first()
if contacts_list_obj:
contacts_objs = contacts_list_obj.contacts.filter(is_bounced=bounced)
if contacts_objs:
for contact_obj in contacts_objs:
if contact_obj.contact_list.count() > 1:
contact_obj.contact_list.remove(contacts_list_obj)
else:
contact_obj.delete()
@task
def send_campaign_email_to_admin_contact(campaign, domain='demo.django-crm.io', protocol='https'):
try:
campaign = Campaign.objects.get(id=campaign)
attachments = []
if campaign.attachment:
file_path = campaign.attachment.path
file_name = file_path.split("/")[-1]
            with open(file_path, 'rb') as attachment_file:
                content = attachment_file.read()
mime = MimeTypes()
mime_type = mime.guess_type(file_path)
attachments.append((file_name, content, mime_type[0]))
subject = campaign.subject
contacts = ContactEmailCampaign.objects.all()
default_html = campaign.html_processed
for each_contact in contacts:
html = default_html
if campaign.reply_to_email:
reply_to_email = campaign.reply_to_email
else:
domain_name = 'django-crm.com'
if campaign.from_email is not None:
from_email = campaign.from_email
else:
from_email = campaign.created_by.email
reply_to_email = str(from_email) + ' <' + \
str(settings.EMAIL_HOST_USER + '@' + domain_name + '') + '>'
# domain_url = settings.URL_FOR_LINKS
domain_url = protocol + '://' + domain
# img_src_url = domain_url + reverse('marketing:campaign_open', kwargs={
# 'campaign_log_id': campaign_log.id, 'email_id': each_contact.id})
# # images can only be accessed over https
# link = '<img src={img_src_url} alt="company_logo" title="company_logo" height="1" width="1" />'.format(
# img_src_url=img_src_url)
# link = '<img src="' + domain_url + '/m/cm/track-email/' + \
# str(campaign_log.id) + '/contact/' + \
# str(each_contact.id) + '/" height="1" width="1" alt="company_logo" + \
# title="company_logo"/>'
# unsubscribe_from_campaign_url = reverse(
# 'marketing:unsubscribe_from_campaign', kwargs={'contact_id': each_contact.id,
# 'campaign_id': campaign.id})
# unsubscribe_from_campaign_html = "<br><br/><a href={}>Unsubscribe</a>".format(
# domain_url + unsubscribe_from_campaign_url)
# names_dict = {'company_name': '', 'city': '', 'state': '',
# 'last_name': each_contact.last_name if each_contact.last_name else '',
# 'email': each_contact.email, 'email_id': each_contact.id,
# 'name': each_contact.name + ' ' + each_contact.last_name if each_contact.last_name else '',
# }
# mail_html = html + link + unsubscribe_from_campaign_html
html = Template(html).render(Context({'email_id': each_contact.id}))
mail_html = html
from_email = str(campaign.from_name) + "<" + \
str(campaign.from_email) + '>'
to_email = [each_contact.email]
send_campaign_mail(
subject, mail_html, from_email, to_email, [], [reply_to_email], attachments)
except Exception as e:
print(e)
pass
|
the-stack_0_24745
|
#!/usr/bin/env python3
import os, sys
def main():
indir = os.path.dirname(os.path.realpath(__file__))
delim = '_'; ext = '.bib'
for fname in os.listdir(indir):
if ext not in fname: continue
with open(os.path.join(indir, fname), 'r') as infile:
key = fname.replace(ext,'')
print(key)
parts = key.split(delim)
new_key = delim.join([parts[2], parts[0], parts[1]])
content = infile.read().replace(key, new_key)
with open(os.path.join(indir, 'tmp', new_key+ext), 'w') as outfile:
outfile.write(content)
if __name__ == '__main__':
main()
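# Worked example of the renaming above (hypothetical file name):
#   'doe_firstpaper_2020.bib' -> parts = ['doe', 'firstpaper', '2020']
#   new_key = '2020_doe_firstpaper'; occurrences of the old key in the file body are
#   replaced and the result is written to 'tmp/2020_doe_firstpaper.bib'.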
|
the-stack_0_24746
|
#!/usr/bin/env python
"""The auditing system."""
import os
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
class TestAuditSystem(test_lib.FlowTestsBaseclass):
def testFlowExecution(self):
client_mock = action_mocks.ActionMock("ListDirectory", "StatFile")
for _ in test_lib.TestFlowHelper(
"ListDirectory", client_mock, client_id=self.client_id,
pathspec=rdfvalue.PathSpec(
path=os.path.join(self.base_path, "test_img.dd/test directory"),
pathtype=rdfvalue.PathSpec.PathType.OS),
token=self.token):
pass
fd = aff4.FACTORY.Open("aff4:/audit/log", token=self.token)
event = fd[0]
self.assertEqual(event.action, rdfvalue.AuditEvent.Action.RUN_FLOW)
self.assertEqual(event.flow_name, "ListDirectory")
self.assertEqual(event.user, self.token.username)
class FlowTestLoader(test_lib.GRRTestLoader):
base_class = TestAuditSystem
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv, testLoader=FlowTestLoader())
if __name__ == "__main__":
flags.StartMain(main)
|
the-stack_0_24747
|
import marlgrid.agents
from marlgrid.envs.cluttered import ClutteredMultiGrid

# TestRLAgent and N_episodes are placeholders to be supplied by the user of this snippet.
agents = marlgrid.agents.IndependentLearners(
TestRLAgent(),
TestRLAgent(),
TestRLAgent()
)
env = ClutteredMultiGrid(agents, grid_size=15, n_clutter=10)
for i_episode in range(N_episodes):
obs_array = env.reset()
with agents.episode():
episode_over = False
while not episode_over:
# env.render()
# Get an array with actions for each agent.
action_array = agents.action_step(obs_array)
# Step the multi-agent environment
next_obs_array, reward_array, done, _ = env.step(action_array)
# Save the transition data to replay buffers, if necessary
agents.save_step(obs_array, action_array, next_obs_array, reward_array, done)
obs_array = next_obs_array
episode_over = done
            # or, if "done" is reported per-agent:
            # episode_over = all(done)  # or any(done)
|
the-stack_0_24748
|
from __future__ import annotations
import struct
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, Generator, Iterable, Iterator, List, Optional, Tuple, Union
from mitmproxy import contentviews, ctx, flow, flowfilter, http
from mitmproxy.contentviews import base
from mitmproxy.contrib.kaitaistruct.google_protobuf import GoogleProtobuf
from mitmproxy.contrib.kaitaistruct.vlq_base128_le import VlqBase128Le
from mitmproxy.net.encoding import decode
class ProtoParser:
@dataclass
class ParserRule:
"""
A parser rule lists Field definitions which are applied if the filter rule matches the flow.
        Matching on flow level also means that a match applies to request AND response messages.
        To restrict a rule to requests only, use 'ParserRuleRequest' instead.
        To restrict a rule to responses only, use 'ParserRuleResponse' instead.
"""
field_definitions: List[ProtoParser.ParserFieldDefinition]
"""List of field definitions for this rule """
name: str = ""
"""Name of this rule, only used for debugging"""
filter: str = ""
"""
Flowfilter to select which flows to apply to ('~q' and '~s' can not be used to distinguish
if the rule should apply to the request or response of a flow. To do so, use ParserRuleRequest
or ParserRuleResponse. ParserRule always applies to request and response.)
"""
@dataclass
class ParserRuleResponse(ParserRule):
"""
A parser rule lists Field definitions which are applied if the filter rule matches the flow.
The rule only applies if the processed message is a server response.
"""
pass
@dataclass
class ParserRuleRequest(ParserRule):
"""
A parser rule lists Field definitions which are applied if the filter rule matches the flow.
The rule only applies if the processed message is a client request.
"""
pass
@dataclass
class ParserFieldDefinition:
"""
        Defines how to parse a field (or multiple fields with the same tag) in a protobuf message.
        This allows applying an intended decoding (f.e. decode uint64 as double instead) and assigning
        a descriptive name to a field. Field definitions are aggregated into rules, which also hold
        a filter to match selected HTTP messages.
        The most natural way to use this is to describe known parts of a single protobuf message
        in a set of field descriptors, pack them into a rule and set the filter of the rule in a way
        that it only applies to proper protobuf messages (f.e. to request traffic against an API endpoint
        matched by a URL flowfilter).
"""
        # A 'tag' could be considered an "absolute path" to match a unique field, yet
        # protobuf allows using the same nested message in different positions of the parent message.
        # The 'tag_prefixes' parameter allows applying the field definition to different "leaf nodes"
        # of a message.
#
# Example 1: match a single, absolute tag
# ----------
# tag = '1.2'
# tag_prefixes = [] (default)
#
# applies to: tag '1.2'
#
# Example 2: match multiple tags with same ending
# ----------
# tag = '1.3'
# tag_prefixes = ['1.2.', '2.5.']
#
# applies to: tag '1.2.1.3' and tag '2.5.1.3'
# does not apply to: '1.3', unless tag_prefixes is extended to tag_prefixes = ['1.2', '2.5', '']
#
# Example 3: match multiple tags
# ----------
# tag = ''
# tag_prefixes = ['1.2', '2.5']
#
# applies to: tag '1.2' and tag '1.5'
tag: str
"""Field tag for which this description applies (including flattened tag path, f.e. '1.2.2.4')"""
tag_prefixes: List[str] = field(default_factory=list)
"""List of prefixes for tag matching (f.e. tag_prefixes=['1.2.', '2.2.'] with tag='1' matches '1.2.1' and '2.2.1')"""
intended_decoding: Optional[ProtoParser.DecodedTypes] = None
"""optional: intended decoding for visualization (parser fails over to alternate decoding if not possible)"""
        name: Optional[str] = None
        """optional: descriptive name assigned to the field for visualization"""
@dataclass
class ParserOptions:
# output should contain wiretype of fields
include_wiretype: bool = False
# output should contain the fields which describe nested messages
# (the nested messages bodies are always included, but the "header fields" could
# add unnecessary output overhead)
exclude_message_headers: bool = False
# optional: rules
# rules: List[ProtoParser.ParserRule] = field(default_factory=list)
class DecodedTypes(Enum):
# varint
int32 = 0
int64 = 1
uint32 = 2
uint64 = 3
sint32 = 4 # ZigZag encoding
sint64 = 5 # ZigZag encoding
bool = 6
enum = 7
# bit_32
fixed32 = 8
sfixed32 = 9
float = 10
# bit_64
fixed64 = 11
sfixed64 = 12
double = 13
# len_delimited
string = 14
bytes = 15
message = 16
packed_repeated_field = 17
# helper
unknown = 18
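    # Example sketch (not part of the original code): a request-only rule that names two
    # fields of a hypothetical API endpoint and forces one of them to be decoded as double:
    #
    #   ProtoParser.ParserRuleRequest(
    #       name="example rule",
    #       filter=r"example\.com.*/api/search",
    #       field_definitions=[
    #           ProtoParser.ParserFieldDefinition(tag="1", name="query"),
    #           ProtoParser.ParserFieldDefinition(
    #               tag="2.1",
    #               name="latitude",
    #               intended_decoding=ProtoParser.DecodedTypes.double,
    #           ),
    #       ],
    #   )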
class Message:
def __init__(
self,
data: bytes,
options: ProtoParser.ParserOptions,
rules: List[ProtoParser.ParserRule],
parent_field: ProtoParser.Field = None,
) -> None:
self.data: bytes = data
self.parent_field: Optional[ProtoParser.Field] = parent_field
self.options: ProtoParser.ParserOptions = options
self.rules: List[ProtoParser.ParserRule] = rules
try:
self.fields: List[ProtoParser.Field] = self.parse_message_fields(data)
except:
raise ValueError("not a valid protobuf message")
def parse_message_fields(self, message: bytes) -> List:
res: List[ProtoParser.Field] = []
pb: GoogleProtobuf = GoogleProtobuf.from_bytes(message)
for pair in pb.pairs:
tag = pair.field_tag
wt = pair.wire_type
if wt == GoogleProtobuf.Pair.WireTypes.group_start or wt == GoogleProtobuf.Pair.WireTypes.group_end:
# ignore deprecated types without values
continue
v: Union[GoogleProtobuf.DelimitedBytes, VlqBase128Le] = pair.value # for WireType bit-32 and bit-64
preferred_decoding = ProtoParser.DecodedTypes.unknown
# see: https://www.oreilly.com/library/view/grpc-up-and/9781492058328/ch04.html
if wt == GoogleProtobuf.Pair.WireTypes.len_delimited:
assert isinstance(v, GoogleProtobuf.DelimitedBytes)
v = v.body
assert isinstance(v, bytes)
# always try to parse length delimited data as nested protobuf message
preferred_decoding = ProtoParser.DecodedTypes.message
if wt == GoogleProtobuf.Pair.WireTypes.varint:
assert isinstance(v, VlqBase128Le)
v = v.value
assert isinstance(v, int)
if v.bit_length() > 32:
preferred_decoding = ProtoParser.DecodedTypes.uint64
else:
preferred_decoding = ProtoParser.DecodedTypes.uint32
if wt == GoogleProtobuf.Pair.WireTypes.bit_64:
# exists in Protobuf for efficient encoding, when decoded comes down to uint64
assert isinstance(v, int)
preferred_decoding = ProtoParser.DecodedTypes.fixed64
if wt == GoogleProtobuf.Pair.WireTypes.bit_32:
# exists in Protobuf for efficient encoding, when decoded comes down to uint32
assert isinstance(v, int)
preferred_decoding = ProtoParser.DecodedTypes.fixed32
field = ProtoParser.Field(
preferred_decoding=preferred_decoding,
wire_type=wt,
tag=tag,
wire_value=v,
owning_message=self,
options=self.options,
rules=self.rules
)
res.append(field)
return res
def gen_fields(self) -> Generator[ProtoParser.Field, None, None]:
for f in self.fields:
yield f
def gen_flat_decoded_field_dicts(self) -> Generator[Dict, None, None]:
"""
This generator returns a flattened version of the fields from a message (including nested fields)
A single entry has the form:
{
"tag": str # fully qualified tag (all tags starting from the root message, concatenated with '.' delimiter)
"wireType": str # describes the wire encoding used by the field
"decoding": str # describes the chosen decoding (interpretation of wire encoding, according to protobuf types)
"val": Union[bool, str, bytes, int, float] # the decoded value in python representation
}
"""
# iterate over fields
for f in self.gen_fields():
# convert field and nested fields to dicts
for d in f.gen_flat_decoded_field_dicts():
yield d
def gen_string_rows(self) -> Generator[Tuple[str, ...], None, None]:
            # Excluding the fields which carry message headers simplifies the view. Without
            # knowing the message tags, however, those fields can not be referenced in a custom
            # definition to declare a different interpretation for the message (the message is a
            # length-delimited field value, which could alternatively be parsed as 'str' or
            # 'bytes' if the field tag is known).
for field_dict in self.gen_flat_decoded_field_dicts():
if self.options.exclude_message_headers and field_dict["decoding"] == "message":
continue
if self.options.include_wiretype:
col1 = "[{}->{}]".format(field_dict["wireType"], field_dict["decoding"])
else:
col1 = "[{}]".format(field_dict["decoding"])
col2 = field_dict["name"] # empty string if not set (consumes no space)
col3 = field_dict["tag"]
col4 = str(field_dict["val"])
yield col1, col2, col3, col4
class Field:
"""
        Represents a single field of a protobuf message and handles the various encodings.
As mitmproxy sees the data passing by as raw protobuf message, it only knows the
WireTypes. Each of the WireTypes could represent different Protobuf field types.
The exact Protobuf field type can not be determined from the wire format, thus different
options for decoding have to be supported.
        In addition, the parsed WireTypes are stored in intermediary Python types, which adds
        some type-conversion overhead.
WireType represented Protobuf Types Python type (intermediary)
0: varint int32, int64, uint32, uint64, enum, int (*)
sint32, sint64 (both ZigZag encoded), int
bool bool
float (**)
1: bit_64 fixed64, sfixed64, int (*)
double float
2: len_delimited string, str
message, class 'Message'
bytes, bytes (*)
packed_repeated_field class 'Message' (fields with same tag)
3: group_start unused (deprecated) -
4: group_end unused (deprecated) -
5: bit_32 fixed32, sfixed32, int (*)
float float
(*) Note 1: Conversion between WireType and intermediary python representation
is handled by Kaitai protobuf decoder and always uses the python
representation marked with (*). Converting to alternative representations
is handled inside this class.
(**) Note 2: Varint is not used to represent floating point values, but some applications
store native floats in uint32 protobuf types (or native double in uint64).
Thus we allow conversion of varint to floating point values for convenience
(A well known APIs "hide" GPS latitude and longitude values in varint types,
much easier to spot such things when rendered as float)
Ref: - https://developers.google.com/protocol-buffers/docs/proto3
- https://developers.google.com/protocol-buffers/docs/encoding
"""
def __init__(
self,
wire_type: GoogleProtobuf.Pair.WireTypes,
preferred_decoding: ProtoParser.DecodedTypes,
tag: int,
wire_value: Union[int, bytes],
owning_message: ProtoParser.Message,
options: ProtoParser.ParserOptions,
rules: List[ProtoParser.ParserRule]
) -> None:
self.wire_type: GoogleProtobuf.Pair.WireTypes = wire_type
self.preferred_decoding: ProtoParser.DecodedTypes = preferred_decoding
self.wire_value: Union[int, bytes] = wire_value
self.tag: int = tag
self.owning_message: ProtoParser.Message = owning_message
self.options: ProtoParser.ParserOptions = options
self.name: str = ""
self.rules: List[ProtoParser.ParserRule] = rules
self.parent_tags: List[int]
if not self.owning_message.parent_field:
self.parent_tags = []
else:
self.parent_tags = self.owning_message.parent_field.parent_tags[:]
self.parent_tags.append(self.owning_message.parent_field.tag)
self.apply_rules()
        # no tests for only_first_hit=False, as it is not user-changeable
def apply_rules(self, only_first_hit=True):
tag_str = self._gen_tag_str()
name = None
decoding = None
try:
for rule in self.rules:
for fd in rule.field_definitions:
match = False
if len(fd.tag_prefixes) == 0 and fd.tag == tag_str:
match = True
else:
for rt in fd.tag_prefixes:
if rt + fd.tag == tag_str:
match = True
break
if match:
if only_first_hit:
# only first match
self.name = fd.name
self.preferred_decoding = fd.intended_decoding
return
else:
# overwrite matches till last rule was inspected
# (f.e. allows to define name in one rule and intended_decoding in another one)
name = fd.name if fd.name else name
decoding = fd.intended_decoding if fd.intended_decoding else decoding
if name:
self.name = name
if decoding:
self.preferred_decoding = decoding
except Exception as e:
ctx.log.warn(e)
pass
def _gen_tag_str(self):
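            # Example: parent_tags = [2, 5] and tag = 1 produce the flattened tag string '2.5.1'.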
tags = self.parent_tags[:]
tags.append(self.tag)
return ".".join([str(tag) for tag in tags])
def safe_decode_as(
self,
intended_decoding: ProtoParser.DecodedTypes
) -> Tuple[ProtoParser.DecodedTypes, Union[bool, float, int, bytes, str, ProtoParser.Message]]:
"""
Tries to decode as intended, applies failover, if not possible
Returns selected decoding and decoded value
"""
if self.wire_type == GoogleProtobuf.Pair.WireTypes.varint:
try:
return intended_decoding, self.decode_as(intended_decoding)
except:
if int(self.wire_value).bit_length() > 32:
# ignore the fact that varint could exceed 64bit (would violate the specs)
return ProtoParser.DecodedTypes.uint64, self.wire_value
else:
return ProtoParser.DecodedTypes.uint32, self.wire_value
elif self.wire_type == GoogleProtobuf.Pair.WireTypes.bit_64:
try:
return intended_decoding, self.decode_as(intended_decoding)
except:
return ProtoParser.DecodedTypes.fixed64, self.wire_value
elif self.wire_type == GoogleProtobuf.Pair.WireTypes.bit_32:
try:
return intended_decoding, self.decode_as(intended_decoding)
except:
return ProtoParser.DecodedTypes.fixed32, self.wire_value
elif self.wire_type == GoogleProtobuf.Pair.WireTypes.len_delimited:
try:
return intended_decoding, self.decode_as(intended_decoding)
except:
# failover strategy: message --> string (valid UTF-8) --> bytes
len_delimited_strategy: List[ProtoParser.DecodedTypes] = [
ProtoParser.DecodedTypes.message,
ProtoParser.DecodedTypes.string,
ProtoParser.DecodedTypes.bytes # should always work
]
for failover_decoding in len_delimited_strategy:
if failover_decoding == intended_decoding:
continue # don't try it twice
try:
return failover_decoding, self.decode_as(failover_decoding)
except:
# move on with next
pass
# we should never get here (could not be added to tests)
return ProtoParser.DecodedTypes.unknown, self.wire_value
def decode_as(
self,
intended_decoding: ProtoParser.DecodedTypes
) -> Union[bool, int, float, bytes, str, ProtoParser.Message]:
if self.wire_type == GoogleProtobuf.Pair.WireTypes.varint:
assert isinstance(self.wire_value, int)
if intended_decoding == ProtoParser.DecodedTypes.bool:
return self.wire_value != 0
elif intended_decoding == ProtoParser.DecodedTypes.int32:
if self.wire_value.bit_length() > 32:
raise TypeError("wire value too large for int32")
return struct.unpack("!i", struct.pack("!I", self.wire_value))[0]
elif intended_decoding == ProtoParser.DecodedTypes.int64:
if self.wire_value.bit_length() > 64:
# currently avoided by kaitai decoder (can not be added to tests)
raise TypeError("wire value too large for int64")
return struct.unpack("!q", struct.pack("!Q", self.wire_value))[0]
elif intended_decoding == ProtoParser.DecodedTypes.uint32:
if self.wire_value.bit_length() > 32:
raise TypeError("wire value too large for uint32")
return self.wire_value # already 'int' which was parsed as unsigned
elif (
intended_decoding == ProtoParser.DecodedTypes.uint64 or
intended_decoding == ProtoParser.DecodedTypes.enum
):
if self.wire_value.bit_length() > 64:
# currently avoided by kaitai decoder (can not be added to tests)
raise TypeError("wire value too large")
return self.wire_value # already 'int' which was parsed as unsigned
elif intended_decoding == ProtoParser.DecodedTypes.sint32:
if self.wire_value.bit_length() > 32:
raise TypeError("wire value too large for sint32")
return (self.wire_value >> 1) ^ -(self.wire_value & 1) # zigzag_decode
elif intended_decoding == ProtoParser.DecodedTypes.sint64:
if self.wire_value.bit_length() > 64:
# currently avoided by kaitai decoder (can not be added to tests)
raise TypeError("wire value too large for sint64")
# ZigZag decode
# Ref: https://gist.github.com/mfuerstenau/ba870a29e16536fdbaba
return (self.wire_value >> 1) ^ -(self.wire_value & 1)
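                    # ZigZag maps signed values to small unsigned varints; decoding examples:
                    # 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2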
elif (
intended_decoding == ProtoParser.DecodedTypes.float or
intended_decoding == ProtoParser.DecodedTypes.double
):
# special case, not complying to protobuf specs
return self._wire_value_as_float()
elif self.wire_type == GoogleProtobuf.Pair.WireTypes.bit_64:
if intended_decoding == ProtoParser.DecodedTypes.fixed64:
return self.wire_value
elif intended_decoding == ProtoParser.DecodedTypes.sfixed64:
return struct.unpack("!q", struct.pack("!Q", self.wire_value))[0]
elif intended_decoding == ProtoParser.DecodedTypes.double:
return self._wire_value_as_float()
elif self.wire_type == GoogleProtobuf.Pair.WireTypes.bit_32:
if intended_decoding == ProtoParser.DecodedTypes.fixed32:
return self.wire_value
elif intended_decoding == ProtoParser.DecodedTypes.sfixed32:
return struct.unpack("!i", struct.pack("!I", self.wire_value))[0]
elif intended_decoding == ProtoParser.DecodedTypes.float:
return self._wire_value_as_float()
elif self.wire_type == GoogleProtobuf.Pair.WireTypes.len_delimited:
assert isinstance(self.wire_value, bytes)
if intended_decoding == ProtoParser.DecodedTypes.string:
# According to specs, a protobuf string HAS TO be UTF-8 parsable
# throw exception on invalid UTF-8 chars, but escape linebreaks
return self.wire_value_as_utf8(escape_invalid=False, escape_newline=True)
elif intended_decoding == ProtoParser.DecodedTypes.bytes:
# always works, assure to hand back a copy
return self.wire_value[:]
elif intended_decoding == ProtoParser.DecodedTypes.packed_repeated_field:
raise NotImplementedError("currently not needed")
elif intended_decoding == ProtoParser.DecodedTypes.message:
return ProtoParser.Message(
data=self.wire_value,
options=self.options,
parent_field=self,
rules=self.rules
)
# if here, there is no valid decoding
raise TypeError("intended decoding mismatches wire type")
        def encode_from(self, inputval, intended_encoding: ProtoParser.DecodedTypes):
raise NotImplementedError(
"Future work, needed to manipulate and re-encode protobuf message, with respect to given wire types"
)
def _wire_value_as_float(self) -> float:
"""
Handles double (64bit) and float (32bit).
Assumes Network Byte Order (big endian).
Usable for:
WireType --> Protobuf Type):
----------------------------
varint --> double/float (not intended by ProtoBuf, but used in the wild)
bit_32 --> float
bit_64 --> double
            len_delimited --> 4 bytes: float / 8 bytes: double / other sizes raise TypeError
"""
v = self._value_as_bytes()
if len(v) == 4:
return struct.unpack("!f", v)[0]
elif len(v) == 8:
return struct.unpack("!d", v)[0]
            # unexpected buffer size: raise instead of silently returning NaN
            raise TypeError("cannot be converted to a floating point representation")
def _value_as_bytes(self) -> bytes:
if isinstance(self.wire_value, bytes):
return self.wire_value
elif isinstance(self.wire_value, int):
if self.wire_value.bit_length() > 64:
                # the source of a Python int here is wiretype varint/bit_32/bit_64, which should never exceed 64 bit
# currently avoided by kaitai decoder (can not be added to tests)
raise ValueError("Value exceeds 64bit, violating protobuf specs")
elif self.wire_value.bit_length() > 32:
# packing uses network byte order (to assure consistent results across architectures)
return struct.pack("!Q", self.wire_value)
else:
# packing uses network byte order (to assure consistent results across architectures)
return struct.pack("!I", self.wire_value)
else:
# should never happen, no tests
raise ValueError("can not be converted to bytes")
def _wire_type_str(self):
return str(self.wire_type).split(".")[-1]
def _decoding_str(self, decoding: ProtoParser.DecodedTypes):
return str(decoding).split(".")[-1]
def wire_value_as_utf8(self, escape_invalid=True, escape_newline=True) -> str:
if isinstance(self.wire_value, bytes):
if escape_invalid:
res = self.wire_value.decode("utf-8", "backslashreplace")
else:
res = self.wire_value.decode("utf-8")
return res.replace("\n", "\\n") if escape_newline else res
return str(self.wire_value)
def gen_flat_decoded_field_dicts(self) -> Generator[Dict, None, None]:
"""
Returns a generator which passes the field as a dict.
In order to return the field value it gets decoded (based on a failover strategy and
provided ParserRules).
If the field holds a nested message, the fields contained in the message are appended.
Ultimately this flattens all fields recursively.
"""
selected_decoding, decoded_val = self.safe_decode_as(self.preferred_decoding)
field_desc_dict = {
"tag": self._gen_tag_str(),
"wireType": self._wire_type_str(),
"decoding": self._decoding_str(selected_decoding),
"name": self.name,
}
if isinstance(decoded_val, ProtoParser.Message):
field_desc_dict["val"] = "" # message has no value, because contained fields get appended (flattened)
yield field_desc_dict
# the value is an embedded message, thus add the message fields
for f in decoded_val.gen_fields():
for field_dict in f.gen_flat_decoded_field_dicts():
yield field_dict
else:
field_desc_dict["val"] = decoded_val
yield field_desc_dict
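        # Example of a single yielded dict (illustrative values, not taken from a real message):
        #   {"tag": "1.2", "wireType": "len_delimited", "decoding": "string", "name": "", "val": "hello"}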
def __init__(
self,
data: bytes,
rules: List[ProtoParser.ParserRule] = None,
parser_options: ParserOptions = None
) -> None:
self.data: bytes = data
if parser_options is None:
parser_options = ProtoParser.ParserOptions()
self.options = parser_options
if rules is None:
rules = []
self.rules = rules
self.root_message: ProtoParser.Message = ProtoParser.Message(
data=data,
options=self.options,
rules=self.rules
)
def gen_str_rows(self) -> Generator[Tuple[str, ...], None, None]:
for f in self.root_message.gen_string_rows():
yield f
# Note: all content view formatting functionality is kept out of the ProtoParser class, to
# allow it to be used independently.
# This function is generic enough to consider moving it to mitmproxy.contentviews.base
def format_table(
table_rows: Iterable[Tuple[str, ...]],
max_col_width=100,
) -> Iterator[base.TViewLine]:
"""
Helper function to render tables with variable column count (move to contentview base, if needed elsewhere)
Note: The function has to convert generators to a list, as all rows have to be processed twice (to determine
the column widths first).
"""
rows: List[Tuple[str, ...]] = []
col_count = 0
cols_width: List[int] = []
for row in table_rows:
col_count = max(col_count, len(row))
while len(cols_width) < col_count:
cols_width.append(0)
for col_num in range(len(row)):
cols_width[col_num] = max(len(row[col_num]), cols_width[col_num])
# store row in list
rows.append(row)
for i in range(len(cols_width)):
cols_width[i] = min(cols_width[i], max_col_width)
for row in rows:
line: base.TViewLine = []
for col_num in range(len(row)):
col_val = row[col_num].ljust(cols_width[col_num] + 2)
line.append(("text", col_val))
yield line
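# Minimal usage sketch for format_table (the rows below are illustrative, not from a real flow):
#
#   rows = [("tag", "wireType", "decoding", "val"), ("1", "varint", "uint32", "42")]
#   for line in format_table(rows):
#       print("".join(text for _, text in line))
#
# Each yielded line is a list of ("text", value) tuples, with every column left-justified to the
# width of its widest entry (capped at max_col_width).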
def parse_grpc_messages(data, compression_scheme) -> Generator[Tuple[bool, bytes], None, None]:
"""Generator iterates over body data and returns a boolean indicating if the messages
was compressed, along with the raw message data (decompressed) for each gRPC message
contained in the body data"""
while data:
try:
msg_is_compressed, length = struct.unpack('!?i', data[:5])
decoded_message = struct.unpack('!%is' % length, data[5:5 + length])[0]
except Exception as e:
raise ValueError("invalid gRPC message") from e
if msg_is_compressed:
try:
decoded_message = decode(encoded=decoded_message, encoding=compression_scheme)
except Exception as e:
raise ValueError("Failed to decompress gRPC message with gzip") from e
yield msg_is_compressed, decoded_message
data = data[5 + length:]
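# Sketch of the 5-byte message prefix parsed above (hypothetical, uncompressed payload):
#
#   payload = b"\x0a\x03abc"  # arbitrary serialized protobuf bytes
#   frame = struct.pack('!?i', False, len(payload)) + payload
#   [(compressed, msg)] = list(parse_grpc_messages(frame, "gzip"))
#   assert compressed is False and msg == payload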
# hacky fix for mitmproxy issue:
#
# mitmproxy handles Exceptions in the contentview's __call__ function, by
# failing over to 'Raw' view. The intention was to use this behavior to
# pass up Exceptions thrown inside the generator function ('format_pbuf'
# and 'format_grpc') to the __call__ function.
# This usually works fine if the contentview is initialized on a flow
# with invalid data.
# When the flow data gets invalidated in the edit mode, mitmproxy re-calls
# the generator functions outside the contentviews '__call__' method.
#
# This happens in the 'safe_to_print' function of 'mitmproxy/contentviews/__init__.py'
#
# def safe_to_print(lines, encoding="utf8"):
# """
# Wraps a content generator so that each text portion is a *safe to print* unicode string.
# """
# for line in lines: # <------ this code re-iterates lines and thus calls generators, without using the views __call__ function
# clean_line = []
# for (style, text) in line:
# if isinstance(text, bytes):
# text = text.decode(encoding, "replace")
# text = strutils.escape_control_characters(text)
# clean_line.append((style, text))
# yield clean_line
#
# As a result, mitmproxy crashes if the generator functions raise an Exception to indicate
# data parsing errors.
# To deal with this, the generator function gets converted into a list inside the
# __call__ function, so that exceptions are raised directly from within __call__
# instead of later, when the generator is consumed externally without exception handling.
def hack_generator_to_list(generator_func):
return list(generator_func)
def format_pbuf(message: bytes, parser_options: ProtoParser.ParserOptions, rules: List[ProtoParser.ParserRule]):
    yield from format_table(ProtoParser(data=message, parser_options=parser_options, rules=rules).gen_str_rows())
def format_grpc(
data: bytes,
parser_options: ProtoParser.ParserOptions,
rules: List[ProtoParser.ParserRule],
compression_scheme="gzip"
):
message_count = 0
for compressed, pb_message in parse_grpc_messages(data=data, compression_scheme=compression_scheme):
        headline = f"gRPC message {message_count} (compressed {compression_scheme if compressed else compressed})"
yield [("text", headline)]
        yield from format_pbuf(
            message=pb_message,
            parser_options=parser_options,
            rules=rules
        )
        message_count += 1
@dataclass
class ViewConfig:
    parser_options: ProtoParser.ParserOptions = field(default_factory=ProtoParser.ParserOptions)
parser_rules: List[ProtoParser.ParserRule] = field(default_factory=list)
class ViewGrpcProtobuf(base.View):
"""Human friendly view of protocol buffers"""
name = "gRPC/Protocol Buffer"
__content_types_pb = [
"application/x-protobuf",
"application/x-protobuffer",
"application/grpc-proto",
]
__content_types_grpc = [
"application/grpc",
]
# first value serves as default algorithm for compressed messages, if 'grpc-encoding' header is missing
__valid_grpc_encodings = [
"gzip",
"identity",
"deflate",
]
    # allows passing an external ViewConfig object; falls back to defaults otherwise
def __init__(self, config: ViewConfig = None) -> None:
super().__init__()
if config is None:
config = ViewConfig()
self.config = config
def _matching_rules(
self,
rules: List[ProtoParser.ParserRule],
message: Optional[http.Message],
flow: Optional[flow.Flow]
) -> List[ProtoParser.ParserRule]:
"""
        Checks which of the given rules apply and returns a list containing only those rules.
        Each rule defines a flow filter in rule.filter, which is usually matched against a flow.
        When it comes to protobuf parsing, request messages in most cases differ from response messages.
        Thus, it has to be possible to apply a rule to an http.Request or an http.Response only.
        As the name flowfilter suggests, filters work on the flow level, not on the message level.
        This means:
        - the filter expression '~q' matches all flows with a request, but no response
        - the filter expression '~s' matches all flows with a response
        As a result, for complete flows (with a gRPC message in both the request and the response), ParserRules
        would either be applied to the request and the response at the same time ('~s'), or they would match
        neither the request nor the response ('~q').
        To distinguish between rules which should be applied to response messages, request messages or both
        (while still being matched against the whole flow), different classes with the same behavior are used
        to wrap rules:
- ParserRule: applies to requests and responses
- ParserRuleRequest: applies to requests only
- ParserRuleResponse: applies to responses only
"""
res: List[ProtoParser.ParserRule] = []
if not flow:
return res
is_request = isinstance(message, http.Request)
for rule in rules:
# message based rule matching
if is_request and isinstance(rule, ProtoParser.ParserRuleResponse):
continue
elif not is_request and isinstance(rule, ProtoParser.ParserRuleRequest):
continue
# flow based rule matching
if flowfilter.match(rule.filter, flow=flow):
res.append(rule)
return res
def __call__(
self,
data: bytes,
*,
content_type: Optional[str] = None,
flow: Optional[flow.Flow] = None,
http_message: Optional[http.Message] = None,
**unknown_metadata,
) -> contentviews.TViewResult:
        applicable_rules = self._matching_rules(rules=self.config.parser_rules, flow=flow, message=http_message)
if content_type in self.__content_types_grpc:
# If gRPC messages are flagged to be compressed, the compression algorithm is expressed in the
# 'grpc-encoding' header.
#
            # The following code tries to determine the compression algorithm based on this header.
# If the header is not present or contains an unsupported compression, the logic falls back to
# 'gzip'.
#
# If a compressed gRPC message is found in the body data (compressed flag set), the information
# on the compression scheme is needed (even if not set by a header), in order to process the message.
            # Thus we make sure an encoding is always selected. An encoding of 'identity' would not make
            # sense for a message that is flagged as compressed, which is why a default is chosen.
try:
assert http_message is not None
h = http_message.headers["grpc-encoding"]
grpc_encoding = h if h in self.__valid_grpc_encodings else self.__valid_grpc_encodings[0]
except:
grpc_encoding = self.__valid_grpc_encodings[0]
text_iter = format_grpc(
data=data,
parser_options=self.config.parser_options,
compression_scheme=grpc_encoding,
                rules=applicable_rules
)
title = "gRPC"
else:
text_iter = format_pbuf(
message=data,
parser_options=self.config.parser_options,
                rules=applicable_rules
)
title = "Protobuf (flattened)"
# hacky bugfix, see description above generator functions format_pbuf/format_grpc
try:
text_iter = hack_generator_to_list(text_iter)
except Exception as e:
# hook to log exception tracebacks on iterators
# import traceback
# ctx.log.warn("gRPC contentview: {}".format(traceback.format_exc()))
raise e
return title, text_iter
def render_priority(
self,
data: bytes,
*,
content_type: Optional[str] = None,
flow: Optional[flow.Flow] = None,
http_message: Optional[http.Message] = None,
**unknown_metadata,
) -> float:
if bool(data) and content_type in self.__content_types_grpc:
return 1
if bool(data) and content_type in self.__content_types_pb:
# replace existing protobuf renderer preference (adjust by option)
return 1.5
else:
return 0
|
the-stack_0_24749
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long, consider-using-f-string
import time
from msrest import Deserializer
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.profiles import ResourceType
from azure.cli.command_modules.acr._constants import get_acr_task_models
from azure.core.polling import PollingMethod, LROPoller
def get_run_with_polling(cmd,
client,
run_id,
registry_name,
resource_group_name):
deserializer = Deserializer(
{k: v for k, v in get_acr_task_models(cmd).__dict__.items() if isinstance(v, type)})
def deserialize_run(response):
return deserializer('Run', response)
return LROPoller(
client=client,
initial_response=client.get(
resource_group_name, registry_name, run_id, cls=lambda x, y, z: x),
deserialization_callback=deserialize_run,
polling_method=RunPolling(
cmd=cmd,
registry_name=registry_name,
run_id=run_id
))
class RunPolling(PollingMethod): # pylint: disable=too-many-instance-attributes
def __init__(self, cmd, registry_name, run_id, timeout=30):
self._cmd = cmd
self._registry_name = registry_name
self._run_id = run_id
self._timeout = timeout
self._client = None
self._response = None # Will hold latest received response
self._url = None # The URL used to get the run
self._deserialize = None # The deserializer for Run
self.operation_status = ""
self.operation_result = None
def initialize(self, client, initial_response, deserialization_callback):
self._client = client._client # pylint: disable=protected-access
self._response = initial_response
self._url = initial_response.http_request.url
self._deserialize = deserialization_callback
self._set_operation_status(initial_response)
def run(self):
while not self.finished():
time.sleep(self._timeout)
self._update_status()
if self.operation_status not in get_succeeded_run_status(self._cmd):
from knack.util import CLIError
raise CLIError("The run with ID '{}' finished with unsuccessful status '{}'. "
"Show run details by 'az acr task show-run -r {} --run-id {}'. "
"Show run logs by 'az acr task logs -r {} --run-id {}'.".format(
self._run_id,
self.operation_status,
self._registry_name,
self._run_id,
self._registry_name,
self._run_id
))
def status(self):
return self.operation_status
def finished(self):
return self.operation_status in get_finished_run_status(self._cmd)
def resource(self):
return self.operation_result
def _set_operation_status(self, response):
if response.http_response.status_code == 200:
self.operation_result = self._deserialize(response)
self.operation_status = self.operation_result.status
return
raise CloudError(response)
def _update_status(self):
self._response = self._client._pipeline.run( # pylint: disable=protected-access
self._client.get(self._url), stream=False)
self._set_operation_status(self._response)
def get_succeeded_run_status(cmd):
RunStatus = cmd.get_models('RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs')
return [RunStatus.succeeded.value]
def get_finished_run_status(cmd):
RunStatus = cmd.get_models('RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs')
return [RunStatus.succeeded.value,
RunStatus.failed.value,
RunStatus.canceled.value,
RunStatus.error.value,
RunStatus.timeout.value]
|
the-stack_0_24750
|
"""
This is a pure python implementation of the shell sort algorithm
For doctests run following command:
python -m doctest -v shell_sort.py
or
python3 -m doctest -v shell_sort.py
For manual testing run:
python shell_sort.py
"""
from __future__ import print_function
def shell_sort(collection):
"""Pure implementation of shell sort algorithm in Python
:param collection: Some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered by ascending
>>> shell_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> shell_sort([])
[]
>>> shell_sort([-2, -5, -45])
[-45, -5, -2]
"""
# Marcin Ciura's gap sequence
gaps = [701, 301, 132, 57, 23, 10, 4, 1]
for gap in gaps:
i = gap
while i < len(collection):
temp = collection[i]
j = i
while j >= gap and collection[j - gap] > temp:
collection[j] = collection[j - gap]
j -= gap
collection[j] = temp
i += 1
return collection
if __name__ == '__main__':
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
user_input = raw_input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(shell_sort(unsorted))
|
the-stack_0_24751
|
# -*- coding: utf-8 -*-
PLAYSTORE_URL = 'https://play.google.com/store'
BASE_URL = PLAYSTORE_URL + '/apps'
SUGGESTION_URL = 'https://market.android.com/suggest/SuggRequest'
SEARCH_URL = PLAYSTORE_URL + '/search'
REVIEW_URL = PLAYSTORE_URL + '/getreviews'
CONCURRENT_REQUESTS = 10
USER_AGENT = (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/45.0.2454.101 Safari/537.36"
)
# Number of results to retrieve from a collection. Range(1 - 120)
NUM_RESULTS = 60
# Number of results to retrieve from a developer
DEV_RESULTS = 24
# Number of results to retrieve from similar. Range (1 - 60)
SIMILAR_RESULTS = 24
# pagTok post data strings to paginate through search results
PAGE_TOKENS = {
0: "",
1: "GAEiAggU:S:ANO1ljLtUJw",
2: "GAEiAggo:S:ANO1ljIeRQQ",
3: "GAEiAgg8:S:ANO1ljIM1CI",
4: "GAEiAghQ:S:ANO1ljLxWBY",
5: "GAEiAghk:S:ANO1ljJkC4I",
6: "GAEiAgh4:S:ANO1ljJfGC4",
7: "GAEiAwiMAQ==:S:ANO1ljL7Yco",
8: "GAEiAwigAQ==:S:ANO1ljLMTko",
9: "GAEiAwi0AQ==:S:ANO1ljJ2maA",
10: "GAEiAwjIAQ==:S:ANO1ljIG2D4",
11: "GAEiAwjcAQ==:S:ANO1ljJ9Wk0",
12: "GAEiAwjwAQ==:S:ANO1ljLFcVI",
}
# Regex to find page tokens within script tags
TOKEN_RE = r"GAEiA[\w=]{3,7}:S:ANO1lj[\w]{5}"
|
the-stack_0_24752
|
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Show the docstring from both the class and its __init__() method.
autoclass_content = "both"
# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_member_order
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "CircuitPython ansi_escape_code Library"
copyright = "2022 Stefan Krüger"
author = "Stefan Krüger"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
".env",
"CODE_OF_CONDUCT.md",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "CircuitPython_Ansi_escape_code_Librarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"CircuitPython_ansi_escape_code_Library.tex",
"CircuitPython ansi_escape_code Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"CircuitPython_ansi_escape_code_Library",
"CircuitPython ansi_escape_code Library Documentation",
[author],
1,
),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"CircuitPython_ansi_escape_code_Library",
"CircuitPython ansi_escape_code Library Documentation",
author,
"CircuitPython_ansi_escape_code_Library",
"One line description of project.",
"Miscellaneous",
),
]
|
the-stack_0_24754
|
# simple Helmholtz equation
from firedrake import *
mesh = UnitSquareMesh(10, 10)
V = FunctionSpace(mesh, "CG", 1)
u = TrialFunction(V)
v = TestFunction(V)
f = Function(V)
x, y = SpatialCoordinate(mesh)
f.interpolate((1+8*pi*pi)*cos(x*pi*2)*cos(y*pi*2))
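# f is chosen so that the exact solution of -div(grad(u)) + u = f
# (with natural Neumann boundary conditions) is u = cos(2*pi*x)*cos(2*pi*y)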
a = (dot(grad(v), grad(u)) + v * u) * dx
L = f * v * dx
u = Function(V)
solve(a == L, u,
solver_parameters={'ksp_converged_reason': None,
'ksp_type': 'cg',
'pc_type': 'icc'},
options_prefix='s')
|
the-stack_0_24756
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachineScaleSetVM(Resource):
"""Describes a virtual machine scale set virtual machine.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:ivar instance_id: The virtual machine instance ID.
:vartype instance_id: str
:ivar sku: The virtual machine SKU.
:vartype sku: ~azure.mgmt.compute.v2017_12_01.models.Sku
:ivar latest_model_applied: Specifies whether the latest model has been
applied to the virtual machine.
:vartype latest_model_applied: bool
:ivar vm_id: Azure VM unique ID.
:vartype vm_id: str
:ivar instance_view: The virtual machine instance view.
:vartype instance_view:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineInstanceView
:param hardware_profile: Specifies the hardware settings for the virtual
machine.
:type hardware_profile:
~azure.mgmt.compute.v2017_12_01.models.HardwareProfile
:param storage_profile: Specifies the storage settings for the virtual
machine disks.
:type storage_profile:
~azure.mgmt.compute.v2017_12_01.models.StorageProfile
:param os_profile: Specifies the operating system settings for the virtual
machine.
:type os_profile: ~azure.mgmt.compute.v2017_12_01.models.OSProfile
:param network_profile: Specifies the network interfaces of the virtual
machine.
:type network_profile:
~azure.mgmt.compute.v2017_12_01.models.NetworkProfile
:param diagnostics_profile: Specifies the boot diagnostic settings state.
<br><br>Minimum api-version: 2015-06-15.
:type diagnostics_profile:
~azure.mgmt.compute.v2017_12_01.models.DiagnosticsProfile
:param availability_set: Specifies information about the availability set
that the virtual machine should be assigned to. Virtual machines specified
in the same availability set are allocated to different nodes to maximize
availability. For more information about availability sets, see [Manage
the availability of virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
    <br><br> For more information on Azure planned maintenance, see [Planned
maintenance for virtual machines in
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
    <br><br> Currently, a VM can only be added to an availability set at creation
time. An existing VM cannot be added to an availability set.
:type availability_set: ~azure.mgmt.compute.v2017_12_01.models.SubResource
:ivar provisioning_state: The provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param license_type: Specifies that the image or disk that is being used
was licensed on-premises. This element is only used for images that
contain the Windows Server operating system. <br><br> Possible values are:
<br><br> Windows_Client <br><br> Windows_Server <br><br> If this element
is included in a request for an update, the value must match the initial
value. This value cannot be updated. <br><br> For more information, see
[Azure Hybrid Use Benefit for Windows
Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Minimum api-version: 2015-06-15
:type license_type: str
:param plan: Specifies information about the marketplace image used to
create the virtual machine. This element is only used for marketplace
images. Before you can use a marketplace image from an API, you must
enable the image for programmatic use. In the Azure portal, find the
marketplace image that you want to use and then click **Want to deploy
programmatically, Get Started ->**. Enter any required information and
then click **Save**.
:type plan: ~azure.mgmt.compute.v2017_12_01.models.Plan
:ivar resources: The virtual machine child extension resources.
:vartype resources:
list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineExtension]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'instance_id': {'readonly': True},
'sku': {'readonly': True},
'latest_model_applied': {'readonly': True},
'vm_id': {'readonly': True},
'instance_view': {'readonly': True},
'provisioning_state': {'readonly': True},
'resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'instance_id': {'key': 'instanceId', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'latest_model_applied': {'key': 'properties.latestModelApplied', 'type': 'bool'},
'vm_id': {'key': 'properties.vmId', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'license_type': {'key': 'properties.licenseType', 'type': 'str'},
'plan': {'key': 'plan', 'type': 'Plan'},
'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
}
def __init__(self, location, tags=None, hardware_profile=None, storage_profile=None, os_profile=None, network_profile=None, diagnostics_profile=None, availability_set=None, license_type=None, plan=None):
super(VirtualMachineScaleSetVM, self).__init__(location=location, tags=tags)
self.instance_id = None
self.sku = None
self.latest_model_applied = None
self.vm_id = None
self.instance_view = None
self.hardware_profile = hardware_profile
self.storage_profile = storage_profile
self.os_profile = os_profile
self.network_profile = network_profile
self.diagnostics_profile = diagnostics_profile
self.availability_set = availability_set
self.provisioning_state = None
self.license_type = license_type
self.plan = plan
self.resources = None
|
the-stack_0_24761
|
# -*- coding: utf-8 -*-
"""
Secret Sharing
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
from random import randint
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def mod_inverse(k, prime):
k = k % prime
if k < 0:
r = egcd(prime, -k)[2]
else:
r = egcd(prime, k)[2]
return (prime + r) % prime
def random_polynomial(degree, intercept, upper_bound):
""" Generates a random polynomial with positive coefficients.
"""
if degree < 0:
raise ValueError('Degree must be a non-negative number.')
coefficients = [intercept]
for i in range(degree):
random_coeff = randint(0, upper_bound-1)
coefficients.append(random_coeff)
return coefficients
def get_polynomial_points(coefficients, num_points, prime):
""" Calculates the first n polynomial points.
[ (1, f(1)), (2, f(2)), ... (n, f(n)) ]
"""
points = []
for x in range(1, num_points+1):
# start with x=1 and calculate the value of y
y = coefficients[0]
# calculate each term and add it to y, using modular math
for i in range(1, len(coefficients)):
exponentiation = (x**i) % prime
term = (coefficients[i] * exponentiation) % prime
y = (y + term) % prime
# add the point to the list of points
points.append((x, y))
return points
def modular_lagrange_interpolation(x, points, prime):
# break the points up into lists of x and y values
x_values, y_values = zip(*points)
# initialize f(x) and begin the calculation: f(x) = SUM( y_i * l_i(x) )
f_x = 0
for i in range(len(points)):
# evaluate the lagrange basis polynomial l_i(x)
numerator, denominator = 1, 1
for j in range(len(points)):
# don't compute a polynomial fraction if i equals j
if i == j:
continue
# compute a fraction and update the existing numerator + denominator
numerator = (numerator * (x - x_values[j])) % prime
denominator = (denominator * (x_values[i] - x_values[j])) % prime
# get the polynomial from the numerator + mod inverse of the denominator
lagrange_polynomial = numerator * mod_inverse(denominator, prime)
# multiply the current y and the evaluated polynomial and add it to f(x)
f_x = (prime + f_x + (y_values[i] * lagrange_polynomial)) % prime
return f_x
def secret_int_to_points(secret_int, point_threshold, num_points, prime):
""" Split a secret (integer) into shares (pair of integers / x,y coords).
Sample the points of a random polynomial with the y intercept equal to
the secret int.
"""
if point_threshold < 2:
raise ValueError("Threshold must be >= 2.")
    if point_threshold > num_points:
        raise ValueError("Threshold must be <= the total number of points.")
    if secret_int >= prime:
        raise ValueError("Error! Secret is too long for share calculation!")
coefficients = random_polynomial(point_threshold-1, secret_int, prime)
points = get_polynomial_points(coefficients, num_points, prime)
return points
def points_to_secret_int(points, prime):
""" Join int points into a secret int.
        Get the intercept of the polynomial interpolated through the given points.
"""
if not isinstance(points, list):
raise ValueError("Points must be in list form.")
for point in points:
        if not (isinstance(point, tuple) and len(point) == 2):
            raise ValueError("Each point must be a tuple of two values.")
        if not (isinstance(point[0], int) and
                isinstance(point[1], int)):
            raise ValueError("Each value in the point must be an int.")
x_values, y_values = zip(*points)
free_coefficient = modular_lagrange_interpolation(0, points, prime)
secret_int = free_coefficient # the secret int is the free coefficient
return secret_int
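if __name__ == '__main__':
    # Small self-check (not part of the original module): split a secret into 5 shares with
    # threshold 3 and recover it from any 3 of them. The Mersenne prime 2**127 - 1 is used
    # here only as an example modulus; it must be larger than the secret.
    prime = 2 ** 127 - 1
    secret = 12345678901234567890
    shares = secret_int_to_points(secret, 3, 5, prime)
    recovered = points_to_secret_int(shares[:3], prime)
    assert recovered == secret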
|
the-stack_0_24762
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
OAUTH_SCOPE = ['https://www.googleapis.com/auth/cloud-platform']
OAUTH_SCOPES = [
'https://www.googleapis.com/auth/devstorage.read_only',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/monitoring.write']
def test_defaults(plan_runner):
"Test resources created with variable defaults."
_, resources = plan_runner(FIXTURES_DIR)
assert len(resources) == 1
node_config = resources[0]['values']['node_config'][0]
assert node_config['oauth_scopes'] == OAUTH_SCOPES
assert 'service_account' not in node_config
def test_external_sa(plan_runner):
"Test resources created with externally managed sa."
_, resources = plan_runner(
FIXTURES_DIR, node_service_account='[email protected]')
assert len(resources) == 1
node_config = resources[0]['values']['node_config'][0]
assert node_config['oauth_scopes'] == OAUTH_SCOPES
assert node_config['service_account'] == '[email protected]'
def test_external_scopes(plan_runner):
"Test resources created with externally defined scopes."
oauth_scopes = '["https://www.googleapis.com/auth/cloud-platform"]'
_, resources = plan_runner(
FIXTURES_DIR, node_service_account_scopes=oauth_scopes)
assert len(resources) == 1
node_config = resources[0]['values']['node_config'][0]
assert node_config['oauth_scopes'] == OAUTH_SCOPE
assert 'service_account' not in node_config
def test_internal_sa(plan_runner):
"Test resources created with internally managed sa."
_, resources = plan_runner(FIXTURES_DIR, node_service_account_create='true')
assert len(resources) == 2
node_config = resources[0]['values']['node_config'][0]
assert node_config['oauth_scopes'] == OAUTH_SCOPE
assert 'service_account' not in node_config
|
the-stack_0_24764
|
'''VGGFace models for Keras.
# Reference:
- [Deep Face Recognition](http://www.robots.ox.ac.uk/~vgg/publications/2015/Parkhi15/parkhi15.pdf)
- [VGGFace2: A dataset for recognising faces across pose and age](http://www.robots.ox.ac.uk/~vgg/data/vgg_face2/vggface2.pdf)
'''
from __future__ import print_function
import tensorflow
from keras_vggface.models import RESNET50, VGG16, SENET50
def VGGFace(include_top=True, model='vgg16', weights='vggface',
input_tensor=None, input_shape=None,
pooling=None,
classes=None):
"""Instantiates the VGGFace architectures.
Optionally loads weights pre-trained
on VGGFace datasets. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "vggface" (pre-training on VGGFACE datasets).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
model: selects the one of the available architectures
vgg16, resnet50 or senet50 default is vgg16.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'vggface', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `vggface`'
'(pre-training on VGGFace Datasets).')
if model == 'vgg16':
if classes is None:
classes = 2622
if weights == 'vggface' and include_top and classes != 2622:
raise ValueError(
'If using `weights` as vggface original with `include_top`'
' as true, `classes` should be 2622')
return VGG16(include_top=include_top, input_tensor=input_tensor,
input_shape=input_shape, pooling=pooling,
weights=weights,
classes=classes)
if model == 'resnet50':
if classes is None:
classes = 8631
if weights == 'vggface' and include_top and classes != 8631:
raise ValueError(
'If using `weights` as vggface original with `include_top`'
' as true, `classes` should be 8631')
return RESNET50(include_top=include_top, input_tensor=input_tensor,
input_shape=input_shape, pooling=pooling,
weights=weights,
classes=classes)
if model == 'senet50':
if classes is None:
classes = 8631
if weights == 'vggface' and include_top and classes != 8631:
raise ValueError(
'If using `weights` as vggface original with `include_top`'
' as true, `classes` should be 8631')
return SENET50(include_top=include_top, input_tensor=input_tensor,
input_shape=input_shape, pooling=pooling,
weights=weights,
classes=classes)
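# Minimal usage sketch (assumes the pre-trained 'vggface' weights can be downloaded and that
# `preprocessed_images` already exists with shape (n, 224, 224, 3); parameters are illustrative):
#
#   from keras_vggface.vggface import VGGFace
#   model = VGGFace(model='resnet50', include_top=False,
#                   input_shape=(224, 224, 3), pooling='avg')
#   features = model.predict(preprocessed_images)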
|
the-stack_0_24766
|
# © MNELAB developers
#
# License: BSD (3-clause)
import math
import matplotlib.pyplot as plt
import numpy as np
from mne.stats import permutation_cluster_1samp_test as pcluster_test
from mne.time_frequency import tfr_multitaper
from mne.viz import plot_compare_evokeds
def _center_cmap(cmap, vmin, vmax, name="cmap_centered"):
"""
Center given colormap (ranging from vmin to vmax) at value 0.
Taken from MNE-Python v0.24, as it will be removed in MNE-Python v1.0.
Parameters
----------
cmap : matplotlib.colors.Colormap
The colormap to center around 0.
vmin : float
Minimum value in the data to map to the lower end of the colormap.
vmax : float
Maximum value in the data to map to the upper end of the colormap.
name : str
Name of the new colormap. Defaults to 'cmap_centered'.
Returns
-------
cmap_centered : matplotlib.colors.Colormap
The new colormap centered around 0.
Notes
-----
This function can be used in situations where vmin and vmax are not symmetric around
zero. Normally, this results in the value zero not being mapped to white anymore in many
colormaps. Using this function, the value zero will be mapped to white even for
asymmetric positive and negative value ranges. Note that this could also be achieved by
re-normalizing a given colormap by subclassing matplotlib.colors.Normalize as described
here:
https://matplotlib.org/users/colormapnorms.html#custom-normalization-two-linear-ranges
"""
from matplotlib.colors import LinearSegmentedColormap
vzero = abs(vmin) / float(vmax - vmin)
index_old = np.linspace(0, 1, cmap.N)
index_new = np.hstack([
np.linspace(0, vzero, cmap.N // 2, endpoint=False),
np.linspace(vzero, 1, cmap.N // 2),
])
colors = ("red", "green", "blue", "alpha")
cdict = {name: [] for name in colors}
for old, new in zip(index_old, index_new):
for color, name in zip(cmap(old), colors):
cdict[name].append((new, color, color))
return LinearSegmentedColormap(name, cdict)
def _get_rows_cols(n):
if n <= 3:
rows, cols = 1, n
else:
rows = round(math.sqrt(n))
cols = math.ceil(math.sqrt(n))
return rows, cols
def _calc_tfr(epochs, freqs, baseline, times, alpha=None):
"""
Calculate AverageTFR and significance masks for given epochs.
Adapted from https://mne.tools/dev/auto_examples/time_frequency/time_frequency_erds.html
Parameters
----------
epochs : mne.epochs.Epochs
Epochs extracted from a Raw instance.
freqs : np.ndarray
The frequencies in Hz.
baseline : array_like, shape (2,)
The time interval to apply rescaling / baseline correction.
times : array_like, shape (2,)
Start and end of crop time interval.
alpha : float, optional
If specified, calculate significance maps with threshold `alpha`, by default `None`.
Returns
-------
dict[str, tuple[mne.time_frequency.tfr.EpochsTFR, dict[str, np.ndarray | None]]]
A dictionary where keys are event IDs and values are tuples (`tfr_ev`, `masks`).
`tfr_ev` is the EpochsTFR object for the respective event. `masks` is again a
dictionary, where keys are channel names and values are significance masks.
Significance masks are `None` if `alpha` was not specified.
"""
tfr = tfr_multitaper(epochs, freqs, freqs, average=False, return_itc=False)
tfr.apply_baseline(baseline, mode="percent")
tfr.crop(*times)
pcluster_kwargs = dict(
n_permutations=100,
step_down_p=0.05,
seed=1,
buffer_size=None,
out_type='mask',
)
res = {}
for event in epochs.event_id:
tfr_ev = tfr[event]
masks = {}
for ch in range(epochs.info["nchan"]):
mask = None
if alpha is not None:
# positive clusters
_, c1, p1, _ = pcluster_test(tfr_ev.data[:, ch], tail=1, **pcluster_kwargs)
# negative clusters
_, c2, p2, _ = pcluster_test(tfr_ev.data[:, ch], tail=-1, **pcluster_kwargs)
c = np.stack(c1 + c2, axis=2) # combined clusters
p = np.concatenate((p1, p2)) # combined p-values
mask = c[..., p <= alpha].any(axis=-1)
masks[epochs.ch_names[ch]] = mask
res[event] = (tfr_ev, masks)
return res
def plot_erds(tfr_and_masks):
"""
Plot ERDS maps from given TFR and significance masks.
Parameters
----------
tfr_and_masks : dict[str, tuple[EpochsTFR, dict[str, np.ndarray |None]]]
A dictionary where keys are event IDs and values are tuples (`tfr_ev`, `masks`).
`tfr_ev` is the EpochsTFR object for the respective event. `masks` is again a
dictionary, where keys are channel names and values are significance masks.
Returns
-------
list[matplotlib.figure.Figure]
A list of the figure(s) generated, one figure per event.
"""
figs = []
for event, (tfr_ev, masks) in tfr_and_masks.items():
n_rows, n_cols = _get_rows_cols(tfr_ev.info["nchan"])
widths = n_cols * [10] + [1] # each map has width 10, each colorbar width 1
fig, axes = plt.subplots(n_rows, n_cols + 1, gridspec_kw={"width_ratios": widths})
vmin, vmax = -1, 2 # default for ERDS maps
cmap = _center_cmap(plt.cm.RdBu, vmin, vmax)
# skip the last column in `axes`, as it contains the colorbar
for (ch_name, mask), ax in zip(masks.items(), axes[..., :-1].flat):
tfr_ev.average().plot(
[ch_name],
vmin=vmin,
vmax=vmax,
cmap=(cmap, False),
axes=ax,
colorbar=False,
mask=mask,
mask_style="mask" if mask is not None else None, # avoid RuntimeWarning
show=False,
)
ax.set_title(ch_name, fontsize=10)
ax.axvline(0, linewidth=1, color="black", linestyle=":")
ax.set(xlabel="t (s)", ylabel="f (Hz)")
ax.label_outer()
for ax in axes[..., -1].flat:
fig.colorbar(axes.flat[0].images[-1], cax=ax)
fig.suptitle(f"ERDS – {event}")
figs.append(fig)
return figs
def plot_erds_topomaps(epochs, events, freqs, baseline, times):
"""
Plot ERDS topomaps, one figure per event.
Parameters
----------
epochs : mne.epochs.Epochs
Epochs extracted from a Raw instance.
events : list[str]
Events to include.
freqs : np.ndarray
Array of frequencies over which the average is taken.
baseline : tuple[float, float]
Start and end times for baseline correction.
times : tuple[float, float]
Start and end times between which the average is taken.
Returns
-------
list[matplotlib.figure.Figure]
A list of the figure(s) generated.
"""
vmin, vmax = -1, 2
cmap = _center_cmap(plt.cm.RdBu, vmin, vmax)
figs = []
for event in events:
tfr = tfr_multitaper(epochs[event], freqs, freqs, average=True, return_itc=False)
tfr.apply_baseline(baseline, mode="percent")
tfr.crop(*times)
fig = tfr.plot_topomap(
title=f"Event: {event}",
unit="ERDS",
vmin=vmin,
vmax=vmax,
cmap=cmap,
cbar_fmt="%.1f",
)
fig.set_size_inches(4, 3)
fig.set_tight_layout(True)
figs.append(fig)
return figs
def plot_evoked(
epochs,
picks,
events,
gfp,
spatial_colors,
topomap_times,
):
"""
Plot evoked potentials of different events for individual channels.
If multiple events are selected, one figure will be returned for each.
Parameters
----------
epochs : mne.epochs.Epochs
Epochs extracted from a Raw instance.
picks : list[str]
Channels to include.
events : list[str]
Events to include.
gfp : bool | "only"
Plot the global field power (GFP).
spatial_colors : bool
If `True`, the lines are color coded by mapping physical sensor
coordinates into color values. Spatially similar channels will have
similar colors. Bad channels will be dotted. If `False`, the good
channels are plotted black and bad channels red.
topomap_times : list[float] | "auto" | "peaks"
The time point(s) to plot. If `"auto"`, 5 evenly spaced topographies
between the first and last time instant will be shown. If `"peaks"`,
finds time points automatically by checking for 3 local maxima in
Global Field Power.
Returns
-------
list[matplotlib.figure.Figure]
A list of the figure(s) generated.
"""
figs = []
for event in events:
evoked = epochs[event].average(picks=picks)
if topomap_times:
figs.append(evoked.plot_joint(
times=topomap_times,
title=f'Event: {event}',
picks=picks,
ts_args={
"spatial_colors": spatial_colors,
"gfp": gfp,
}
))
else:
figs.append(evoked.plot(
window_title=f'Event: {event}',
picks=picks,
spatial_colors=spatial_colors,
gfp=gfp
))
return figs
def plot_evoked_comparison(
epochs,
picks,
events,
average_method,
combine,
confidence_intervals,
):
"""
Plot evoked potentials of different events averaged over channels.
If multiple channel types are selected, one figure will be returned for
each channel type.
Parameters
----------
epochs : mne.epochs.Epochs
Epochs extracted from a Raw instance.
picks : list[str]
Channels to include.
events : list[str]
Events to include.
average_method : {"mean", "median"}
How to combine the data during averaging.
combine : {"gfp", "std", mean", "median"}
How to combine information across channels.
confidence_intervals : bool
If `True`, plot confidence intervals as shaded areas.
Returns
-------
list[matplotlib.figure.Figure]
A list of the figure(s) generated.
"""
if confidence_intervals:
evokeds = {e: list(epochs[e].iter_evoked()) for e in events}
else:
evokeds = {e: epochs[e].average(picks=picks, method=average_method, by_event_type=True) for e in events} # noqa: E501
return plot_compare_evokeds(evokeds, picks=picks, combine=combine)
def plot_evoked_topomaps(epochs, events, average_method, times):
"""
Plot evoked topomaps.
One figure is generated for each event.
Parameters
----------
epochs : mne.epochs.Epochs
Epochs extracted from a Raw instance.
events : list[str]
Events to include.
average_method : "mean" | "median
How to average epochs.
times : list[float] | "auto" | "peaks" | "interactive"
The time point(s) to plot.
Returns
-------
list[matplotlib.figure.Figure]
A list of the figure(s) generated.
"""
figs = []
for event in events:
evoked = epochs[event].average(method=average_method)
figs.append(evoked.plot_topomap(times, title=f'Event: {event}'))
if times == 'interactive':
figs[-1].set_size_inches(6, 4)
return figs
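# Sketch of how these helpers fit together (assumes `epochs` is an mne.Epochs instance with at
# least one event; the frequency range and alpha below are illustrative):
#
#   freqs = np.arange(2, 31)
#   tfr_and_masks = _calc_tfr(epochs, freqs, baseline=(-1, 0), times=(-1, 4), alpha=0.05)
#   figs = plot_erds(tfr_and_masks)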
|
the-stack_0_24767
|
from unittest import TestCase
from atsd_client.models import Decompose, DecomposeMethod
LIMIT = 1
METHOD = DecomposeMethod.AUTO
WINDOW_LENGTH = 1
SINGULAR_VALUE_THRESHOLD = 1
INCORRECT_VALUE = "INCORRECT_VALUE"
class TestDecompose(TestCase):
def test_init(self):
decompose = Decompose(LIMIT, METHOD, WINDOW_LENGTH, SINGULAR_VALUE_THRESHOLD)
self.assertEqual(LIMIT, decompose.eigentripleLimit)
self.assertEqual(METHOD, decompose.method)
self.assertEqual(WINDOW_LENGTH, decompose.windowLength)
self.assertEqual(SINGULAR_VALUE_THRESHOLD, decompose.singularValueThreshold)
def test_set_eigentriple_limit(self):
decompose = Decompose()
decompose.set_eigentriple_limit(LIMIT)
self.assertEqual(LIMIT, decompose.eigentripleLimit)
self.assertRaises(ValueError, decompose.set_eigentriple_limit, INCORRECT_VALUE)
def test_set_method(self):
decompose = Decompose()
decompose.set_method(METHOD)
self.assertEqual(METHOD, decompose.method)
self.assertRaises(ValueError, decompose.set_method, INCORRECT_VALUE)
def test_set_window_length(self):
decompose = Decompose()
decompose.set_window_length(WINDOW_LENGTH)
self.assertEqual(WINDOW_LENGTH, decompose.windowLength)
self.assertRaises(ValueError, decompose.set_window_length, INCORRECT_VALUE)
def test_set_singular_value_threshold(self):
decompose = Decompose()
decompose.set_singular_value_threshold(SINGULAR_VALUE_THRESHOLD)
self.assertEqual(SINGULAR_VALUE_THRESHOLD, decompose.singularValueThreshold)
self.assertRaises(ValueError, decompose.set_singular_value_threshold, INCORRECT_VALUE)
|
the-stack_0_24768
|
from PIL import Image
import io
import time
import matplotlib.pyplot as plt
from decoder import Decoder
from channel import Channel
from coder import Coder
from generator import Generator
import random
from bitarray import bitarray
import bitarray.util as b_util
from komm import BCHCode
import numpy as np
from bch import BCH
from bch_params import bch_code_parameters
import sys
np.set_printoptions(threshold=sys.maxsize)
def error_factor(generated_array, decoded_array):
if(len(generated_array) != len(decoded_array)):
return None
error_counter = 0
for i in range(len(generated_array)):
if(generated_array[i] != decoded_array[i]):
error_counter += 1
return error_counter/len(generated_array)*100
def how_many_distortions(generated_array, decoded_array):
if(len(generated_array) != len(decoded_array)):
return None
error_counter = 0
for i in range(len(generated_array)):
if(generated_array[i] != decoded_array[i]):
error_counter += 1
return error_counter
def fill_with_zeros(array, n):
filled_array = array.copy()
for i in range(n-len(array)):
filled_array.append(0)
return filled_array
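# Worked example (illustrative, not part of the original script):
# error_factor([0, 1, 1, 0], [0, 1, 0, 0]) finds one mismatch out of four
# positions and returns 1/4 * 100 = 25.0, while how_many_distortions() on the
# same input returns the raw count 1. fill_with_zeros([1, 0], 4) pads a copy
# on the right, giving [1, 0, 0, 0].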
def main():
start = time.time()
# a = []
# b = []
# for i in range(0, 30):
generated_array = Generator().populate_array(2**18)
# print(generated_array)
coded_array = Coder().triple_code(generated_array)
# print(coded_array)
# a.append(i / 100)
# distorted_array = Channel(i/100).distort(coded_array)
distorted_array = Channel(0.15).distort(coded_array)
# print(distorted_array)
decoded_array = Decoder().decode(distorted_array)
# print(decoded_array)
print(error_factor(generated_array, decoded_array))
# b.append(error_factor(generated_array, decoded_array))
# print(a, b)
# plt.plot(a, b)
# plt.title("Error percentage depending on the probability p")
# plt.xlabel("Probability of error p")
# plt.ylabel("Error factor in %")
# plt.show()
end = time.time()
print("Time:", end-start)
"""
Etap II
"""
print("Stage 2")
# 2048
bch = BCH()
a = []
b = []
error = 0
sent_msg = b_util.urandom(512)
m = 8
t = 63
k = 9
# 7_21_29
# received_msg = BCH.code(sent_msg, m, t, k, 0.3)
# for i in range(0, 20, 1):
# for j in range(10):
# received_msg = BCH.code(sent_msg, 3, 1, 4, i/100)
# filled_array = fill_with_zeros(sent_msg, len(received_msg))
# # print(len(example_bit_array), len(received_msg))
# # # print(received_msg)
# # print("Error [%] - decoded msg: ", error_factor(example_bit_array, received_msg))
# error += error_factor(filled_array, received_msg)
# error /= 10
# a.append(i/100)
# b.append(error)
# print(a, b)
# plt.plot(a, b)
# plt.title(
# f"Error percentage depending on the probability p \nfor m = {m} k = {k} t = {t}")
# plt.xlabel("Probability of error p")
# plt.ylabel("Error factor in %")
# plt.show()
for i in range(0,20,3):
counter = 0
for m in bch_code_parameters:
for t in bch_code_parameters[m]:
counter +=1
received_msg = BCH.code(sent_msg, m, t, bch_code_parameters[m][t], i/100)
filled_array = fill_with_zeros(sent_msg, len(received_msg))
# print(len(example_bit_array), len(received_msg))
# # print(received_msg)
# print("Error [%] - decoded msg: ", error_factor(example_bit_array, received_msg))
error += error_factor(filled_array, received_msg)
a.append(i/100)
b.append(error/counter)
error = 0
print(counter)
print(a, b)
plt.plot(a, b)
plt.title("Error percentage depending on the probability p")
plt.xlabel("Probability of error p")
plt.ylabel("Error factor in %")
plt.show()
if __name__ == '__main__':
main()
# print("Input np array:", np_array, "Length:", len(np_array))
# print("Error [%] - decoded msg: ", error_factor(np_array, decoded_msg))
# print("Distorted bits/ All bits: ",
# how_many_distortions(encoded_msg, distorted_msg), "/", len(encoded_msg))
# print("Error bits/ All bits: ",
# how_many_distortions(np_array, decoded_msg), "/", len(np_array))
# print("Error [%] - received msg:", error_factor(np_array, received_msg))
|
the-stack_0_24769
|
# qubit number=2
# total number=13
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += X(1) # number=8
prog += RX(-0.14765485471872042,1) # number=9
prog += SWAP(1,0) # number=2
prog += SWAP(1,0) # number=3
prog += X(1) # number=5
prog += H(1) # number=10
prog += CZ(0,1) # number=11
prog += H(1) # number=12
prog += RX(-2.73004401596953,1) # number=6
prog += Z(1) # number=4
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
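# Illustrative example (added; not in the original script): for the bitstrings
# ['0', '1', '0'], summrise_results returns {'0': 2, '1': 1}.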
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil373.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
the-stack_0_24771
|
import re
import torch
from torch import nn, einsum
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch import Tensor
from torch.jit.annotations import List
import numpy as np
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
class HSI_Tr(nn.Module):
def __init__(self, config):
super(HSI_Tr, self).__init__()
self.layers = config['layers']
self.feature_root = config['feature_root']
self.channels = config['channels']
self.n_class = config['n_class']
self.use_bn = config['use_bn']
self.track_running_stats = config['track_running_stats']
self.bn_momentum = config['bn_momentum']
self.conv_repeat = config['conv_repeat']
if config['loss'] == 'BCE':
self.loss_func = torch.nn.BCELoss()
else:
pass
self.preprocessBN = nn.BatchNorm2d(self.channels, track_running_stats=False)
self.down_sample_convs = torch.nn.ModuleDict()
# down sample conv layers
for layer in range(self.layers):
feature_number = self.feature_root * (2 ** layer)
if layer == 0:
self.down_sample_convs['down{}'.format(layer)] = nn.Sequential(
self.get_conv_block(1, feature_number, 'down{}'.format(layer)))
else:
self.down_sample_convs['down{}'.format(layer)] = nn.Sequential(
self.get_conv_block(feature_number // 2, feature_number, 'down{}'.format(layer)))
self.up_sample_convs = torch.nn.ModuleDict()
# up sample conv layers
for layer in range(self.layers - 2, -1, -1):
feature_number = self.feature_root * (2 ** layer)
self.up_sample_convs['up{}'.format(layer)] = nn.Sequential(
self.get_conv_block(feature_number * 3, feature_number, 'up{}'.format(layer)))
self.to_pixel_embedding = nn.Sequential(
Rearrange('b c s h w -> (b h w) s c')
)
self.transformer = Transformer(self.feature_root, 2, 4, 4, 4)
self.pos_embedding = nn.Parameter(torch.randn(1, self.channels, self.feature_root),requires_grad=True)
self.cls_token = nn.Parameter(torch.randn(1, 1, self.feature_root),requires_grad=True)
self.mlp_head = nn.Sequential(
nn.LayerNorm(self.feature_root),
nn.Linear(self.feature_root, self.feature_root)
)
self.to_image = Rearrange('b h w c -> b c h w')
self.predict_layer = nn.Sequential(OrderedDict([
('predict_conv', nn.Conv2d(32, self.n_class, kernel_size=3, stride=1, padding=1)),
# ('predict_smax', nn.Sigmoid()),
('predict_smax', nn.Softmax2d()),
]))
def forward(self, x):
x = self.preprocessBN(x)
x = x.unsqueeze(1)
x = F.max_pool3d(x,kernel_size=2)
# # convert x from 1x32x1024x1280 to 1x1x32x1024x1280
down_features = []
for layer in range(self.layers):
if layer == 0:
down_features.append(self.down_sample_convs['down{}'.format(layer)](x))
else:
x = F.max_pool3d(down_features[-1], kernel_size=(1,2,2))
down_features.append(self.down_sample_convs['down{}'.format(layer)](x))
up_features = []
for layer in range(self.layers - 2, -1, -1):
if layer == self.layers - 2:
_cat = torch.cat((down_features[layer], F.interpolate(down_features[layer + 1], scale_factor=(1,2,2))), 1)
else:
_cat = torch.cat((down_features[layer], F.interpolate(up_features[-1], scale_factor=(1,2,2))), 1)
up_features.append(self.up_sample_convs['up{}'.format(layer)](_cat))
x = up_features[-1]
x = self.to_pixel_embedding(x)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
x += self.pos_embedding[:, :(n + 1)]
x = self.transformer(x)
x = x.reshape((1, 512, 640, -1))
x = self.to_image(x)
logits = self.predict_layer(x)
logits = F.interpolate(logits,scale_factor=2)
return logits
def get_conv_block(self, in_feature, out_feature, prefix):
_return = OrderedDict()
for i in range(self.conv_repeat):
_return[prefix+'_conv{}'.format(i)] = nn.Conv3d(in_feature, out_feature, kernel_size=(1,3,3), stride=1, padding=(0,1,1))
in_feature = out_feature
if self.use_bn == True:
_return[prefix+'_norm{}'.format(i)] = nn.BatchNorm3d(out_feature, momentum=self.bn_momentum, track_running_stats=self.track_running_stats)
_return[prefix + '_relu{}'.format(i)] = nn.ReLU(inplace=True)
return _return
def get_loss(self, logits, batch_y):
return self.loss_func(logits, batch_y)
def get_predict(self, logits, thresh=True):
logits = logits.detach().cpu().numpy()
pred = logits[0, 1, :, :]
if thresh:
pred = np.where(pred > 0.5, 1, 0)
return pred
def get_gt(self, batch_y):
batch_y = batch_y.detach().cpu().numpy()
batch_y = batch_y[0, 1, :, :]
return np.where(batch_y > 0.5, 1, 0)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = self.attend(dots)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
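# Illustrative shape check for the Transformer block above (a sketch, not part
# of the original model). The dims mirror HSI_Tr's call
# Transformer(feature_root, 2, 4, 4, 4) assuming feature_root=32; the batch
# and sequence sizes below are hypothetical.
#
#     tr = Transformer(dim=32, depth=2, heads=4, dim_head=4, mlp_dim=4)
#     tokens = torch.randn(8, 65, 32)   # (batch, sequence, dim)
#     out = tr(tokens)                  # residual attention + MLP keep the shape
#     assert out.shape == (8, 65, 32)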
|
the-stack_0_24772
|
"""
Utilities for gradescope autograding.
"""
import os
import json
from fractions import Fraction
from zipfile import ZipFile, ZIP_DEFLATED
from . import __version__ as ZUCCHINI_VERSION
from .constants import ASSIGNMENT_CONFIG_FILE, ASSIGNMENT_FILES_DIRECTORY
from .utils import ConfigDictMixin, ConfigDictNoMangleMixin, \
datetime_from_string, recursive_get_using_string
class GradescopeMetadata(object):
"""
Parse the metadata as described in:
https://gradescope-autograders.readthedocs.io/en/latest/submission_metadata/
"""
_ATTRS = [
('student_name', 'users.0.name', str),
('submission_date', 'created_at', datetime_from_string),
('due_date', 'assignment.due_date', datetime_from_string),
# The nested int(float(..)) deal is because int('100.0')
# explodes
('total_points', 'assignment.outline.0.weight',
lambda pts: int(float(pts))),
]
def __init__(self, json_dict):
for attr, key, type_ in self._ATTRS:
val = recursive_get_using_string(json_dict, key)
setattr(self, attr, type_(val))
@classmethod
def from_json_path(cls, json_path):
with open(json_path, 'r', errors='ignore') as json_fp:
return cls(json.load(json_fp))
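# Illustrative fragment of the submission metadata this parser expects (field
# paths follow the Gradescope docs linked above; the concrete values are made
# up):
#
# {"users": [{"name": "Ada Lovelace"}],
#  "created_at": "<ISO-8601 timestamp>",
#  "assignment": {"due_date": "<ISO-8601 timestamp>",
#                 "outline": [{"weight": "100.0"}]}}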
class GradescopeAutograderTestOutput(ConfigDictNoMangleMixin, ConfigDictMixin):
"""
Output of a single test in Gradescope JSON.
"""
def __init__(self, name=None, score=None, max_score=None, output=None):
self.name = name
self.score = score
self.max_score = max_score
self.output = output
class GradescopeAutograderOutput(ConfigDictNoMangleMixin, ConfigDictMixin):
"""
Hold Gradescope Autograder output as described in
https://gradescope-autograders.readthedocs.io/en/latest/specs/#output-format
"""
def __init__(self, score=None, tests=None, extra_data=None):
self.score = score
self.tests = [GradescopeAutograderTestOutput.from_config_dict(test)
for test in tests] if tests is not None else None
self.extra_data = extra_data
def to_config_dict(self, *args):
dict_ = super(GradescopeAutograderOutput, self).to_config_dict(*args)
if dict_.get('tests', None):
dict_['tests'] = [test.to_config_dict() for test in dict_['tests']]
return dict_
@staticmethod
def _two_decimals(grade, frac):
"""Convert a fraction to string with two decimal points"""
return '{:.02f}'.format(grade.to_float(frac))
@classmethod
def from_grade(cls, grade):
"""
Convert a grading_manager.Grade to Gradescope JSON.
"""
score = grade.score()
tests = []
# Store the component grades in the extra_data field
extra_data = {'component_grades': grade.serialized_component_grades()}
computed_grade = grade.computed_grade()
# Add penalties
for penalty in computed_grade.penalties:
if penalty.points_delta != 0:
# Hack: Display -37 as 0/37 and +37 as 37/37
fake_max_score = cls._two_decimals(
grade, abs(penalty.points_delta))
fake_score = cls._two_decimals(grade, Fraction(0)) \
if penalty.points_delta < 0 else fake_max_score
test = GradescopeAutograderTestOutput(
name=penalty.name,
score=fake_score,
max_score=fake_max_score)
tests.append(test)
# Add actual test results
for component in computed_grade.components:
if component.error:
test = GradescopeAutograderTestOutput(
name=component.name,
score=cls._two_decimals(grade, component.points_got),
max_score=cls._two_decimals(
grade, component.points_possible),
output='{}\n{}'.format(component.error,
component.error_verbose or ''))
tests.append(test)
else:
for part in component.parts:
if part.deductions:
deductions = 'Deductions: {}\n\n'.format(
', '.join(part.deductions))
else:
deductions = ''
test = GradescopeAutograderTestOutput(
name='{}: {}'.format(component.name, part.name),
score=cls._two_decimals(grade, part.points_got),
max_score=cls._two_decimals(
grade, part.points_possible),
output=deductions + part.log)
tests.append(test)
return cls(score=score, tests=tests, extra_data=extra_data)
def to_json_stream(self, fp):
json.dump(self.to_config_dict(), fp)
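# Illustrative shape of the JSON that to_json_stream emits (a sketch inferred
# from the fields above, not captured from a real run):
#
# {"score": ...,
#  "tests": [{"name": "Part 1", "score": "4.50", "max_score": "5.00",
#             "output": "..."}],
#  "extra_data": {"component_grades": [...]}}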
SETUP_SH = r'''#!/bin/bash
# THIS FILE WAS GENERATED BY ZUCCHINI
set -e
cd /autograder/source
# Prevent apt from prompting for input and hanging the build
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install -y python3 python3-pip python3-wheel {prereqs}
pip3 install {pip_install_arg}
{extra_setup_commands}
'''
RUN_AUTOGRADER = r'''#!/bin/bash
# THIS FILE WAS GENERATED BY ZUCCHINI
set -e
set -o pipefail
cd /autograder/source
zucc flatten /autograder/submission
{grade_cmd_prefix}zucc grade-submission /autograder/submission \
| zucc gradescope bridge /autograder/submission_metadata.json \
> /autograder/results/results.json
'''
RUN_GRAPHICAL_SH = r'''#!/bin/bash
cat >xorg.conf <<'EOF'
# This xorg configuration file is meant to be used by xpra
# to start a dummy X11 server.
# For details, please see:
# https://xpra.org/Xdummy.html
Section "ServerFlags"
Option "DontVTSwitch" "true"
Option "AllowMouseOpenFail" "true"
Option "PciForceNone" "true"
Option "AutoEnableDevices" "false"
Option "AutoAddDevices" "false"
EndSection
Section "Device"
Identifier "dummy_videocard"
Driver "dummy"
Option "ConstantDPI" "true"
VideoRam 192000
EndSection
Section "Monitor"
Identifier "dummy_monitor"
HorizSync 5.0 - 1000.0
VertRefresh 5.0 - 200.0
Modeline "1024x768" 18.71 1024 1056 1120 1152 768 786 789 807
EndSection
Section "Screen"
Identifier "dummy_screen"
Device "dummy_videocard"
Monitor "dummy_monitor"
DefaultDepth 24
SubSection "Display"
Viewport 0 0
Depth 24
Modes "1024x768"
Virtual 1024 768
EndSubSection
EndSection
EOF
/usr/lib/xorg/Xorg -noreset -logfile ./xorg.log -config ./xorg.conf :69 \
>/dev/null 2>&1 &
xorg_pid=$!
export DISPLAY=:69
"$@"
exitcode=$?
kill "$xorg_pid" || {
printf 'did not kill Xorg!\n' >&2
exit 1
}
exit $exitcode'''
class GradescopeAutograderZip(object):
"""
Generates a Gradesope autograder zip file from which Gradescope
generates a Docker image for grading.
"""
def __init__(self, path='.', prerequisites=None, extra_setup_commands=None,
needs_display=False, wheel_path=None):
self.path = path
self.prerequisites = prerequisites or []
self.extra_setup_commands = extra_setup_commands or []
self.needs_display = needs_display
self.wheel_path = wheel_path
# Need the dummy X video driver so run_graphical.sh can start a headless display
if self.needs_display:
self.prerequisites.append('xserver-xorg-video-dummy')
def _relative_path(self, abspath):
"""
Convert an absolute path to an assignment file to a path
relative to self.path.
"""
return os.path.relpath(abspath, self.path)
def _real_path(self, relpath):
"""
Convert a relative path to an assignment file to an absolute
path.
"""
return os.path.join(self.path, relpath)
def _write_file(self, file_path, zipfile, real_path=None):
"""
Add a file to the generated zip file. file_path is the
destination path in the .zip file. If real_path is not provided,
it will be self.path/file_path.
"""
if real_path is None:
real_path = self._real_path(file_path)
zipfile.write(real_path, file_path)
def _write_string(self, string, path, zipfile):
"""
Add a file to the generated zip file. file_path should be relative to
self.path.
"""
zipfile.writestr(path, string)
def _write_dir(self, dir_path, zipfile):
"""
Recursively add a directory to the generated zip file. dir_path
should be relative to self.path.
"""
real_path = self._real_path(dir_path)
for dirpath, _, filenames in os.walk(real_path):
for filename in filenames:
relpath = self._relative_path(os.path.join(dirpath, filename))
self._write_file(relpath, zipfile)
def write_zip(self, file):
"""
Write the autograder .zip to file. If file is a file-like
object, write it there, otherwise it should be a string
designating the destination path.
"""
with ZipFile(file, 'w', ZIP_DEFLATED) as zipfile:
self._write_file(ASSIGNMENT_CONFIG_FILE, zipfile)
grading_files = self._real_path(ASSIGNMENT_FILES_DIRECTORY)
if os.path.exists(grading_files):
self._write_dir(ASSIGNMENT_FILES_DIRECTORY, zipfile)
if self.needs_display:
self._write_string(RUN_GRAPHICAL_SH, 'run_graphical.sh',
zipfile)
grade_cmd_prefix = 'bash run_graphical.sh '
else:
grade_cmd_prefix = ''
run_autograder = RUN_AUTOGRADER.format(
grade_cmd_prefix=grade_cmd_prefix)
self._write_string(run_autograder, 'run_autograder', zipfile)
if self.wheel_path is None:
pip_install_arg = 'zucchini==' + ZUCCHINI_VERSION
else:
# Can't just name it `zucchini.whl' or something because
# this upsets pip
wheel_filename = os.path.basename(self.wheel_path)
self._write_file(wheel_filename, zipfile,
real_path=self.wheel_path)
pip_install_arg = wheel_filename
extra_setup_commands = '\n'.join(self.extra_setup_commands)
setup_sh = SETUP_SH.format(
pip_install_arg=pip_install_arg,
prereqs=' '.join(self.prerequisites),
extra_setup_commands=extra_setup_commands)
self._write_string(setup_sh, 'setup.sh', zipfile)
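# Illustrative usage (a sketch; the assignment path and prerequisite package
# are hypothetical):
#
#     builder = GradescopeAutograderZip(path='assignments/hw1',
#                                       prerequisites=['gcc'])
#     builder.write_zip('autograder.zip')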
|
the-stack_0_24773
|
import pandas as pd
import numpy as np
class SentimentScoreDFGenerator:
"""
The sentiment score is a DataFrame with a negative/positive score for each sentence.
This class must be integrated into the preprocessor.
"""
def __call__(self, text_df: pd.DataFrame, text_kw_df: pd.DataFrame):
"""
This method creates a DataFrame containing the positive/negative score of each sentence.
:param text_df:
:param text_kw_df:
:return:
"""
if text_df is None:
return None
if text_kw_df is None:
sentiment_score_df = pd.DataFrame({"sidx": text_df.sidx.tolist()})
sentiment_score_df['nscore'] = 0
sentiment_score_df['pscore'] = 0
return sentiment_score_df
# get the sentiment score sscore of each sentence.
npdf = self.__sum_sentiment_scores(text_df, text_kw_df)
sentiment_score_df = npdf.iloc[:, 0:3]
print("\nSentiment score df\n{}".format(sentiment_score_df))
return sentiment_score_df
@classmethod
def __get_kwscores(cls, text_df, text_kw_df):
kwscore_df = pd.DataFrame(list(set(text_df.sidx)))
kwscore_df['sent_score'] = 0
kwscore_df.sent_score = kwscore_df.apply(
lambda row: sum([i for i in text_kw_df[text_kw_df.sidx == row.name].iscore]), axis=1
)
kwscore_df = kwscore_df.astype(object).replace(np.nan, 0.0)
kwscore_df['order_bonus'] = cls.__get_order_bonus(text_df)
kwscore_df['sent_kwscore'] = kwscore_df.sent_score * kwscore_df.order_bonus
return kwscore_df
@classmethod
def __sum_sentiment_scores(cls, text_df, text_kw_df):
# calculate for both negative and positive
npdf = pd.DataFrame({'sidx': list(set(text_df.sidx))})
npdf['nscore'] = 0
npdf['pscore'] = 0
# This previously only counted keywords whose subject was "I"; it is temporarily simplified to count all keywords.
for ridx, row in text_kw_df.iterrows():
if row.sscore > 0:
npdf.pscore[row.sidx] += row.sscore
else:
npdf.nscore[row.sidx] += row.sscore
return npdf
@staticmethod
def __get_order_bonus(text_df):
num_of_sentences = len(set(text_df.sidx.values))
if num_of_sentences == 0:
return [1]
margin = 0.5 / num_of_sentences
order_bonus = [1 + i * margin for i in range(0, num_of_sentences)]
return order_bonus
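# Worked example (illustrative): with 4 sentences the margin is 0.5 / 4 = 0.125,
# so sentences later in the text get linearly increasing weights
# [1.0, 1.125, 1.25, 1.375].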
|
the-stack_0_24776
|
import pathlib
from typing import Any, Dict, Union, cast
import torch
from determined import experimental, util
from determined.pytorch import PyTorchTrial, PyTorchTrialContext
def load_model(
ckpt_dir: pathlib.Path, metadata: Dict[str, Any], **kwargs: Any
) -> Union[PyTorchTrial, torch.nn.Module]:
checkpoint = torch.load(ckpt_dir.joinpath("state_dict.pth"), **kwargs) # type: ignore
trial_cls, trial_context = experimental._load_trial_on_local(
ckpt_dir.joinpath("code"),
training=False,
config=metadata["experiment_config"],
hparams=metadata["hparams"],
)
trial_context = cast(PyTorchTrialContext, trial_context)
trial = cast(PyTorchTrial, trial_cls(trial_context))
if "model_state_dict" in checkpoint:
# Backward compatible with older checkpoint format.
model = trial.build_model()
model.load_state_dict(checkpoint["model_state_dict"])
return model
else:
# Backward compatible with older interface
if util.is_overridden(trial.build_model, PyTorchTrial):
model = trial.build_model()
model.load_state_dict(checkpoint["models_state_dict"][0])
return model
else:
for idx, model in enumerate(trial_context.models):
model.load_state_dict(checkpoint["models_state_dict"][idx])
return trial
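# Illustrative call (a sketch; the checkpoint directory and metadata dict are
# hypothetical and must come from a real Determined checkpoint):
#
#     model = load_model(pathlib.Path("ckpt_dir"), metadata, map_location="cpu")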
|
the-stack_0_24777
|
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Pre-process right-to-left languages.
# You can use it either standalone, or through import_po_from_branches or
# update_trunk.
#
# Notes: This has been tested on Linux, not 100% it will work nicely on
# Windows or OsX.
# This uses ctypes, as there is no py3 binding for fribidi currently.
# This implies you only need the compiled C library to run it.
# Finally, note that it handles some formatting/escape codes (like
# \", %s, %x12, %.4f, etc.), protecting them from ugly (evil) fribidi,
# which seems completely unaware of such things (as unicode is...).
import sys
import ctypes
import re
#define FRIBIDI_MASK_NEUTRAL 0x00000040L /* Is neutral */
FRIBIDI_PAR_ON = 0x00000040
#define FRIBIDI_FLAG_SHAPE_MIRRORING 0x00000001
#define FRIBIDI_FLAG_REORDER_NSM 0x00000002
#define FRIBIDI_FLAG_SHAPE_ARAB_PRES 0x00000100
#define FRIBIDI_FLAG_SHAPE_ARAB_LIGA 0x00000200
#define FRIBIDI_FLAG_SHAPE_ARAB_CONSOLE 0x00000400
#define FRIBIDI_FLAG_REMOVE_BIDI 0x00010000
#define FRIBIDI_FLAG_REMOVE_JOINING 0x00020000
#define FRIBIDI_FLAG_REMOVE_SPECIALS 0x00040000
#define FRIBIDI_FLAGS_DEFAULT ( \
# FRIBIDI_FLAG_SHAPE_MIRRORING | \
# FRIBIDI_FLAG_REORDER_NSM | \
# FRIBIDI_FLAG_REMOVE_SPECIALS )
#define FRIBIDI_FLAGS_ARABIC ( \
# FRIBIDI_FLAG_SHAPE_ARAB_PRES | \
# FRIBIDI_FLAG_SHAPE_ARAB_LIGA )
FRIBIDI_FLAG_SHAPE_MIRRORING = 0x00000001
FRIBIDI_FLAG_REORDER_NSM = 0x00000002
FRIBIDI_FLAG_REMOVE_SPECIALS = 0x00040000
FRIBIDI_FLAG_SHAPE_ARAB_PRES = 0x00000100
FRIBIDI_FLAG_SHAPE_ARAB_LIGA = 0x00000200
FRIBIDI_FLAGS_DEFAULT = FRIBIDI_FLAG_SHAPE_MIRRORING | FRIBIDI_FLAG_REORDER_NSM | FRIBIDI_FLAG_REMOVE_SPECIALS
FRIBIDI_FLAGS_ARABIC = FRIBIDI_FLAG_SHAPE_ARAB_PRES | FRIBIDI_FLAG_SHAPE_ARAB_LIGA
MENU_DETECT_REGEX = re.compile("%x\\d+\\|")
##### Kernel processing funcs. #####
def protect_format_seq(msg):
"""
Find some specific escaping/formatting sequences (like \", %s, etc.,
and protect them from any modification!
"""
# LRM = "\u200E"
# RLM = "\u200F"
LRE = "\u202A"
RLE = "\u202B"
PDF = "\u202C"
LRO = "\u202D"
RLO = "\u202E"
uctrl = {LRE, RLE, PDF, LRO, RLO}
# Most likely incomplete, but seems to cover current needs.
format_codes = set("tslfd")
digits = set(".0123456789")
if not msg:
return msg
elif MENU_DETECT_REGEX.search(msg):
# An ugly "menu" message, just force it whole LRE if not yet done.
if msg[0] not in {LRE, LRO}:
msg = LRE + msg
idx = 0
ret = []
ln = len(msg)
while idx < ln:
dlt = 1
# # If we find a control char, skip any additional protection!
# if msg[idx] in uctrl:
# ret.append(msg[idx:])
# break
# \" or \'
if idx < (ln - 1) and msg[idx] == '\\' and msg[idx + 1] in "\"\'":
dlt = 2
# %x12|
elif idx < (ln - 2) and msg[idx] == '%' and msg[idx + 1] in "x" and msg[idx + 2] in digits:
dlt = 2
while (idx + dlt) < ln and msg[idx + dlt] in digits:
dlt += 1
if (idx + dlt) < ln and msg[idx + dlt] == '|':
dlt += 1
# %.4f
elif idx < (ln - 3) and msg[idx] == '%' and msg[idx + 1] in digits:
dlt = 2
while (idx + dlt) < ln and msg[idx + dlt] in digits:
dlt += 1
if (idx + dlt) < ln and msg[idx + dlt] in format_codes:
dlt += 1
else:
dlt = 1
# %s
elif idx < (ln - 1) and msg[idx] == '%' and msg[idx + 1] in format_codes:
dlt = 2
if dlt > 1:
ret.append(LRE)
ret += msg[idx:idx + dlt]
idx += dlt
if dlt > 1:
ret.append(PDF)
return "".join(ret)
def log2vis(msgs, settings):
"""
Globally mimics deprecated fribidi_log2vis.
msgs should be an iterable of messages to rtl-process.
"""
fbd = ctypes.CDLL(settings.FRIBIDI_LIB)
for msg in msgs:
msg = protect_format_seq(msg)
fbc_str = ctypes.create_unicode_buffer(msg)
ln = len(fbc_str) - 1
# print(fbc_str.value, ln)
btypes = (ctypes.c_int * ln)()
embed_lvl = (ctypes.c_uint8 * ln)()
pbase_dir = ctypes.c_int(FRIBIDI_PAR_ON)
jtypes = (ctypes.c_uint8 * ln)()
flags = FRIBIDI_FLAGS_DEFAULT | FRIBIDI_FLAGS_ARABIC
# Find out direction of each char.
fbd.fribidi_get_bidi_types(fbc_str, ln, ctypes.byref(btypes))
# print(*btypes)
fbd.fribidi_get_par_embedding_levels(btypes, ln,
ctypes.byref(pbase_dir),
embed_lvl)
# print(*embed_lvl)
# Joinings for arabic chars.
fbd.fribidi_get_joining_types(fbc_str, ln, jtypes)
# print(*jtypes)
fbd.fribidi_join_arabic(btypes, ln, embed_lvl, jtypes)
# print(*jtypes)
# Final Shaping!
fbd.fribidi_shape(flags, embed_lvl, ln, jtypes, fbc_str)
# print(fbc_str.value)
# print(*(ord(c) for c in fbc_str))
# And now, the reordering.
# Note that here, we expect a single line, so no need to do
# fancy things...
fbd.fribidi_reorder_line(flags, btypes, ln, 0, pbase_dir, embed_lvl,
fbc_str, None)
# print(fbc_str.value)
# print(*(ord(c) for c in fbc_str))
yield fbc_str.value
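# Illustrative call (a sketch; `Settings` is a hypothetical stand-in for the
# object the real pipeline passes in; it only needs a FRIBIDI_LIB attribute
# pointing at the compiled fribidi shared library):
#
#     class Settings:
#         FRIBIDI_LIB = "libfribidi.so.0"
#
#     for visual in log2vis(["مرحبا %s"], Settings()):
#         print(visual)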
|
the-stack_0_24779
|
#
# Copyright 2018 DreamWorks Animation L.L.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function
from setuptools import setup, find_packages
from glob import glob
PACKAGE = "usdmanager"
execfile("{}/version.py".format(PACKAGE))
VERSION = __version__
setup(
name=PACKAGE,
version=VERSION,
description="Tool for browsing, editing, and managing USD and other text files.",
author="DreamWorks Animation",
author_email="[email protected]",
maintainer="Mark Sandell, DreamWorks Animation",
maintainer_email="[email protected]",
url="https://github.com/dreamworksanimation/usdmanager",
long_description=open("README.md").read(),
classifiers=[
# Get classifiers from:
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Natural Language :: English",
"Operating System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"License :: OSI Approved :: Apache Software License",
],
packages=find_packages(),
# package_data will only find files that are located within python packages
package_data={
"usdmanager": [
"highlighters/*.py",
"plugins/*.py",
"*.json",
"*.ui"
]
},
# data_files will find all other files. It is a list of two member tuples.
# The first item of the tuple is the desired destination folder
# The second member of the tuple is a list of source files.
# Given data_files=[("xml_data", ["xml_examples/xml1.xml"])], xml1.xml will
# be copied to the "xml_data" folder of the destination package.
# the xml_examples folder will not be copied or created.
data_files=[("usdmanager", ["usdmanager/usdviewstyle.qss"])],
scripts=glob("scripts/*"),
install_requires=[
"Qt.py>=1.1",
"setuptools", # For pkg_resources
],
setup_requires=[
"setuptools>=2.2",
],
tests_require=[],
dependency_links=[],
)
|
the-stack_0_24780
|
import argparse
from acccmip5.access_cm import SearchCmip5
from acccmip5.download_dat import DownloadCmip5
from acccmip5.utilities.util import _check_list
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-dir", help="Download directory.", default=None)
parser.add_argument("-o","--output-options", help="S for 'Searching' or D for 'Downloading'. Use M to initiate the CMIP5DB module.", required=True)
parser.add_argument("-m", help="Model names", default=None)
parser.add_argument("-e", help="Experiment names", default=None)
parser.add_argument("-v", help="Variable names", default=None)
parser.add_argument("-f", help="Output frequency", default=None)
parser.add_argument("-r", help="Output realm", default=None)
parser.add_argument("-rlzn", help="Select realization", default=None)
parser.add_argument("-c", help="Checker: yes to check inputs", default=None)
parser.add_argument("-desc", help="Description: yes to print out experiment description", default=None)
parser.add_argument("-skip", help="Skip any item in your download", default=None)
args = parser.parse_args()
model = _check_list(args.m)
experiment = _check_list(args.e)
variable = _check_list(args.v)
frequency = _check_list(args.f)
realm = _check_list(args.r)
check = args.c
rlzn = _check_list(args.rlzn)
desc = args.desc
out = args.output_options
dl_dir = args.dir
skipped = args.skip
if (out == 'S'):
SearchCmip5(model=model, experiment=experiment, variable=variable, frequency=frequency, realm=realm, check=check, desc=desc)
elif (out == 'D'):
DownloadCmip5(model=model, experiment=experiment, variable=variable, frequency=frequency, realm=realm, check=check, path=dl_dir, rlzn=rlzn, skip=skipped)
elif (out == 'M'):
SearchCmip5(module='on', model=model, experiment=experiment, variable=variable, frequency=frequency, realm=realm)
|
the-stack_0_24781
|
import tumblr, reddit, time, re
class Bot():
def __init__(self, subreddit, queryType = 'hot', limit = 20, timer = 3600):
self.redditAPI = reddit.API()
self.tumblrAPI = tumblr.API()
self.subreddit = subreddit
self.timer = timer
self.queryType = queryType.lower()
self.limit = limit
self.latest = None
# get latest posts from specified subreddit via reddit API
def getLatestRedditPosts(self):
posts = self.redditAPI.getPosts(subreddit=self.subreddit,limit = self.limit, queryType = self.queryType, after=self.latest)
# update self.latest for later paginated queries
if(len(posts) > 0):
self.latest = "t3_" + str(posts[-1].id)
return posts
# get song artist from reddit post title
def getSongArtist(self, postTitle):
try:
return re.search('-{1,2} (.+) \[|\(', postTitle).groups()[0].replace('&amp;', '&')
except:
return None
# get song title from reddit post title
def getSongTitle(self, postTitle):
try:
return re.search('(.+) -{1,2}', postTitle).groups()[0]
except:
return None
# get song genres from reddit post title
def getSongGenres(self, postTitle):
try:
allGenres = re.search('\[(.+)\]', postTitle).groups()[0];
except:
return None
if '/' in allGenres:
genres = [x.strip(' ').title() for x in allGenres.split('/')]
elif ',' in allGenres:
genres = [x.strip(' ').title() for x in allGenres.split(',')]
else:
genres = [allGenres.strip(' ').title()]
return genres
# get song year from reddit post title
def getSongYear(self, postTitle):
try:
return re.search('\((\d+)\)', postTitle).groups()[0];
except:
return None
# pull necessary information from reddit posts
def getFormattedRedditPosts(self):
redditPosts = self.getLatestRedditPosts()
formattedPosts = []
for post in (x for x in redditPosts if x is not None):
formattedPost = {}
# only parse and save post if it isn't a self post
if(not re.search('reddit.com', post.url.lower(), re.IGNORECASE)):
formattedPost['url'] = post.url
formattedPost['artist'] = self.getSongArtist(post.title)
formattedPost['songTitle'] = self.getSongTitle(post.title)
formattedPost['genres'] = self.getSongGenres(post.title)
formattedPost['songYear'] = self.getSongYear(post.title)
# only process songs newer than 2012
if(formattedPost['songYear'] is not None and int(formattedPost['songYear']) > 2012):
formattedPosts.append(formattedPost)
return formattedPosts
# create Tumblr posts for all retrieved reddit posts
def createTumblrPosts(self, redditPosts):
for post in redditPosts:
# do something with youtube links
if(re.search('youtube.com', post['url'], re.IGNORECASE)):
self.tumblrAPI.createYoutubePost(post)
# do something with vimeo links
if(re.search('vimeo.com', post['url'], re.IGNORECASE)):
self.tumblrAPI.createVimeoPost(post)
# do something with audio links
elif(re.search('soundcloud.com|bandcamp.com', post['url'], re.IGNORECASE)):
self.tumblrAPI.createAudioPost(post)
# query for reddit posts and subsequently create Tumblr posts
def process(self):
redditPosts = self.getFormattedRedditPosts()
if(len(redditPosts) > 0):
self.createTumblrPosts(redditPosts)
def run(self):
cycleCount = 0
while True:
# start from beginning every 12 hours if we're retrieving 'hot' posts
if(self.queryType.lower() == 'hot'):
if(cycleCount == 12):
self.latest = None
cycleCount = 0
cycleCount += 1
self.process()
time.sleep(self.timer)
|
the-stack_0_24782
|
from __future__ import print_function, division
from collections import defaultdict
import operator
from sympy.core.sympify import sympify
from sympy.core.basic import Basic, C
from sympy.core.singleton import S
from sympy.core.operations import AssocOp
from sympy.core.cache import cacheit
from sympy.core.logic import fuzzy_not
from sympy.core.compatibility import cmp_to_key, reduce, xrange
from sympy.core.expr import Expr
# internal marker to indicate:
# "there are still non-commutative objects -- don't forget to process them"
class NC_Marker:
is_Order = False
is_Mul = False
is_Number = False
is_Poly = False
is_commutative = False
# Key for sorting commutative args in canonical order
_args_sortkey = cmp_to_key(Basic.compare)
def _mulsort(args):
# in-place sorting of args
args.sort(key=_args_sortkey)
def _unevaluated_Mul(*args):
"""Return a well-formed unevaluated Mul: Numbers are collected and
put in slot 0 and args are sorted. Use this when args have changed
but you still want to return an unevaluated Mul.
Examples
========
>>> from sympy.core.mul import _unevaluated_Mul as uMul
>>> from sympy import S, sqrt, Mul
>>> from sympy.abc import x
>>> a = uMul(*[S(3.0), x, S(2)])
>>> a.args[0]
6.00000000000000
>>> a.args[1]
x
Beyond the Number being in slot 0, there is no other flattening of
arguments, but two unevaluated Muls with the same arguments will
always compare as equal during testing:
>>> m = uMul(sqrt(2), sqrt(3))
>>> m == uMul(sqrt(3), sqrt(2))
True
>>> m == Mul(*m.args)
False
"""
args = list(args)
newargs = []
ncargs = []
co = S.One
while args:
a = args.pop()
if a.is_Mul:
c, nc = a.args_cnc()
args.extend(c)
if nc:
ncargs.append(Mul._from_args(nc))
elif a.is_Number:
co *= a
else:
newargs.append(a)
_mulsort(newargs)
if co is not S.One:
newargs.insert(0, co)
if ncargs:
newargs.append(Mul._from_args(ncargs))
return Mul._from_args(newargs)
class Mul(Expr, AssocOp):
__slots__ = []
is_Mul = True
#identity = S.One
# cyclic import, so defined in numbers.py
@classmethod
def flatten(cls, seq):
"""Return commutative, noncommutative and order arguments by
combining related terms.
Notes
=====
* In an expression like ``a*b*c``, python process this through sympy
as ``Mul(Mul(a, b), c)``. This can have undesirable consequences.
- Sometimes terms are not combined as one would like:
{c.f. https://github.com/sympy/sympy/issues/4596}
>>> from sympy import Mul, sqrt
>>> from sympy.abc import x, y, z
>>> 2*(x + 1) # this is the 2-arg Mul behavior
2*x + 2
>>> y*(x + 1)*2
2*y*(x + 1)
>>> 2*(x + 1)*y # 2-arg result will be obtained first
y*(2*x + 2)
>>> Mul(2, x + 1, y) # all 3 args simultaneously processed
2*y*(x + 1)
>>> 2*((x + 1)*y) # parentheses can control this behavior
2*y*(x + 1)
Powers with compound bases may not find a single base to
combine with unless all arguments are processed at once.
Post-processing may be necessary in such cases.
{c.f. https://github.com/sympy/sympy/issues/5728}
>>> a = sqrt(x*sqrt(y))
>>> a**3
(x*sqrt(y))**(3/2)
>>> Mul(a,a,a)
(x*sqrt(y))**(3/2)
>>> a*a*a
x*sqrt(y)*sqrt(x*sqrt(y))
>>> _.subs(a.base, z).subs(z, a.base)
(x*sqrt(y))**(3/2)
- If more than two terms are being multiplied then all the
previous terms will be re-processed for each new argument.
So if each of ``a``, ``b`` and ``c`` were :class:`Mul`
expression, then ``a*b*c`` (or building up the product
with ``*=``) will process all the arguments of ``a`` and
``b`` twice: once when ``a*b`` is computed and again when
``c`` is multiplied.
Using ``Mul(a, b, c)`` will process all arguments once.
* The results of Mul are cached according to arguments, so flatten
will only be called once for ``Mul(a, b, c)``. If you can
structure a calculation so the arguments are most likely to be
repeats then this can save time in computing the answer. For
example, say you had a Mul, M, that you wished to divide by ``d[i]``
and multiply by ``n[i]`` and you suspect there are many repeats
in ``n``. It would be better to compute ``M*n[i]/d[i]`` rather
than ``M/d[i]*n[i]`` since every time n[i] is a repeat, the
product, ``M*n[i]`` will be returned without flattening -- the
cached value will be returned. If you divide by the ``d[i]``
first (and those are more unique than the ``n[i]``) then that will
create a new Mul, ``M/d[i]`` the args of which will be traversed
again when it is multiplied by ``n[i]``.
{c.f. https://github.com/sympy/sympy/issues/5706}
This consideration is moot if the cache is turned off.
NB
--
The validity of the above notes depends on the implementation
details of Mul and flatten which may change at any time. Therefore,
you should only consider them when your code is highly performance
sensitive.
Removal of 1 from the sequence is already handled by AssocOp.__new__.
"""
rv = None
if len(seq) == 2:
a, b = seq
if b.is_Rational:
a, b = b, a
assert a is not S.One
if not a.is_zero and a.is_Rational:
r, b = b.as_coeff_Mul()
if b.is_Add:
if r is not S.One: # 2-arg hack
# leave the Mul as a Mul
rv = [cls(a*r, b, evaluate=False)], [], None
elif b.is_commutative:
if a is S.One:
rv = [b], [], None
else:
r, b = b.as_coeff_Add()
bargs = [_keep_coeff(a, bi) for bi in Add.make_args(b)]
_addsort(bargs)
ar = a*r
if ar:
bargs.insert(0, ar)
bargs = [Add._from_args(bargs)]
rv = bargs, [], None
if rv:
return rv
# apply associativity, separate commutative part of seq
c_part = [] # out: commutative factors
nc_part = [] # out: non-commutative factors
nc_seq = []
coeff = S.One # standalone term
# e.g. 3 * ...
c_powers = [] # (base,exp) n
# e.g. (x,n) for x
num_exp = [] # (num-base, exp) y
# e.g. (3, y) for ... * 3 * ...
neg1e = S.Zero # exponent on -1 extracted from Number-based Pow and I
pnum_rat = {} # (num-base, Rat-exp) 1/2
# e.g. (3, 1/2) for ... * 3 * ...
order_symbols = None
# --- PART 1 ---
#
# "collect powers and coeff":
#
# o coeff
# o c_powers
# o num_exp
# o neg1e
# o pnum_rat
#
# NOTE: this is optimized for all-objects-are-commutative case
for o in seq:
# O(x)
if o.is_Order:
o, order_symbols = o.as_expr_variables(order_symbols)
# Mul([...])
if o.is_Mul:
if o.is_commutative:
seq.extend(o.args) # XXX zerocopy?
else:
# NCMul can have commutative parts as well
for q in o.args:
if q.is_commutative:
seq.append(q)
else:
nc_seq.append(q)
# append non-commutative marker, so we don't forget to
# process scheduled non-commutative objects
seq.append(NC_Marker)
continue
# 3
elif o.is_Number:
if o is S.NaN or coeff is S.ComplexInfinity and o is S.Zero:
# we know for sure the result will be nan
return [S.NaN], [], None
elif coeff.is_Number: # it could be zoo
coeff *= o
if coeff is S.NaN:
# we know for sure the result will be nan
return [S.NaN], [], None
continue
elif o is S.ComplexInfinity:
if not coeff:
# 0 * zoo = NaN
return [S.NaN], [], None
if coeff is S.ComplexInfinity:
# zoo * zoo = zoo
return [S.ComplexInfinity], [], None
coeff = S.ComplexInfinity
continue
elif o is S.ImaginaryUnit:
neg1e += S.Half
continue
elif o.is_commutative:
# e
# o = b
b, e = o.as_base_exp()
# y
# 3
if o.is_Pow:
if b.is_Number:
# get all the factors with numeric base so they can be
# combined below, but don't combine negatives unless
# the exponent is an integer
if e.is_Rational:
if e.is_Integer:
coeff *= Pow(b, e) # it is an unevaluated power
continue
elif e.is_negative: # also a sign of an unevaluated power
seq.append(Pow(b, e))
continue
elif b.is_negative:
neg1e += e
b = -b
if b is not S.One:
pnum_rat.setdefault(b, []).append(e)
continue
elif b.is_positive or e.is_integer:
num_exp.append((b, e))
continue
elif b is S.ImaginaryUnit and e.is_Rational:
neg1e += e/2
continue
c_powers.append((b, e))
# NON-COMMUTATIVE
# TODO: Make non-commutative exponents not combine automatically
else:
if o is not NC_Marker:
nc_seq.append(o)
# process nc_seq (if any)
while nc_seq:
o = nc_seq.pop(0)
if not nc_part:
nc_part.append(o)
continue
# b c b+c
# try to combine last terms: a * a -> a
o1 = nc_part.pop()
b1, e1 = o1.as_base_exp()
b2, e2 = o.as_base_exp()
new_exp = e1 + e2
# Only allow powers to combine if the new exponent is
# not an Add. This allow things like a**2*b**3 == a**5
# if a.is_commutative == False, but prohibits
# a**x*a**y and x**a*x**b from combining (x,y commute).
if b1 == b2 and (not new_exp.is_Add):
o12 = b1 ** new_exp
# now o12 could be a commutative object
if o12.is_commutative:
seq.append(o12)
continue
else:
nc_seq.insert(0, o12)
else:
nc_part.append(o1)
nc_part.append(o)
# We do want a combined exponent if it would not be an Add, such as
# y 2y 3y
# x * x -> x
# We determine if two exponents have the same term by using
# as_coeff_Mul.
#
# Unfortunately, this isn't smart enough to consider combining into
# exponents that might already be adds, so things like:
# z - y y
# x * x will be left alone. This is because checking every possible
# combination can slow things down.
# gather exponents of common bases...
def _gather(c_powers):
new_c_powers = []
common_b = {} # b:e
for b, e in c_powers:
co = e.as_coeff_Mul()
common_b.setdefault(b, {}).setdefault(co[1], []).append(co[0])
for b, d in common_b.items():
for di, li in d.items():
d[di] = Add(*li)
for b, e in common_b.items():
for t, c in e.items():
new_c_powers.append((b, c*t))
return new_c_powers
# in c_powers
c_powers = _gather(c_powers)
# and in num_exp
num_exp = _gather(num_exp)
# --- PART 2 ---
#
# o process collected powers (x**0 -> 1; x**1 -> x; otherwise Pow)
# o combine collected powers (2**x * 3**x -> 6**x)
# with numeric base
# ................................
# now we have:
# - coeff:
# - c_powers: (b, e)
# - num_exp: (2, e)
# - pnum_rat: {(1/3, [1/3, 2/3, 1/4])}
# 0 1
# x -> 1 x -> x
for b, e in c_powers:
if e is S.One:
if b.is_Number:
coeff *= b
else:
c_part.append(b)
elif e is not S.Zero:
c_part.append(Pow(b, e))
# x x x
# 2 * 3 -> 6
inv_exp_dict = {} # exp:Mul(num-bases) x x
# e.g. x:6 for ... * 2 * 3 * ...
for b, e in num_exp:
inv_exp_dict.setdefault(e, []).append(b)
for e, b in inv_exp_dict.items():
inv_exp_dict[e] = cls(*b)
c_part.extend([Pow(b, e) for e, b in inv_exp_dict.items() if e])
# b, e -> e' = sum(e), b
# {(1/5, [1/3]), (1/2, [1/12, 1/4]} -> {(1/3, [1/5, 1/2])}
comb_e = {}
for b, e in pnum_rat.items():
comb_e.setdefault(Add(*e), []).append(b)
del pnum_rat
# process them, reducing exponents to values less than 1
# and updating coeff if necessary else adding them to
# num_rat for further processing
num_rat = []
for e, b in comb_e.items():
b = cls(*b)
if e.q == 1:
coeff *= Pow(b, e)
continue
if e.p > e.q:
e_i, ep = divmod(e.p, e.q)
coeff *= Pow(b, e_i)
e = Rational(ep, e.q)
num_rat.append((b, e))
del comb_e
# extract gcd of bases in num_rat
# 2**(1/3)*6**(1/4) -> 2**(1/3+1/4)*3**(1/4)
pnew = defaultdict(list)
i = 0 # steps through num_rat which may grow
while i < len(num_rat):
bi, ei = num_rat[i]
grow = []
for j in range(i + 1, len(num_rat)):
bj, ej = num_rat[j]
g = bi.gcd(bj)
if g is not S.One:
# 4**r1*6**r2 -> 2**(r1+r2) * 2**r1 * 3**r2
# this might have a gcd with something else
e = ei + ej
if e.q == 1:
coeff *= Pow(g, e)
else:
if e.p > e.q:
e_i, ep = divmod(e.p, e.q) # change e in place
coeff *= Pow(g, e_i)
e = Rational(ep, e.q)
grow.append((g, e))
# update the jth item
num_rat[j] = (bj/g, ej)
# update bi that we are checking with
bi = bi/g
if bi is S.One:
break
if bi is not S.One:
obj = Pow(bi, ei)
if obj.is_Number:
coeff *= obj
else:
# changes like sqrt(12) -> 2*sqrt(3)
for obj in Mul.make_args(obj):
if obj.is_Number:
coeff *= obj
else:
assert obj.is_Pow
bi, ei = obj.args
pnew[ei].append(bi)
num_rat.extend(grow)
i += 1
# combine bases of the new powers
for e, b in pnew.items():
pnew[e] = cls(*b)
# handle -1 and I
if neg1e:
# treat I as (-1)**(1/2) and compute -1's total exponent
p, q = neg1e.as_numer_denom()
# if the integer part is odd, extract -1
n, p = divmod(p, q)
if n % 2:
coeff = -coeff
# if it's a multiple of 1/2 extract I
if q == 2:
c_part.append(S.ImaginaryUnit)
elif p:
# see if there is any positive base this power of
# -1 can join
neg1e = Rational(p, q)
for e, b in pnew.items():
if e == neg1e and b.is_positive:
pnew[e] = -b
break
else:
# keep it separate; we've already evaluated it as
# much as possible so evaluate=False
c_part.append(Pow(S.NegativeOne, neg1e, evaluate=False))
# add all the pnew powers
c_part.extend([Pow(b, e) for e, b in pnew.items()])
# oo, -oo
if (coeff is S.Infinity) or (coeff is S.NegativeInfinity):
def _handle_for_oo(c_part, coeff_sign):
new_c_part = []
for t in c_part:
if t.is_positive:
continue
if t.is_negative:
coeff_sign *= -1
continue
new_c_part.append(t)
return new_c_part, coeff_sign
c_part, coeff_sign = _handle_for_oo(c_part, 1)
nc_part, coeff_sign = _handle_for_oo(nc_part, coeff_sign)
coeff *= coeff_sign
# zoo
if coeff is S.ComplexInfinity:
# zoo might be
# unbounded_real + bounded_im
# bounded_real + unbounded_im
# unbounded_real + unbounded_im
# and non-zero real or imaginary will not change that status.
c_part = [c for c in c_part if not (c.is_nonzero and
c.is_real is not None)]
nc_part = [c for c in nc_part if not (c.is_nonzero and
c.is_real is not None)]
# 0
elif coeff is S.Zero:
# we know for sure the result will be 0
return [coeff], [], order_symbols
# check for straggling Numbers that were produced
_new = []
for i in c_part:
if i.is_Number:
coeff *= i
else:
_new.append(i)
c_part = _new
# order commutative part canonically
_mulsort(c_part)
# current code expects coeff to be always in slot-0
if coeff is not S.One:
c_part.insert(0, coeff)
# we are done
if (not nc_part and len(c_part) == 2 and c_part[0].is_Number and
c_part[1].is_Add):
# 2*(1+a) -> 2 + 2 * a
coeff = c_part[0]
c_part = [Add(*[coeff*f for f in c_part[1].args])]
return c_part, nc_part, order_symbols
def _eval_power(b, e):
# don't break up NC terms: (A*B)**3 != A**3*B**3, it is A*B*A*B*A*B
cargs, nc = b.args_cnc(split_1=False)
if e.is_Integer:
return Mul(*[Pow(b, e, evaluate=False) for b in cargs]) * \
Pow(Mul._from_args(nc), e, evaluate=False)
p = Pow(b, e, evaluate=False)
if e.is_Rational or e.is_Float:
return p._eval_expand_power_base()
return p
@classmethod
def class_key(cls):
return 3, 0, cls.__name__
def _eval_evalf(self, prec):
c, m = self.as_coeff_Mul()
if c is S.NegativeOne:
if m.is_Mul:
rv = -AssocOp._eval_evalf(m, prec)
else:
mnew = m._eval_evalf(prec)
if mnew is not None:
m = mnew
rv = -m
else:
rv = AssocOp._eval_evalf(self, prec)
if rv.is_number:
return rv.expand()
return rv
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
self.as_coef_mul() which gives the head and a tuple containing
the arguments of the tail when treated as a Mul.
- if you want the coefficient when self is treated as an Add
then use self.as_coeff_add()[0]
>>> from sympy.abc import x, y
>>> (3*x*y).as_two_terms()
(3, x*y)
"""
args = self.args
if len(args) == 1:
return S.One, self
elif len(args) == 2:
return args
else:
return args[0], self._new_rawargs(*args[1:])
@cacheit
def as_coeff_mul(self, *deps):
if deps:
l1 = []
l2 = []
for f in self.args:
if f.has(*deps):
l2.append(f)
else:
l1.append(f)
return self._new_rawargs(*l1), tuple(l2)
args = self.args
if args[0].is_Rational:
return args[0], args[1:]
elif args[0] is S.NegativeInfinity:
return S.NegativeOne, (-args[0],) + args[1:]
return S.One, args
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
coeff, args = self.args[0], self.args[1:]
if coeff.is_Number and not (rational and not coeff.is_Rational):
if len(args) == 1:
return coeff, args[0]
else:
return coeff, self._new_rawargs(*args)
else:
return S.One, self
def as_real_imag(self, deep=True, **hints):
from sympy import expand_mul
other = []
coeff = S.One
addterms = S.One
for a in self.args:
if a.is_real or a.is_imaginary:
coeff *= a
elif a.is_commutative:
# search for complex conjugate pairs:
for i, x in enumerate(other):
if x == a.conjugate():
coeff *= C.Abs(x)**2
del other[i]
break
else:
if a.is_Add:
addterms *= a
else:
other.append(a)
else:
other.append(a)
m = self.func(*other)
if hints.get('ignore') == m:
return None
if addterms == 1:
if m == 1:
return (C.re(coeff), C.im(coeff))
rem, imm = (C.re(m), C.im(m))
if coeff.is_real:
return (coeff*rem, coeff*imm)
imco = C.im(coeff)
return (-imco*imm, imco*rem)
addre, addim = expand_mul(addterms, deep=False).as_real_imag()
if coeff.is_real:
return (coeff*(C.re(m)*addre - C.im(m)*addim), coeff*(C.im(m)*addre + C.re(m)*addim))
else:
re = - C.im(coeff)*C.im(m)
im = C.im(coeff)*C.re(m)
return (re*addre - im*addim, re*addim + im*addre)
@staticmethod
def _expandsums(sums):
"""
Helper function for _eval_expand_mul.
sums must be a list of instances of Basic.
"""
L = len(sums)
if L == 1:
return sums[0].args
terms = []
left = Mul._expandsums(sums[:L//2])
right = Mul._expandsums(sums[L//2:])
terms = [Mul(a, b) for a in left for b in right]
added = Add(*terms)
return Add.make_args(added) # it may have collapsed down to one term
def _eval_expand_mul(self, **hints):
from sympy import fraction
# Handle things like 1/(x*(x + 1)), which are automatically converted
# to 1/x*1/(x + 1)
expr = self
n, d = fraction(expr)
if d.is_Mul:
n, d = [i._eval_expand_mul(**hints) if i.is_Mul else i
for i in (n, d)]
expr = n/d
if not expr.is_Mul:
return expr
plain, sums, rewrite = [], [], False
for factor in expr.args:
if factor.is_Add:
sums.append(factor)
rewrite = True
else:
if factor.is_commutative:
plain.append(factor)
else:
sums.append(Basic(factor)) # Wrapper
if not rewrite:
return expr
else:
plain = self.func(*plain)
if sums:
terms = self.func._expandsums(sums)
args = []
for term in terms:
t = self.func(plain, term)
if t.is_Mul and any(a.is_Add for a in t.args):
t = t._eval_expand_mul()
args.append(t)
return Add(*args)
else:
return plain
def _eval_derivative(self, s):
terms = list(self.args)
factors = []
for i in xrange(len(terms)):
t = terms[i].diff(s)
if t is S.Zero:
continue
factors.append(self.func(*(terms[:i] + [t] + terms[i + 1:])))
return Add(*factors)
def _matches_simple(self, expr, repl_dict):
# handle (w*3).matches('x*5') -> {w: x*5/3}
coeff, terms = self.as_coeff_Mul()
terms = Mul.make_args(terms)
if len(terms) == 1:
newexpr = self.__class__._combine_inverse(expr, coeff)
return terms[0].matches(newexpr, repl_dict)
return
def matches(self, expr, repl_dict={}, old=False):
expr = sympify(expr)
if self.is_commutative and expr.is_commutative:
return AssocOp._matches_commutative(self, expr, repl_dict, old)
elif self.is_commutative is not expr.is_commutative:
return None
c1, nc1 = self.args_cnc()
c2, nc2 = expr.args_cnc()
repl_dict = repl_dict.copy()
if c1:
if not c2:
c2 = [1]
a = self.func(*c1)
if isinstance(a, AssocOp):
repl_dict = a._matches_commutative(self.func(*c2), repl_dict, old)
else:
repl_dict = a.matches(self.func(*c2), repl_dict)
if repl_dict:
a = self.func(*nc1)
if isinstance(a, self.func):
repl_dict = a._matches(self.func(*nc2), repl_dict)
else:
repl_dict = a.matches(self.func(*nc2), repl_dict)
return repl_dict or None
def _matches(self, expr, repl_dict={}):
# weed out negative one prefixes
sign = 1
a, b = self.as_two_terms()
if a is S.NegativeOne:
if b.is_Mul:
sign = -sign
else:
# the remainder, b, is not a Mul anymore
return b.matches(-expr, repl_dict)
expr = sympify(expr)
if expr.is_Mul and expr.args[0] is S.NegativeOne:
expr = -expr
sign = -sign
if not expr.is_Mul:
# expr can only match if it matches b and a matches +/- 1
if len(self.args) == 2:
# quickly test for equality
if b == expr:
return a.matches(Rational(sign), repl_dict)
# do more expensive match
dd = b.matches(expr, repl_dict)
if dd is None:
return None
dd = a.matches(Rational(sign), dd)
return dd
return None
d = repl_dict.copy()
# weed out identical terms
pp = list(self.args)
ee = list(expr.args)
for p in self.args:
if p in expr.args:
ee.remove(p)
pp.remove(p)
# only one symbol left in pattern -> match the remaining expression
if len(pp) == 1 and isinstance(pp[0], C.Wild):
if len(ee) == 1:
d[pp[0]] = sign * ee[0]
else:
d[pp[0]] = sign * expr.func(*ee)
return d
if len(ee) != len(pp):
return None
for p, e in zip(pp, ee):
d = p.xreplace(d).matches(e, d)
if d is None:
return None
return d
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs/rhs, but treats arguments like symbols, so things like
oo/oo return 1, instead of a nan.
"""
if lhs == rhs:
return S.One
def check(l, r):
if l.is_Float and r.is_comparable:
# if both objects are added to 0 they will share the same "normalization"
# and are more likely to compare the same. Since Add(foo, 0) will not allow
# the 0 to pass, we use __add__ directly.
return l.__add__(0) == r.evalf().__add__(0)
return False
if check(lhs, rhs) or check(rhs, lhs):
return S.One
if lhs.is_Mul and rhs.is_Mul:
a = list(lhs.args)
b = [1]
for x in rhs.args:
if x in a:
a.remove(x)
elif -x in a:
a.remove(-x)
b.append(-1)
else:
b.append(x)
return lhs.func(*a)/rhs.func(*b)
return lhs/rhs
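    # Illustrative sketch (not part of the original module): the symbol-like
    # division described above means identical arguments cancel before any
    # numeric evaluation happens, e.g. (assuming the usual sympy namespace):
    #   >>> from sympy import oo, Symbol
    #   >>> x = Symbol('x')
    #   >>> Mul._combine_inverse(oo, oo)
    #   1
    #   >>> Mul._combine_inverse(2*x, x)
    #   2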
def as_powers_dict(self):
d = defaultdict(int)
for term in self.args:
b, e = term.as_base_exp()
d[b] += e
return d
def as_numer_denom(self):
# don't use _from_args to rebuild the numerators and denominators
# as the order is not guaranteed to be the same once they have
# been separated from each other
numers, denoms = list(zip(*[f.as_numer_denom() for f in self.args]))
return self.func(*numers), self.func(*denoms)
def as_base_exp(self):
e1 = None
bases = []
nc = 0
for m in self.args:
b, e = m.as_base_exp()
if not b.is_commutative:
nc += 1
if e1 is None:
e1 = e
elif e != e1 or nc > 1:
return self, S.One
bases.append(b)
return self.func(*bases), e1
def _eval_is_polynomial(self, syms):
return all(term._eval_is_polynomial(syms) for term in self.args)
def _eval_is_rational_function(self, syms):
return all(term._eval_is_rational_function(syms) for term in self.args)
def _eval_is_algebraic_expr(self, syms):
return all(term._eval_is_algebraic_expr(syms) for term in self.args)
_eval_is_bounded = lambda self: self._eval_template_is_attr('is_bounded')
_eval_is_commutative = lambda self: self._eval_template_is_attr(
'is_commutative')
_eval_is_rational = lambda self: self._eval_template_is_attr('is_rational',
when_multiple=None)
def _eval_is_integer(self):
is_rational = self.is_rational
if is_rational:
n, d = self.as_numer_denom()
if d is S.One:
return True
elif d is S(2):
return n.is_even
elif is_rational is False:
return False
def _eval_is_polar(self):
has_polar = any(arg.is_polar for arg in self.args)
return has_polar and \
all(arg.is_polar or arg.is_positive for arg in self.args)
def _eval_is_real(self):
from sympy.core.logic import fuzzy_not
im_count = 0
is_neither = False
is_zero = False
for t in self.args:
if t.is_imaginary:
im_count += 1
continue
t_real = t.is_real
if t_real:
if not is_zero:
is_zero = fuzzy_not(t.is_nonzero)
if is_zero:
return True
continue
elif t_real is False:
if is_neither:
return
else:
is_neither = True
else:
return
if is_neither:
if im_count % 2 == 0:
if is_zero is False:
return False
else:
return im_count % 2 == 0
def _eval_is_imaginary(self):
if self.is_nonzero:
return (S.ImaginaryUnit*self).is_real
def _eval_is_hermitian(self):
nc_count = 0
im_count = 0
is_neither = False
for t in self.args:
if not t.is_commutative:
nc_count += 1
if nc_count > 1:
return None
if t.is_antihermitian:
im_count += 1
continue
t_real = t.is_hermitian
if t_real:
continue
elif t_real is False:
if is_neither:
return None
else:
is_neither = True
else:
return None
if is_neither:
return False
return (im_count % 2 == 0)
def _eval_is_antihermitian(self):
nc_count = 0
im_count = 0
is_neither = False
for t in self.args:
if not t.is_commutative:
nc_count += 1
if nc_count > 1:
return None
if t.is_antihermitian:
im_count += 1
continue
t_real = t.is_hermitian
if t_real:
continue
elif t_real is False:
if is_neither:
return None
else:
is_neither = True
else:
return None
if is_neither:
return False
return (im_count % 2 == 1)
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
others = list(self.args)
others.remove(t)
if all(x.is_rational is True for x in others):
return True
return None
if a is None:
return
return False
def _eval_is_zero(self):
zero = None
for a in self.args:
if a.is_zero:
zero = True
continue
bound = a.is_bounded
if not bound:
return bound
if zero:
return True
def _eval_is_positive(self):
"""Return True if self is positive, False if not, and None if it
cannot be determined.
This algorithm is non-recursive and works by keeping track of the
sign which changes when a negative or nonpositive is encountered.
Whether a nonpositive or nonnegative is seen is also tracked since
the presence of these makes it impossible to return True, but
possible to return False if the end result is nonpositive. e.g.
pos * neg * nonpositive -> pos or zero -> None is returned
pos * neg * nonnegative -> neg or zero -> False is returned
"""
sign = 1
saw_NON = False
for t in self.args:
if t.is_positive:
continue
elif t.is_negative:
sign = -sign
elif t.is_zero:
return False
elif t.is_nonpositive:
sign = -sign
saw_NON = True
elif t.is_nonnegative:
saw_NON = True
else:
return
if sign == 1 and saw_NON is False:
return True
if sign < 0:
return False
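    # Illustrative sketch (not part of the original module) of the sign
    # bookkeeping described in the docstring above, assuming ordinary sympy
    # symbols with the stated assumptions:
    #   >>> from sympy import Symbol
    #   >>> p = Symbol('p', positive=True); n = Symbol('n', negative=True)
    #   >>> nn = Symbol('nn', nonnegative=True)
    #   >>> (p*n).is_positive, (p*n).is_negative
    #   (False, True)
    #   >>> (p*n*nn).is_positive    # neg or zero -> cannot be positive
    #   False
    #   >>> print((p*n*nn).is_negative)  # could be zero -> undetermined
    #   None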
def _eval_is_negative(self):
"""Return True if self is negative, False if not, and None if it
cannot be determined.
This algorithm is non-recursive and works by keeping track of the
sign which changes when a negative or nonpositive is encountered.
Whether a nonpositive or nonnegative is seen is also tracked since
the presence of these makes it impossible to return True, but
possible to return False if the end result is nonnegative. e.g.
pos * neg * nonpositive -> pos or zero -> False is returned
pos * neg * nonnegative -> neg or zero -> None is returned
"""
sign = 1
saw_NON = False
for t in self.args:
if t.is_positive:
continue
elif t.is_negative:
sign = -sign
elif t.is_zero:
return False
elif t.is_nonpositive:
sign = -sign
saw_NON = True
elif t.is_nonnegative:
saw_NON = True
else:
return
if sign == -1 and saw_NON is False:
return True
if sign > 0:
return False
def _eval_is_odd(self):
is_integer = self.is_integer
if is_integer:
r, acc = True, 1
for t in self.args:
if not t.is_integer:
return None
elif t.is_even:
r = False
elif t.is_integer:
if r is False:
pass
elif acc != 1 and (acc + t).is_odd:
r = False
elif t.is_odd is None:
r = None
acc = t
return r
# !integer -> !odd
elif is_integer is False:
return False
def _eval_is_even(self):
is_integer = self.is_integer
if is_integer:
return fuzzy_not(self._eval_is_odd())
elif is_integer is False:
return False
def _eval_subs(self, old, new):
from sympy.functions.elementary.complexes import sign
from sympy.ntheory.factor_ import multiplicity
from sympy.simplify.simplify import powdenest, fraction
if not old.is_Mul:
return None
        # try to keep the replacement literal so -2*x doesn't replace 4*x
if old.args[0].is_Number and old.args[0] < 0:
if self.args[0].is_Number:
if self.args[0] < 0:
return self._subs(-old, -new)
return None
def base_exp(a):
            # if I and -1 are in a Mul, they both end up with
# a -1 base (see issue 6421); all we want here are the
# true Pow or exp separated into base and exponent
if a.is_Pow or a.func is C.exp:
return a.as_base_exp()
return a, S.One
def breakup(eq):
"""break up powers of eq when treated as a Mul:
b**(Rational*e) -> b**e, Rational
commutatives come back as a dictionary {b**e: Rational}
noncommutatives come back as a list [(b**e, Rational)]
"""
(c, nc) = (defaultdict(int), list())
for a in Mul.make_args(eq):
a = powdenest(a)
(b, e) = base_exp(a)
if e is not S.One:
(co, _) = e.as_coeff_mul()
b = Pow(b, e/co)
e = co
if a.is_commutative:
c[b] += e
else:
nc.append([b, e])
return (c, nc)
def rejoin(b, co):
"""
Put rational back with exponent; in general this is not ok, but
since we took it from the exponent for analysis, it's ok to put
it back.
"""
(b, e) = base_exp(b)
return Pow(b, e*co)
def ndiv(a, b):
"""if b divides a in an extractive way (like 1/4 divides 1/2
but not vice versa, and 2/5 does not divide 1/3) then return
the integer number of times it divides, else return 0.
"""
if not b.q % a.q or not a.q % b.q:
return int(a/b)
return 0
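        # Illustrative sketch (not part of the original module): with the
        # Rational arguments used below, ndiv behaves as the docstring says:
        #   ndiv(S(1)/2, S(1)/4) -> 2   (1/4 goes into 1/2 twice)
        #   ndiv(S(1)/4, S(1)/2) -> 0   (not extractive the other way)
        #   ndiv(S(1)/3, S(2)/5) -> 0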
# give Muls in the denominator a chance to be changed (see issue 5651)
# rv will be the default return value
rv = None
n, d = fraction(self)
if d is not S.One:
self2 = n._subs(old, new)/d._subs(old, new)
if not self2.is_Mul:
return self2._subs(old, new)
if self2 != self:
self = rv = self2
# Now continue with regular substitution.
# handle the leading coefficient and use it to decide if anything
# should even be started; we always know where to find the Rational
# so it's a quick test
co_self = self.args[0]
co_old = old.args[0]
co_xmul = None
if co_old.is_Rational and co_self.is_Rational:
# if coeffs are the same there will be no updating to do
# below after breakup() step; so skip (and keep co_xmul=None)
if co_old != co_self:
co_xmul = co_self.extract_multiplicatively(co_old)
elif co_old.is_Rational:
return rv
# break self and old into factors
(c, nc) = breakup(self)
(old_c, old_nc) = breakup(old)
# update the coefficients if we had an extraction
# e.g. if co_self were 2*(3/35*x)**2 and co_old = 3/5
# then co_self in c is replaced by (3/5)**2 and co_residual
# is 2*(1/7)**2
if co_xmul and co_xmul.is_Rational and abs(co_old) != 1:
mult = S(multiplicity(abs(co_old), co_self))
c.pop(co_self)
if co_old in c:
c[co_old] += mult
else:
c[co_old] = mult
co_residual = co_self/co_old**mult
else:
co_residual = 1
# do quick tests to see if we can't succeed
ok = True
if len(old_nc) > len(nc):
# more non-commutative terms
ok = False
elif len(old_c) > len(c):
# more commutative terms
ok = False
elif set(i[0] for i in old_nc).difference(set(i[0] for i in nc)):
# unmatched non-commutative bases
ok = False
elif set(old_c).difference(set(c)):
# unmatched commutative terms
ok = False
elif any(sign(c[b]) != sign(old_c[b]) for b in old_c):
# differences in sign
ok = False
if not ok:
return rv
if not old_c:
cdid = None
else:
rat = []
for (b, old_e) in old_c.items():
c_e = c[b]
rat.append(ndiv(c_e, old_e))
if not rat[-1]:
return rv
cdid = min(rat)
if not old_nc:
ncdid = None
for i in range(len(nc)):
nc[i] = rejoin(*nc[i])
else:
ncdid = 0 # number of nc replacements we did
take = len(old_nc) # how much to look at each time
limit = cdid or S.Infinity # max number that we can take
failed = [] # failed terms will need subs if other terms pass
i = 0
while limit and i + take <= len(nc):
hit = False
# the bases must be equivalent in succession, and
# the powers must be extractively compatible on the
                # first and last factor but equal in between.
rat = []
for j in range(take):
if nc[i + j][0] != old_nc[j][0]:
break
elif j == 0:
rat.append(ndiv(nc[i + j][1], old_nc[j][1]))
elif j == take - 1:
rat.append(ndiv(nc[i + j][1], old_nc[j][1]))
elif nc[i + j][1] != old_nc[j][1]:
break
else:
rat.append(1)
j += 1
else:
ndo = min(rat)
if ndo:
if take == 1:
if cdid:
ndo = min(cdid, ndo)
nc[i] = Pow(new, ndo)*rejoin(nc[i][0],
nc[i][1] - ndo*old_nc[0][1])
else:
ndo = 1
# the left residual
l = rejoin(nc[i][0], nc[i][1] - ndo*
old_nc[0][1])
# eliminate all middle terms
mid = new
# the right residual (which may be the same as the middle if take == 2)
ir = i + take - 1
r = (nc[ir][0], nc[ir][1] - ndo*
old_nc[-1][1])
if r[1]:
if i + take < len(nc):
nc[i:i + take] = [l*mid, r]
else:
r = rejoin(*r)
nc[i:i + take] = [l*mid*r]
else:
# there was nothing left on the right
nc[i:i + take] = [l*mid]
limit -= ndo
ncdid += ndo
hit = True
if not hit:
# do the subs on this failing factor
failed.append(i)
i += 1
else:
if not ncdid:
return rv
# although we didn't fail, certain nc terms may have
# failed so we rebuild them after attempting a partial
# subs on them
failed.extend(range(i, len(nc)))
for i in failed:
nc[i] = rejoin(*nc[i]).subs(old, new)
# rebuild the expression
if cdid is None:
do = ncdid
elif ncdid is None:
do = cdid
else:
do = min(ncdid, cdid)
margs = []
for b in c:
if b in old_c:
# calculate the new exponent
e = c[b] - old_c[b]*do
margs.append(rejoin(b, e))
else:
margs.append(rejoin(b.subs(old, new), c[b]))
if cdid and not ncdid:
# in case we are replacing commutative with non-commutative,
# we want the new term to come at the front just like the
# rest of this routine
margs = [Pow(new, cdid)] + margs
return co_residual*self.func(*margs)*self.func(*nc)
def _eval_nseries(self, x, n, logx):
from sympy import powsimp
terms = [t.nseries(x, n=n, logx=logx) for t in self.args]
res = powsimp(self.func(*terms).expand(), combine='exp', deep=True)
if res.has(C.Order):
res += C.Order(x**n, x)
return res
def _eval_as_leading_term(self, x):
return self.func(*[t.as_leading_term(x) for t in self.args])
def _eval_conjugate(self):
return self.func(*[t.conjugate() for t in self.args])
def _eval_transpose(self):
return self.func(*[t.transpose() for t in self.args[::-1]])
def _eval_adjoint(self):
return self.func(*[t.adjoint() for t in self.args[::-1]])
def _sage_(self):
s = 1
for x in self.args:
s *= x._sage_()
return s
def as_content_primitive(self, radical=False):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import sqrt
>>> (-3*sqrt(2)*(2 - 2*sqrt(2))).as_content_primitive()
(6, -sqrt(2)*(-sqrt(2) + 1))
See docstring of Expr.as_content_primitive for more examples.
"""
coef = S.One
args = []
for i, a in enumerate(self.args):
c, p = a.as_content_primitive(radical=radical)
coef *= c
if p is not S.One:
args.append(p)
# don't use self._from_args here to reconstruct args
# since there may be identical args now that should be combined
# e.g. (2+2*x)*(3+3*x) should be (6, (1 + x)**2) not (6, (1+x)*(1+x))
return coef, self.func(*args)
def as_ordered_factors(self, order=None):
"""Transform an expression into an ordered list of factors.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x, y
>>> (2*x*y*sin(x)*cos(x)).as_ordered_factors()
[2, x, y, sin(x), cos(x)]
"""
cpart, ncpart = self.args_cnc()
cpart.sort(key=lambda expr: expr.sort_key(order=order))
return cpart + ncpart
@property
def _sorted_args(self):
return self.as_ordered_factors()
def prod(a, start=1):
"""Return product of elements of a. Start with int 1 so if only
ints are included then an int result is returned.
Examples
========
>>> from sympy import prod, S
>>> prod(range(3))
0
>>> type(_) is int
True
>>> prod([S(2), 3])
6
>>> _.is_Integer
True
You can start the product at something other than 1:
>>> prod([1, 2], 3)
6
"""
return reduce(operator.mul, a, start)
def _keep_coeff(coeff, factors, clear=True, sign=False):
"""Return ``coeff*factors`` unevaluated if necessary.
If ``clear`` is False, do not keep the coefficient as a factor
if it can be distributed on a single factor such that one or
more terms will still have integer coefficients.
If ``sign`` is True, allow a coefficient of -1 to remain factored out.
Examples
========
>>> from sympy.core.mul import _keep_coeff
>>> from sympy.abc import x, y
>>> from sympy import S
>>> _keep_coeff(S.Half, x + 2)
(x + 2)/2
>>> _keep_coeff(S.Half, x + 2, clear=False)
x/2 + 1
>>> _keep_coeff(S.Half, (x + 2)*y, clear=False)
y*(x + 2)/2
>>> _keep_coeff(S(-1), x + y)
-x - y
>>> _keep_coeff(S(-1), x + y, sign=True)
-(x + y)
"""
if not coeff.is_Number:
if factors.is_Number:
factors, coeff = coeff, factors
else:
return coeff*factors
if coeff is S.One:
return factors
elif coeff is S.NegativeOne and not sign:
return -factors
elif factors.is_Add:
if not clear and coeff.is_Rational and coeff.q != 1:
q = S(coeff.q)
for i in factors.args:
c, t = i.as_coeff_Mul()
r = c/q
if r == int(r):
return coeff*factors
return Mul._from_args((coeff, factors))
elif factors.is_Mul:
margs = list(factors.args)
if margs[0].is_Number:
margs[0] *= coeff
if margs[0] == 1:
margs.pop(0)
else:
margs.insert(0, coeff)
return Mul._from_args(margs)
else:
return coeff*factors
def expand_2arg(e):
from sympy.simplify.simplify import bottom_up
def do(e):
if e.is_Mul:
c, r = e.as_coeff_Mul()
if c.is_Number and r.is_Add:
return _unevaluated_Add(*[c*ri for ri in r.args])
return e
return bottom_up(e, do)
from .numbers import Rational
from .power import Pow
from .add import Add, _addsort, _unevaluated_Add
|
the-stack_0_24784
|
import re
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.base import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.functional import cached_property
from .models import GeometryColumns, SpatialRefSys
#### Classes used in constructing PostGIS spatial SQL ####
class PostGISOperator(SpatialOperation):
"For PostGIS operators (e.g. `&&`, `~`)."
def __init__(self, operator):
super(PostGISOperator, self).__init__(operator=operator)
class PostGISFunction(SpatialFunction):
"For PostGIS function calls (e.g., `ST_Contains(table, geom)`)."
def __init__(self, prefix, function, **kwargs):
super(PostGISFunction, self).__init__(prefix + function, **kwargs)
class PostGISFunctionParam(PostGISFunction):
"For PostGIS functions that take another parameter (e.g. DWithin, Relate)."
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class PostGISDistance(PostGISFunction):
"For PostGIS distance operations."
dist_func = 'Distance'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
def __init__(self, prefix, operator):
super(PostGISDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSpheroidDistance(PostGISFunction):
"For PostGIS spherical distance operations (using the spheroid)."
dist_func = 'distance_spheroid'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s) %(operator)s %%s'
def __init__(self, prefix, operator):
# An extra parameter in `end_subst` is needed for the spheroid string.
super(PostGISSpheroidDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSphereDistance(PostGISDistance):
"For PostGIS spherical distance operations."
dist_func = 'distance_sphere'
class PostGISRelate(PostGISFunctionParam):
"For PostGIS Relate(<geom>, <pattern>) calls."
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def __init__(self, prefix, pattern):
if not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
super(PostGISRelate, self).__init__(prefix, 'Relate')
class PostGISOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'postgis'
postgis = True
geom_func_prefix = 'ST_'
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = {'Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union'}
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
prefix = self.geom_func_prefix
# PostGIS-specific operators. The commented descriptions of these
# operators come from Section 7.6 of the PostGIS 1.4 documentation.
self.geometry_operators = {
# The "&<" operator returns true if A's bounding box overlaps or
# is to the left of B's bounding box.
'overlaps_left': PostGISOperator('&<'),
# The "&>" operator returns true if A's bounding box overlaps or
# is to the right of B's bounding box.
'overlaps_right': PostGISOperator('&>'),
# The "<<" operator returns true if A's bounding box is strictly
# to the left of B's bounding box.
'left': PostGISOperator('<<'),
# The ">>" operator returns true if A's bounding box is strictly
# to the right of B's bounding box.
'right': PostGISOperator('>>'),
# The "&<|" operator returns true if A's bounding box overlaps or
# is below B's bounding box.
'overlaps_below': PostGISOperator('&<|'),
# The "|&>" operator returns true if A's bounding box overlaps or
# is above B's bounding box.
'overlaps_above': PostGISOperator('|&>'),
# The "<<|" operator returns true if A's bounding box is strictly
# below B's bounding box.
'strictly_below': PostGISOperator('<<|'),
# The "|>>" operator returns true if A's bounding box is strictly
# above B's bounding box.
'strictly_above': PostGISOperator('|>>'),
# The "~=" operator is the "same as" operator. It tests actual
# geometric equality of two features. So if A and B are the same feature,
# vertex-by-vertex, the operator returns true.
'same_as': PostGISOperator('~='),
'exact': PostGISOperator('~='),
# The "@" operator returns true if A's bounding box is completely contained
# by B's bounding box.
'contained': PostGISOperator('@'),
# The "~" operator returns true if A's bounding box completely contains
# by B's bounding box.
'bbcontains': PostGISOperator('~'),
# The "&&" operator returns true if A's bounding box overlaps
# B's bounding box.
'bboverlaps': PostGISOperator('&&'),
}
self.geometry_functions = {
'equals': PostGISFunction(prefix, 'Equals'),
'disjoint': PostGISFunction(prefix, 'Disjoint'),
'touches': PostGISFunction(prefix, 'Touches'),
'crosses': PostGISFunction(prefix, 'Crosses'),
'within': PostGISFunction(prefix, 'Within'),
'overlaps': PostGISFunction(prefix, 'Overlaps'),
'contains': PostGISFunction(prefix, 'Contains'),
'intersects': PostGISFunction(prefix, 'Intersects'),
'relate': (PostGISRelate, six.string_types),
'coveredby': PostGISFunction(prefix, 'CoveredBy'),
'covers': PostGISFunction(prefix, 'Covers'),
}
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float) + six.integer_types
def get_dist_ops(operator):
"Returns operations for both regular and spherical distances."
return {'cartesian': PostGISDistance(prefix, operator),
'sphere': PostGISSphereDistance(prefix, operator),
'spheroid': PostGISSpheroidDistance(prefix, operator),
}
self.distance_functions = {
'distance_gt': (get_dist_ops('>'), dtypes),
'distance_gte': (get_dist_ops('>='), dtypes),
'distance_lt': (get_dist_ops('<'), dtypes),
'distance_lte': (get_dist_ops('<='), dtypes),
'dwithin': (PostGISFunctionParam(prefix, 'DWithin'), dtypes)
}
# Adding the distance functions to the geometries lookup.
self.geometry_functions.update(self.distance_functions)
# Only PostGIS versions 1.3.4+ have GeoJSON serialization support.
if self.spatial_version < (1, 3, 4):
GEOJSON = False
else:
GEOJSON = prefix + 'AsGeoJson'
        # ST_ContainsProperly, ST_MakeLine, and ST_GeoHash were added in 1.4.
if self.spatial_version >= (1, 4, 0):
GEOHASH = 'ST_GeoHash'
BOUNDINGCIRCLE = 'ST_MinimumBoundingCircle'
self.geometry_functions['contains_properly'] = PostGISFunction(prefix, 'ContainsProperly')
else:
GEOHASH, BOUNDINGCIRCLE = False, False
# Geography type support added in 1.5.
if self.spatial_version >= (1, 5, 0):
self.geography = True
# Only a subset of the operators and functions are available
# for the geography type.
self.geography_functions = self.distance_functions.copy()
self.geography_functions.update({
'coveredby': self.geometry_functions['coveredby'],
'covers': self.geometry_functions['covers'],
'intersects': self.geometry_functions['intersects'],
})
self.geography_operators = {
'bboverlaps': PostGISOperator('&&'),
}
# Native geometry type support added in PostGIS 2.0.
if self.spatial_version >= (2, 0, 0):
self.geometry = True
# Creating a dictionary lookup of all GIS terms for PostGIS.
self.gis_terms = set(['isnull'])
self.gis_terms.update(self.geometry_operators)
self.gis_terms.update(self.geometry_functions)
self.area = prefix + 'Area'
self.bounding_circle = BOUNDINGCIRCLE
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = GEOHASH
self.geojson = GEOJSON
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = prefix + 'AsKML'
self.length = prefix + 'Length'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = prefix + 'MakeLine'
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = prefix + 'Union'
self.unionagg = prefix + 'Union'
if self.spatial_version >= (2, 0, 0):
self.extent3d = prefix + '3DExtent'
self.length3d = prefix + '3DLength'
self.perimeter3d = prefix + '3DPerimeter'
else:
self.extent3d = prefix + 'Extent3D'
self.length3d = prefix + 'Length3D'
self.perimeter3d = prefix + 'Perimeter3D'
@cached_property
def spatial_version(self):
"""Determine the version of the PostGIS library."""
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
if hasattr(settings, 'POSTGIS_VERSION'):
version = settings.POSTGIS_VERSION
else:
try:
vtup = self.postgis_version_tuple()
except DatabaseError:
raise ImproperlyConfigured(
'Cannot determine PostGIS version for database "%s". '
'GeoDjango requires at least PostGIS version 1.3. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
version = vtup[1:]
return version
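    # Illustrative sketch (not part of the original module): the version
    # query can be skipped by hard-coding the tuple in the Django settings
    # module, as the docstring above describes (values are only an example):
    #   POSTGIS_VERSION = (1, 5, 3)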
def check_aggregate_support(self, aggregate):
"""
Checks if the given aggregate name is supported (that is, if it's
in `self.valid_aggregates`).
"""
agg_name = aggregate.__class__.__name__
return agg_name in self.valid_aggregates
def convert_extent(self, box):
"""
Returns a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
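    # Illustrative sketch (not part of the original module): for an already
    # constructed ``ops = PostGISOperations(connection)`` instance,
    #   ops.convert_extent("BOX(-90.0 30.0,-85.0 40.0)")
    # would return (-90.0, 30.0, -85.0, 40.0).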
def convert_extent3d(self, box3d):
"""
Returns a 6-tuple extent for the `Extent3D` aggregate by converting
        the 3d bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
"""
        Converts the geometry returned from PostGIS aggregates.
"""
if hex:
return Geometry(hex)
else:
return None
def geo_db_type(self, f):
"""
Return the database field type for the given geometry field.
Typically this is `None` because geometry columns are added via
the `AddGeometryColumn` stored procedure, unless the field
has been specified to be of geography type instead.
"""
if f.geography:
if not self.geography:
raise NotImplementedError('PostGIS 1.5 required for geography column support.')
if f.srid != 4326:
raise NotImplementedError('PostGIS 1.5 supports geography columns '
'only with an SRID of 4326.')
return 'geography(%s,%d)' % (f.geom_type, f.srid)
elif self.geometry:
# Postgis 2.0 supports type-based geometries.
# TODO: Support 'M' extension.
if f.dim == 3:
geom_type = f.geom_type + 'Z'
else:
geom_type = f.geom_type
return 'geometry(%s,%d)' % (geom_type, f.srid)
else:
return None
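    # Illustrative sketch (not part of the original module): for a
    # ``PointField(geography=True, srid=4326)`` this returns
    # 'geography(POINT,4326)'; on PostGIS 2.0+ a 3-dimensional geometry
    # PointField with srid=4326 would give 'geometry(POINTZ,4326)'.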
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
        the geography column type newly introduced in PostGIS 1.5.
"""
# Getting the distance parameter and any options.
if len(dist_val) == 1:
value, option = dist_val[0], None
else:
value, option = dist_val
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography and self.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
if (not geography and geodetic and lookup_type != 'dwithin'
and option == 'spheroid'):
# using distance_spheroid requires the spheroid of the field as
# a parameter.
return [f._spheroid, dist_param]
else:
return [dist_param]
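    # Illustrative sketch (not part of the original module), assuming
    # ``from django.contrib.gis.measure import D``: for a geography column,
    #   ops.get_distance(f, [D(km=5)], 'distance_lte')
    # returns [5000.0], i.e. the Distance object is converted to meters,
    # while a projected geometry column gets the field's own units instead.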
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
ST_Transform() function call.
"""
if value is None or value.srid == f.srid:
placeholder = '%s'
else:
# Adding Transform() to the SQL placeholder.
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
if hasattr(value, 'expression'):
# If this is an F expression, then we don't really want
# a placeholder and instead substitute in the column
# of the expression.
placeholder = placeholder % self.get_expression_column(value)
return placeholder
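    # Illustrative sketch (not part of the original module): for a geometry
    # field declared with srid=4326 and a geometry value carrying a different
    # SRID, the placeholder built above is the string
    #   'ST_Transform(%s, 4326)'
    # whereas a matching SRID (or a None value) yields a plain '%s'.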
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
# Close out the connection. See #9437.
with self.connection.temporary_connection() as cursor:
cursor.execute('SELECT %s()' % func)
return cursor.fetchone()[0]
def postgis_geos_version(self):
"Returns the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Returns the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
def postgis_proj_version(self):
"Returns the version of the PROJ.4 library used with PostGIS."
return self._get_postgis_func('postgis_proj_version')
def postgis_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_version')
def postgis_full_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Returns the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
# Getting the PostGIS version
version = self.postgis_lib_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse PostGIS version string: %s' % version)
return (version, major, minor1, minor2)
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def num_params(self, lookup_type, num_param):
"""
Helper routine that returns a boolean indicating whether the number of
parameters is correct for the lookup type.
"""
def exactly_two(np):
return np == 2
def two_to_three(np):
return np >= 2 and np <= 3
if (lookup_type in self.distance_functions and
lookup_type != 'dwithin'):
return two_to_three(num_param)
else:
return exactly_two(num_param)
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"""
Constructs spatial SQL from the given lookup value tuple a
(alias, col, db_type), the lookup type string, lookup value, and
the geometry field.
"""
alias, col, db_type = lvalue
# Getting the quoted geometry column.
geo_col = '%s.%s' % (qn(alias), qn(col))
if lookup_type in self.geometry_operators:
if field.geography and not lookup_type in self.geography_operators:
raise ValueError('PostGIS geography does not support the '
'"%s" lookup.' % lookup_type)
# Handling a PostGIS operator.
op = self.geometry_operators[lookup_type]
return op.as_sql(geo_col, self.get_geom_placeholder(field, value))
elif lookup_type in self.geometry_functions:
if field.geography and not lookup_type in self.geography_functions:
raise ValueError('PostGIS geography type does not support the '
'"%s" lookup.' % lookup_type)
# See if a PostGIS geometry function matches the lookup type.
tmp = self.geometry_functions[lookup_type]
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# distance lookups.
if isinstance(tmp, tuple):
# First element of tuple is the PostGISOperation instance, and the
# second element is either the type or a tuple of acceptable types
                # that may be passed in as further parameters for the lookup type.
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
# Geometry is first element of lookup tuple.
geom = value[0]
# Number of valid tuple parameters depends on the lookup type.
nparams = len(value)
if not self.num_params(lookup_type, nparams):
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(self.geom_func_prefix, value[1])
elif lookup_type in self.distance_functions and lookup_type != 'dwithin':
if not field.geography and field.geodetic(self.connection):
# Geodetic distances are only available from Points to
# PointFields on PostGIS 1.4 and below.
if not self.connection.ops.geography:
if field.geom_type != 'POINT':
raise ValueError('PostGIS spherical operations are only valid on PointFields.')
if str(geom.geom_type) != 'Point':
raise ValueError('PostGIS geometry distance parameter is required to be of type Point.')
# Setting up the geodetic operation appropriately.
if nparams == 3 and value[2] == 'spheroid':
op = op['spheroid']
else:
op = op['sphere']
else:
op = op['cartesian']
else:
op = tmp
geom = value
# Calling the `as_sql` function on the operation instance.
return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, ('' if value else 'NOT ')), []
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__
if not self.check_aggregate_support(agg):
            raise NotImplementedError('%s spatial aggregate is not implemented for this backend.' % agg_name)
agg_name = agg_name.lower()
if agg_name == 'union':
agg_name += 'agg'
sql_template = '%(function)s(%(field)s)'
sql_function = getattr(self, agg_name)
return sql_template, sql_function
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
return GeometryColumns
def spatial_ref_sys(self):
return SpatialRefSys
|
the-stack_0_24785
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath('..'))
import pyunitwizard
# -- Project information -----------------------------------------------------
project = 'PyUnitWizard'
copyright = ('2021, UIBCDF Lab at the Mexico City Childrens Hospital Federico Gomez and authors.'
'Project structure based on the Computational Molecular Science Python Cookiecutter version 1.5')
author = 'Liliana M. Moreno Vargas & Diego Prada Gracia'
# The short X.Y version
version = pyunitwizard.__version__.split('+')[0]
# The full version, including alpha/beta/rc tags
release = pyunitwizard.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.githubpages',
'sphinxcontrib.bibtex',
'nbsphinx',
'recommonmark',
'sphinx_markdown_tables',
'sphinx.ext.extlinks'
]
autosummary_generate = True
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True
# sphinxcontrib-bibtex
bibtex_bibfiles = ['bibliography.bib'] # list of *.bib files
bibtex_default_style = 'alpha'
bibtex_encoding = 'utf-8-sig'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_parsers={
}
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown'
}
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language was edited to use sphinx-intl
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
#html_theme_options = {
# 'canonical_url': '',
# 'analytics_id': '',
# 'logo_only': False,
# 'display_version': True,
# 'prev_next_buttons_location': 'bottom',
# 'style_external_links': False,
# # Toc options
# 'collapse_navigation': False,
# 'sticky_navigation': True,
# 'navigation_depth': 4,
# 'includehidden': True,
# 'titles_only': False
#}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom css
html_css_files = [
'custom.css',
]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_show_sourcelink = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyunitwizarddoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyunitwizard.tex', 'PyUnitWizard Documentation',
'pyunitwizard', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyunitwizard', 'PyUnitWizard Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyunitwizard', 'PyUnitWizard Documentation',
author, 'pyunitwizard', 'This must be a short description of the project',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
the-stack_0_24786
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <[email protected]>
# please also see AUTHORS file
# :copyright: (c) 2013-2017, Isis Lovecruft
# (c) 2007-2017, The Tor Project, Inc.
# (c) 2007-2017, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""deprecated ― functions and classes which have been removed from the
production code but are kept in order to be used in regression testing.
"""
import ipaddr
import re
from twisted.python import deprecate
from twisted.python.versions import Version
@deprecate.deprecated(
Version('bridgedb', 0, 2, 4),
replacement='bridgedb.bridges.Bridge')
class Bridge(object):
"""Holds information for a single bridge, along with any Pluggable
Transports it is also running.
:attr str nickname: The bridge's nickname. Not currently used.
:attr ip: (:class:`ipaddr.IPAddress`) The bridge's IPv4 address, specified
on the 'r'-line in a networkstatus document.
:attr int orport: The bridge's OR port.
    :attr dict or_addresses: The bridge's alternate IP addresses. The keys
should be instances of ``ipaddr.IPAddress``, and the value should be a
:class:`bridgedb.parse.addr.PortList` for the port(s) on which that
address is listening.
:attr list transports: List of :class:`PluggableTransport` instances for
each PT which the bridge supports.
:attr str fingerprint: The bridge's identity digest, in lowercase hex,
without whitespace.
:attr bool running: ``True``, if this bridge was given the ``Running`` flag.
:attr bool stable: ``True``, if this bridge was given the ``Stable`` flag.
:attr dict blockingCountries: A dictionary whose keys are strings of
``"IP:port"`` pairs, and the keys are lists of two letter country
codes which block that IP:port. For example::
{"1.2.3.4:9001": ['sk', 'us', 'ir', 'cn']}
:attr str desc_digest: SHA1 hexdigest of the bridge's descriptor as
defined in the networkstatus document.
:attr str ei_digest: SHA1 hexdigest of the bridge's extra-info document as
given in the bridge's descriptor, corresponding to desc_digest.
:attr bool verified: Did we receive the descriptor for this bridge that
was specified in the networkstatus?
"""
def __init__(self, nickname, ip, orport, fingerprint=None, id_digest=None,
or_addresses=None, transports=None):
"""Create a new Bridge. One of fingerprint and id_digest must be set.
"""
self.nickname = nickname
self.ip = ip
self.orport = orport
if not or_addresses: or_addresses = {}
self.or_addresses = or_addresses
if not transports: transports = []
self.transports = transports
self.running = self.stable = None
self.blockingCountries = {}
self.desc_digest = None
self.ei_digest = None
self.verified = False
if id_digest is not None:
assert fingerprint is None
if len(id_digest) != DIGEST_LEN:
raise TypeError("Bridge with invalid ID")
self.fingerprint = toHex(id_digest)
elif fingerprint is not None:
if not isValidFingerprint(fingerprint):
raise TypeError("Bridge with invalid fingerprint (%r)"%
fingerprint)
self.fingerprint = fingerprint.lower()
else:
raise TypeError("Bridge with no ID")
def setDescriptorDigest(self, digest):
"""Set the descriptor digest, specified in the NS."""
self.desc_digest = digest
def setExtraInfoDigest(self, digest):
"""Set the extra-info digest, specified in the descriptor."""
self.ei_digest = digest
def setVerified(self):
"""Call when the bridge's descriptor is parsed"""
self.verified = True
def isVerified(self):
"""Returns the truthiness of ``verified``"""
return self.verified
def getID(self):
"""Return the bridge's identity digest."""
return fromHex(self.fingerprint)
def __repr__(self):
"""Return a piece of python that evaluates to this bridge."""
if self.or_addresses:
return "Bridge(%r,%r,%d,%r,or_addresses=%s)"%(
self.nickname, self.ip, self.orport, self.fingerprint,
self.or_addresses)
return "Bridge(%r,%r,%d,%r)"%(
self.nickname, self.ip, self.orport, self.fingerprint)
def getConfigLine(self, includeFingerprint=False, addressClass=None,
request=None, transport=None):
"""Returns a valid bridge line for inclusion in a torrc.
:param bool includeFingerprint: If ``True``, include the
``fingerprint`` of this :class:`Bridge` in the returned bridge
line.
:param DOCDOC addressClass: Type of address to choose.
:param str request: A string unique to this request e.g. email-address
or ``uniformMap(ip)`` or ``'default'``.
:param str transport: A pluggable transport method name.
"""
if not request: request = 'default'
digest = getHMACFunc('Order-Or-Addresses')(request)
pos = int(digest[:8], 16) # lower 8 bytes -> int
# default address type
if not addressClass: addressClass = ipaddr.IPv4Address
# pluggable transports
if transport:
# filter by 'methodname'
transports = filter(lambda x: transport == x.methodname,
self.transports)
# filter by 'addressClass'
transports = filter(lambda x: isinstance(x.address, addressClass),
transports)
if transports:
pt = transports[pos % len(transports)]
return pt.getTransportLine(includeFingerprint)
# filter addresses by address class
addresses = filter(lambda x: isinstance(x[0], addressClass),
self.or_addresses.items())
# default ip, orport should get a chance at being selected
if isinstance(self.ip, addressClass):
addresses.insert(0,(self.ip, addr.PortList(self.orport)))
if addresses:
address,portlist = addresses[pos % len(addresses)]
if isinstance(address, ipaddr.IPv6Address): ip = "[%s]"%address
else: ip = "%s"%address
orport = portlist[pos % len(portlist)]
if includeFingerprint:
return "%s:%d %s" % (ip, orport, self.fingerprint)
else:
return "%s:%d" % (ip, orport)
def getAllConfigLines(self,includeFingerprint=False):
"""Generator. Iterate over all valid config lines for this bridge."""
for address,portlist in self.or_addresses.items():
if type(address) is ipaddr.IPv6Address:
ip = "[%s]" % address
else:
ip = "%s" % address
for orport in portlist:
if includeFingerprint:
yield "bridge %s:%d %s" % (ip,orport,self.fingerprint)
else:
yield "bridge %s:%d" % (ip,orport)
for pt in self.transports:
            yield pt.getTransportLine(includeFingerprint)
def assertOK(self):
assert is_valid_ip(self.ip)
assert isValidFingerprint(self.fingerprint)
assert 1 <= self.orport <= 65535
if self.or_addresses:
for address, portlist in self.or_addresses.items():
assert is_valid_ip(address)
for port in portlist:
assert type(port) is int
assert 1 <= port <= 65535
def setStatus(self, running=None, stable=None):
if running is not None:
self.running = running
if stable is not None:
self.stable = stable
def isBlocked(self, countryCode, addressClass, methodname=None):
""" if at least one address:port of the selected addressClass and
(optional) transport type is not blocked in countryCode, return True
"""
# 1) transport is specified
if methodname is not None:
for transport in self.transports:
key = "%s:%s" % (transport.address, transport.port)
if (isinstance(transport.address, addressClass)
and transport.methodname.lower() == methodname.lower()):
try:
if countryCode not in self.blockingCountries[key]:
return False
except KeyError:
return False # no blocklist
return True
# 2) no transport specified (default)
else:
# 3) check primary ip, port
# XXX: could be more elegant if ip,orport were not special case
if isinstance(self.ip, addressClass):
key = "%s:%s" % (self.ip, self.orport)
try:
if countryCode not in self.blockingCountries[key]:
return False
except KeyError: return False # no blocklist
# 4) check or addresses
for address,portlist in self.or_addresses.items():
if isinstance(address, addressClass):
# check each port
for port in portlist:
key = "%s:%s" % (address, port)
try:
if countryCode not in self.blockingCountries[key]:
return False
except KeyError: return False # no blocklist
return True
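    # Illustrative sketch (not part of the original module): given a
    # blockingCountries entry such as {"1.2.3.4:9001": ['cn', 'ir']} on a
    # bridge whose primary address is 1.2.3.4:9001,
    #   bridge.isBlocked('cn', ipaddr.IPv4Address)  ->  True
    #   bridge.isBlocked('us', ipaddr.IPv4Address)  ->  False
    # (addresses and country codes are made up for the example).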
# Bridge Stability (#5482) properties.
@property
def familiar(self):
"""
A bridge is 'familiar' if 1/8 of all active bridges have appeared
more recently than it, or if it has been around for a Weighted Time of 8 days.
"""
with bridgedb.Storage.getDB() as db:
return db.getBridgeHistory(self.fingerprint).familiar
@property
def wfu(self):
"""Weighted Fractional Uptime"""
with bridgedb.Storage.getDB() as db:
return db.getBridgeHistory(self.fingerprint).weightedFractionalUptime
@property
def weightedTime(self):
"""Weighted Time"""
with bridgedb.Storage.getDB() as db:
return db.getBridgeHistory(self.fingerprint).weightedTime
@property
def wmtbac(self):
"""Weighted Mean Time Between Address Change"""
with bridgedb.Storage.getDB() as db:
return db.getBridgeHistory(self.fingerprint).wmtbac
@property
def tosa(self):
"""the Time On Same Address (TOSA)"""
with bridgedb.Storage.getDB() as db:
return db.getBridgeHistory(self.fingerprint).tosa
@property
def weightedUptime(self):
"""Weighted Uptime"""
with bridgedb.Storage.getDB() as db:
return db.getBridgeHistory(self.fingerprint).weightedUptime
@deprecate.deprecated(
Version('bridgedb', 0, 2, 4),
replacement='bridgedb.bridges.PluggableTransport')
class PluggableTransport(object):
"""A PT with reference to the parent bridge on which it is running.
Deprecated :class:`bridgedb.Bridges.PluggableTransport`, replaced in
bridgedb-0.2.4, by :class:`bridgedb.bridges.PluggableTransport`.
"""
def __init__(self, bridge, methodname, address, port, argdict=None):
"""Create a ``PluggableTransport`` describing a PT running on a bridge.
Pluggable transports are described within a bridge's ``@type
bridge-extrainfo`` descriptor, see the ``Specifications: Client
behavior`` section and the ``TOR_PT_SERVER_TRANSPORT_OPTIONS``
description in pt-spec.txt_ for additional specification.
:type bridge: :class:`Bridge`
:param bridge: The parent bridge running this pluggable transport
instance, i.e. the main ORPort bridge whose
``@type bridge-server-descriptor`` contains a hash digest for a
``@type bridge-extrainfo-document``, the latter of which contains
the parameter of this pluggable transport in its ``transport``
line.
:param str methodname: The canonical "name" for this pluggable
transport, i.e. the one which would be specified in a torrc
file. For example, ``"obfs2"``, ``"obfs3"``, ``"scramblesuit"``
would all be pluggable transport method names.
:param str address: The IP address of the transport. Currently (as of
20 March 2014), there are no known, widely-deployed pluggable
transports which support IPv6. Ergo, this is very likely going to
be an IPv4 address.
        :param int port: An integer specifying the port which this pluggable
transport is listening on. (This should likely be whatever port the
bridge specified in its ``ServerTransportPlugin`` torrc line,
unless the pluggable transport is running in "managed" mode.)
:param dict argdict: Some PTs can take additional arguments, which
must be distributed to the client out-of-band. These are present
in the ``@type bridge-extrainfo-document``, in the ``transport``
line like so::
METHOD SP ADDR ":" PORT SP [K=V[,K=V[,K=V[…]]]]
where K is the **argdict** key, and V is the value. For example,
in the case of ``scramblesuit``, for which the client must supply
a shared secret to the ``scramblesuit`` instance running on the
bridge, the **argdict** would be something like::
{'password': 'NEQGQYLUMUQGK5TFOJ4XI2DJNZTS4LRO'}
.. _pt-spec.txt:
https://gitweb.torproject.org/torspec.git/tree/pt-spec.txt
"""
        #XXX: asserts are disabled with python -O
assert isinstance(bridge, Bridge)
assert type(address) in (ipaddr.IPv4Address, ipaddr.IPv6Address)
assert type(port) is int
assert (0 < port < 65536)
assert type(methodname) is str
self.bridge = bridge
self.address = address
self.port = port
self.methodname = methodname
if type(argdict) is dict:
self.argdict = argdict
else: self.argdict = {}
def getTransportLine(self, includeFingerprint=False, bridgePrefix=False):
"""Get a torrc line for this pluggable transport.
This method does not return lines which are prefixed with the word
'bridge', as they would be in a torrc file. Instead, lines returned
look like this:
obfs3 245.102.100.252:23619 59ca743e89b508e16b8c7c6d2290efdfd14eea98
        :param bool includeFingerprint: If ``True``, include the digest of
this bridges public identity key in the torrc line.
:param bool bridgePrefix: If ``True``, add ``'Bridge '`` to the
beginning of each returned line (suitable for pasting directly
into a torrc file).
:rtype: str
:returns: A configuration line for adding this pluggable transport
into a torrc file.
"""
sections = []
if bridgePrefix:
sections.append('Bridge')
if isinstance(self.address, ipaddr.IPv6Address):
host = "%s [%s]:%d" % (self.methodname, self.address, self.port)
else:
host = "%s %s:%d" % (self.methodname, self.address, self.port)
sections.append(host)
if includeFingerprint:
sections.append(self.bridge.fingerprint)
args = " ".join(["%s=%s" % (k, v) for k, v in self.argdict.items()])
sections.append(args)
line = ' '.join(sections)
return line
@deprecate.deprecated(
Version('bridgedb', 0, 0, 1),
replacement='bridgedb.parse.addr.PortList')
class PortList:
"""Deprecated :class:`bridgedb.Bridges.PortList`, replaced in
bridgedb-0.1.0, in commit 1f111e5, by
:class:`bridgedb.parse.addr.PortList`.
This class and the newer class from :mod:`bridgedb.parse.addr` are
alternately :api:`~twisted.python.monkey.MonkeyPatcher.patch`ed into the
:mod:`old unittests <bridgedb.Tests>`, so that the later functions as a
suite of regression tests.
"""
def __init__(self, *args, **kwargs):
self.ports = set()
self.add(*args)
def _sanitycheck(self, val):
#XXX: if debug=False this is disabled. bad!
assert type(val) is int
assert(0 < val <= 65535)
def __contains__(self, val1):
return val1 in self.ports
def add(self, *args):
PORTSPEC_LEN = 16
for arg in args:
try:
if type(arg) is str:
ports = set([int(p) for p in arg.split(',')][:PORTSPEC_LEN])
[self._sanitycheck(p) for p in ports]
self.ports.update(ports)
if type(arg) is int:
self._sanitycheck(arg)
self.ports.update([arg])
if type(arg) is PortList:
self.add(list(arg.ports))
except AssertionError: raise ValueError
except ValueError: raise
def __iter__(self):
return self.ports.__iter__()
def __str__(self):
s = ""
for p in self.ports:
s += "".join(",%s"%p)
return s.lstrip(",")
def __repr__(self):
return "PortList('%s')" % self.__str__()
def __len__(self):
return len(self.ports)
def __getitem__(self, x):
return list(self.ports)[x]
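    # Illustrative sketch (not part of the original module): ports can be
    # added from comma-separated strings or from ints, e.g.
    #   >>> pl = PortList('443,9001')
    #   >>> 443 in pl, len(pl)
    #   (True, 2)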
|
the-stack_0_24787
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# Android builder
"""
Usage (in wscript):
def options(opt):
opt.load('android')
def configure(conf):
conf.load('android')
"""
import os, sys, random, time, re, stat, string, imghdr, multiprocessing
import atexit, shutil, threading, collections, hashlib, subprocess
import xml.etree.ElementTree as ET
from contextlib import contextmanager
from subprocess import call, check_output
from cry_utils import append_to_unique_list, get_command_line_limit
from third_party import is_third_party_uselib_configured
from waflib import Context, TaskGen, Build, Utils, Node, Logs, Options, Errors
from waflib.Build import POST_LAZY, POST_AT_ONCE
from waflib.Configure import conf
from waflib.Task import Task, ASK_LATER, RUN_ME, SKIP_ME
from waflib.TaskGen import feature, extension, before, before_method, after, after_method, taskgen_method
from waflib.Tools import ccroot
ccroot.USELIB_VARS['android'] = set([ 'AAPT', 'AAPT_RESOURCES', 'AAPT_INCLUDES', 'AAPT_PACKAGE_FLAGS' ])
################################################################
# Defaults #
BUILDER_DIR = 'Code/Launcher/AndroidLauncher/ProjectBuilder'
BUILDER_FILES = 'android_builder.json'
ANDROID_LIBRARY_FILES = 'android_libraries.json'
RESOLUTION_MESSAGE = 'Please re-run Setup Assistant with "Compile For Android" enabled and run the configure command again.'
RESOLUTION_SETTINGS = ( 'mdpi', 'hdpi', 'xhdpi', 'xxhdpi', 'xxxhdpi' )
LATEST_KEYWORD = 'latest'
ANDROID_CACHE_FOLDER = 'AndroidCache'
APK_WITH_ASSETS_SUFFIX = '_w_assets'
# these are the default names for application icons and splash images
APP_ICON_NAME = 'app_icon.png'
APP_SPLASH_NAME = 'app_splash.png'
# supported api versions
SUPPORTED_APIS = [
'android-19',
'android-21',
'android-22',
'android-23',
'android-24',
'android-25',
'android-26',
]
MIN_ARMv8_API = 'android-21'
# while some earlier versions may work, it's probably best to enforce a min version of the build tools
MIN_BUILD_TOOLS_VERSION = '19.1.0'
# known build tools versions with stability issues
UNSUPPORTED_BUILD_TOOLS_VERSIONS = {
'win32' : [
'24.0.0' # works fine on win 7 machines but consistently crashes on win 10 machines
]
}
# build tools versions marked as obsolete by the Android SDK manager
OBSOLETE_BUILD_TOOLS_VERSIONS = [
'21.0.0',
'21.0.1',
'21.0.2',
'21.1.0',
'21.1.1',
'22.0.0',
'23.0.0'
]
# 'defines' for the different asset deployment modes
ASSET_DEPLOY_LOOSE = 'loose'
ASSET_DEPLOY_PAKS = 'paks'
ASSET_DEPLOY_PROJECT_SETTINGS = 'project_settings'
ASSET_DEPLOY_MODES = [
ASSET_DEPLOY_LOOSE,
ASSET_DEPLOY_PAKS,
ASSET_DEPLOY_PROJECT_SETTINGS
]
# root types
ACCESS_NORMAL = 0 # the device is not rooted, we do not have access to any elevated permissions
ACCESS_ROOT_ADBD = 1 # the device is rooted, we have elevated permissions at the adb level
ACCESS_SHELL_SU = 2 # the device is rooted, we only have elevated permissions using 'adb shell su -c'
# The default permissions for installed libraries on device.
LIB_FILE_PERMISSIONS = '755'
# The default owner:group for installed libraries on device.
LIB_OWNER_GROUP = 'system:system'
# the default file permissions after copies are made. 511 => 'chmod 777 <file>'
FILE_PERMISSIONS = 511
AUTO_GEN_HEADER_PYTHON = r'''
################################################################
# This file was automatically created by WAF
# WARNING! All modifications will be lost!
################################################################
'''
# #
################################################################
################################################################
"""
Parts of the Android build process require the ability to create directory junctions/symlinks to make sure some assets are properly
included in the build while keeping the footprint as small as possible (we don't want to make copies). Since we only care
about directories, we don't need the full power of os.symlink (it doesn't work on windows anyway, and writing one would require either
admin privileges or running something such as a VB script to create a shortcut to bypass the admin issue; neither of those options
is desirable). The following functions make it explicitly clear that we only care about directory links.
"""
def junction_directory(source, link_name):
if not os.path.isdir(source):
Logs.error("[ERROR] Attempting to make a junction to a file, which is not supported. Unexpected behaviour may result.")
return
if Utils.unversioned_sys_platform() == "win32":
cleaned_source_name = '"' + source.replace('/', '\\') + '"'
cleaned_link_name = '"' + link_name.replace('/', '\\') + '"'
# mklink generally requires admin privileges; however, directory junctions don't.
# subprocess.check_call will auto raise.
subprocess.check_call('mklink /D /J %s %s' % (cleaned_link_name, cleaned_source_name), shell=True)
else:
os.symlink(source, link_name)
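# Illustrative note (hypothetical paths): on Windows the check_call above expands
# to something like
#     mklink /D /J "C:\dev\link_dir" "C:\dev\source_dir"
# creating a directory junction, while on other platforms a plain symlink named
# ``link_name`` pointing at ``source`` is created instead.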
def remove_junction(junction_path):
"""
Wrapper for correctly deleting a symlink/junction regardless of host platform
"""
if Utils.unversioned_sys_platform() == "win32":
os.rmdir(junction_path)
else:
os.unlink(junction_path)
@contextmanager
def push_dir(directory):
"""
Temporarily changes the current working directory. Decorating it with contextmanager makes this function only
usable in "with" statements; otherwise it's a no-op. When the "with" statement is executed, this function will run
up to the yield, then run what's inside the "with" statement, and finally run what's after the yield.
"""
previous_dir = os.getcwd()
os.chdir(directory)
try:
    yield
finally:
    # restore the previous working directory even if the wrapped block raises
    os.chdir(previous_dir)
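# Hedged usage sketch for the context manager above (path is hypothetical):
#
#     with push_dir('/tmp'):
#         subprocess.check_call(['ls'])   # runs with cwd == /tmp
#     # the previous working directory is restored here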
################################################################
@feature('cshlib', 'cxxshlib')
@after_method('apply_link')
def apply_so_name(self):
"""
Adds the linker flag to set the DT_SONAME in ELF shared objects. The
name used here will be used instead of the file name when the dynamic
linker attempts to load the shared object
"""
if 'android' in self.bld.env['PLATFORM'] and self.env.SONAME_ST:
flag = self.env.SONAME_ST % self.link_task.outputs[0]
self.env.append_value('LINKFLAGS', flag.split())
################################################################
@conf
def get_android_api_lib_list(ctx):
"""
Gets a list of android apis that pre-built libs could be built against based
on the current build target e.g. NDK_PLATFORM
"""
api_list = sorted(SUPPORTED_APIS)
ndk_platform = ctx.get_android_ndk_platform()
try:
index = api_list.index(ndk_platform)
except:
ctx.fatal('[ERROR] Unsupported Android NDK platform version %s' % ndk_platform)
else:
# we can only use libs built with api levels lower or equal to the one being used
return api_list[:index + 1] # end index is exclusive, so we add 1
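# Illustrative example (values assumed): with SUPPORTED_APIS as defined above and
# the NDK platform set to 'android-22', the function returns
#     ['android-19', 'android-21', 'android-22']
# i.e. every supported API level up to and including the target platform.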
################################################################
@conf
def is_android_armv8_api_valid(ctx):
"""
Checks to make sure desired API level meets the min spec for ARMv8 targets
"""
ndk_platform = ctx.get_android_ndk_platform()
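# NOTE: this is a lexicographic string comparison; it holds here because every
# entry in SUPPORTED_APIS shares the 'android-NN' format with a two-digit level.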
return (ndk_platform >= MIN_ARMv8_API)
################################################################
def remove_file_and_empty_directory(directory, file_name):
"""
Helper function for deleting a file and directory, if empty
"""
file_path = os.path.join(directory, file_name)
# first delete the file, if it exists
if os.path.exists(file_path):
os.remove(file_path)
# then remove the directory, if it exists and is empty
if os.path.exists(directory) and not os.listdir(directory):
os.rmdir(directory)
################################################################
def remove_readonly(func, path, _):
'''Clear the readonly bit and reattempt the removal'''
os.chmod(path, stat.S_IWRITE)
func(path)
################################################################
def construct_source_path(conf, project, source_path):
"""
Helper to construct the source path to an asset override such as
application icons or splash screen images
"""
if os.path.isabs(source_path):
path_node = conf.root.make_node(source_path)
else:
relative_path = os.path.join('Code', project, 'Resources', source_path)
path_node = conf.path.make_node(relative_path)
return path_node.abspath()
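# Illustrative example (names assumed): for project 'MyGame' and a relative
# source_path 'GameIcon.png', the helper above resolves to
#     <engine root>/Code/MyGame/Resources/GameIcon.png
# while absolute source paths are used as-is.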
################################################################
def clear_splash_assets(project_node, path_prefix):
target_directory = project_node.make_node(path_prefix)
remove_file_and_empty_directory(target_directory.abspath(), APP_SPLASH_NAME)
for resolution in RESOLUTION_SETTINGS:
# The xxxhdpi resolution is only for application icons, it's overkill to include them for drawables... for now
if resolution == 'xxxhdpi':
continue
target_directory = project_node.make_node(path_prefix + '-' + resolution)
remove_file_and_empty_directory(target_directory.abspath(), APP_SPLASH_NAME)
################################################################
def options(opt):
group = opt.add_option_group('android-specific config')
group.add_option('--android-toolchain', dest = 'android_toolchain', action = 'store', default = '', help = 'DEPRECATED: Android toolchain to use for building, valid options are gcc or clang')
group.add_option('--use-incredibuild-android', dest = 'use_incredibuild_android', action = 'store', default = 'False', help = 'DEPRECATED: Use --use-incredibuild instead to enable Incredibuild for Android.')
group.add_option('--android-sdk-version-override', dest = 'android_sdk_version_override', action = 'store', default = '', help = 'Override the Android SDK version used in the Java compilation. Only works during configure.')
group.add_option('--android-ndk-platform-override', dest = 'android_ndk_platform_override', action = 'store', default = '', help = 'Override the Android NDK platform version used in the native compilation. Only works during configure.')
group.add_option('--dev-store-pass', dest = 'dev_store_pass', action = 'store', default = 'Lumberyard', help = 'The store password for the development keystore')
group.add_option('--dev-key-pass', dest = 'dev_key_pass', action = 'store', default = 'Lumberyard', help = 'The key password for the development keystore')
group.add_option('--distro-store-pass', dest = 'distro_store_pass', action = 'store', default = '', help = 'The store password for the distribution keystore')
group.add_option('--distro-key-pass', dest = 'distro_key_pass', action = 'store', default = '', help = 'The key password for the distribution keystore')
group.add_option('--android-apk-path', dest = 'apk_path', action = 'store', default = '', help = 'Path to apk to deploy. If not specified the default build path will be used')
group.add_option('--from-editor-deploy', dest = 'from_editor_deploy', action = 'store_true', default = False, help = 'Signals that the build is coming from the editor deployment tool')
group.add_option('--deploy-android-attempt-libs-only', dest = 'deploy_android_attempt_libs_only', action = 'store_true', default = False,
help = 'Will only push the changed native libraries. If "deploy_android_executable" is enabled, it will take precedent if modified. Option ignored if "deploy_android_clean_device" is enabled. This feature is only available for "unlocked" devices.')
################################################################
def configure(conf):
env = conf.env
# validate the stored sdk and ndk paths from SetupAssistant
sdk_root = conf.get_env_file_var('LY_ANDROID_SDK', required = True)
ndk_root = conf.get_env_file_var('LY_ANDROID_NDK', required = True)
if not (sdk_root and ndk_root):
missing_paths = []
missing_paths += ['Android SDK'] if not sdk_root else []
missing_paths += ['Android NDK'] if not ndk_root else []
conf.fatal('[ERROR] Missing paths from Setup Assistant detected for: {}. {}'.format(', '.join(missing_paths), RESOLUTION_MESSAGE))
env['ANDROID_SDK_HOME'] = sdk_root
env['ANDROID_NDK_HOME'] = ndk_root
# get the revision of the NDK
with open(os.path.join(ndk_root, 'source.properties')) as ndk_props_file:
for line in ndk_props_file.readlines():
tokens = line.split('=')
trimmed_tokens = [token.strip() for token in tokens]
if 'Pkg.Revision' in trimmed_tokens:
ndk_rev = trimmed_tokens[1]
env['ANDROID_NDK_REV_FULL'] = ndk_rev
ndk_rev_tokens = ndk_rev.split('.')
env['ANDROID_NDK_REV_MAJOR'] = int(ndk_rev_tokens[0])
env['ANDROID_NDK_REV_MINOR'] = int(ndk_rev_tokens[1])
headers_type = 'unified platform' if conf.is_using_android_unified_headers() else 'platform specific'
Logs.debug('android: Using {} headers with Android NDK revision {}.'.format(headers_type, ndk_rev))
# validate the desired SDK version
installed_sdk_versions = os.listdir(os.path.join(sdk_root, 'platforms'))
valid_sdk_versions = [platform for platform in installed_sdk_versions if platform in SUPPORTED_APIS]
Logs.debug('android: Valid installed SDK versions are: {}'.format(valid_sdk_versions))
sdk_version = Options.options.android_sdk_version_override
if not sdk_version:
sdk_version = conf.get_android_sdk_version()
if sdk_version.lower() == LATEST_KEYWORD:
if not valid_sdk_versions:
conf.fatal('[ERROR] Unable to detect a valid Android SDK version installed in path {}. '
'Please use the Android SDK Manager to download an appropriate SDK version and run the configure command again.\n'
'\t-> Supported APIs installed are: {}'.format(sdk_root, ', '.join(SUPPORTED_APIS)))
valid_sdk_versions = sorted(valid_sdk_versions)
sdk_version = valid_sdk_versions[-1]
Logs.debug('android: Using the latest installed Android SDK version {}'.format(sdk_version))
else:
if sdk_version not in SUPPORTED_APIS:
conf.fatal('[ERROR] Android SDK version - {} - is unsupported. Please change SDK_VERSION in _WAF_/android/android_settings.json to a supported API and run the configure command again.\n'
'\t-> Supported APIs are: {}'.format(sdk_version, ', '.join(SUPPORTED_APIS)))
if sdk_version not in valid_sdk_versions:
conf.fatal('[ERROR] Failed to find Android SDK version - {} - installed in path {}. '
'Please use the Android SDK Manager to download the appropriate SDK version or change SDK_VERSION in _WAF_/android/android_settings.json to a supported version installed and run the configure command again.\n'
'\t-> Supported APIs installed are: {}'.format(sdk_version, sdk_root, ', '.join(valid_sdk_versions)))
env['ANDROID_SDK_VERSION'] = sdk_version
env['ANDROID_SDK_VERSION_NUMBER'] = int(sdk_version.split('-')[1])
# validate the desired NDK platform version
ndk_platform = Options.options.android_ndk_platform_override
if not ndk_platform:
ndk_platform = conf.get_android_ndk_platform()
ndk_sdk_match = False
if not ndk_platform:
Logs.debug('android: The Android NDK platform version has not been specified. Auto-detecting from specified Android SDK version {}.'.format(sdk_version))
ndk_platform = sdk_version
ndk_sdk_match = True
ndk_platforms = os.listdir(os.path.join(ndk_root, 'platforms'))
valid_ndk_platforms = [platform for platform in ndk_platforms if platform in SUPPORTED_APIS]
Logs.debug('android: Valid NDK platforms for revision {} are: {}'.format(ndk_rev, valid_ndk_platforms))
if ndk_platform not in valid_ndk_platforms:
if ndk_sdk_match:
# search the valid ndk platforms for one that is closest, but lower, than the desired sdk version
sorted_valid_platforms = sorted(valid_ndk_platforms)
for platform in sorted_valid_platforms:
if platform <= sdk_version:
ndk_platform = platform
Logs.debug('android: Closest Android NDK platform version detected from Android SDK version {} is {}'.format(sdk_version, ndk_platform))
else:
platform_list = ', '.join(valid_ndk_platforms)
conf.fatal("[ERROR] Attempting to use a NDK platform - {} - that is either unsupported or doesn't have platform specific headers. "
"Please set NDK_PLATFORM in _WAF_/android/android_settings.json to a valid platform or remove to auto-detect from SDK_VERSION and run the configure command again.\n"
"\t-> Valid platforms for NDK {} include: {}".format(ndk_platform, ndk_rev, platform_list))
env['ANDROID_NDK_PLATFORM'] = ndk_platform
env['ANDROID_NDK_PLATFORM_NUMBER'] = int(ndk_platform.split('-')[1])
# final check is to make sure the ndk platform <= sdk version to ensure compatibility
if not (ndk_platform <= sdk_version):
conf.fatal('[ERROR] The Android API specified in NDK_PLATFORM - {} - is newer than the API specified in SDK_VERSION - {}; this can lead to compatibility issues.\n'
'Please update your _WAF_/android/android_settings.json to make sure NDK_PLATFORM <= SDK_VERSION and run the configure command again.'.format(ndk_platform, sdk_version))
# validate the desired SDK build-tools version
build_tools_version = conf.get_android_build_tools_version()
build_tools_dir = os.path.join(sdk_root, 'build-tools')
build_tools_dir_contents = os.listdir(build_tools_dir)
host_platform = Utils.unversioned_sys_platform()
host_unsupported_build_tools_versions = UNSUPPORTED_BUILD_TOOLS_VERSIONS.get(host_platform, [])
unusable_build_tools_versions = host_unsupported_build_tools_versions + OBSOLETE_BUILD_TOOLS_VERSIONS
installed_build_tools_versions = [ entry for entry in build_tools_dir_contents if entry.split('.')[0].isdigit() ]
valid_build_tools_versions = [ entry for entry in installed_build_tools_versions if entry >= MIN_BUILD_TOOLS_VERSION and entry not in unusable_build_tools_versions ]
Logs.debug('android: Valid installed build-tools versions are: {}'.format(valid_build_tools_versions))
if build_tools_version.lower() == LATEST_KEYWORD:
if not valid_build_tools_versions:
conf.fatal('[ERROR] Unable to detect a valid Android SDK build-tools version installed in path {}. Please use the Android SDK Manager to download build-tools '
'version {} or higher and run the configure command again. Also note the following versions are unsupported:\n'
'\t-> {}'.format(sdk_root, MIN_BUILD_TOOLS_VERSION, ', '.join(host_unsupported_build_tools_versions)))
valid_build_tools_versions = sorted(valid_build_tools_versions)
build_tools_version = valid_build_tools_versions[-1]
Logs.debug('android: Using the latest installed Android SDK build tools version {}'.format(build_tools_version))
elif build_tools_version not in valid_build_tools_versions:
if build_tools_version in OBSOLETE_BUILD_TOOLS_VERSIONS:
Logs.warn('[WARN] The build-tools version selected - {} - has been marked as obsolete by Google. Consider using a different version of the build tools by '
'changing BUILD_TOOLS_VER in _WAF_/android/android_settings.json to "latest" or a version mentioned below and run the configure command again.\n'
'\t-> Valid installed build-tools version detected: {}'.format(build_tools_version, ', '.join(valid_build_tools_versions)))
else:
conf.fatal('[ERROR] The build-tools version selected - {} - was either not found in path {} or unsupported. Please use Android SDK Manager to download the appropriate build-tools version '
'or change BUILD_TOOLS_VER in _WAF_/android/android_settings.json to either a valid version installed or "latest" and run the configure command again.\n'
'\t-> Valid installed build-tools version detected: {}'.format(build_tools_version, build_tools_dir, ', '.join(valid_build_tools_versions)))
conf.env['ANDROID_BUILD_TOOLS_VER'] = build_tools_version
################################################################
@conf
def is_using_android_unified_headers(conf):
"""
NDK r14 introduced unified headers which is to replace the old platform specific set
of headers in previous versions of the NDK. There is a bug in one of the headers
(strerror_r isn't properly defined) in this version but fixed in NDK r14b. Because of
this we will make NDK r14b our min spec for using the unified headers.
"""
env = conf.env
ndk_rev_major = env['ANDROID_NDK_REV_MAJOR']
ndk_rev_minor = env['ANDROID_NDK_REV_MINOR']
return ((ndk_rev_major == 14 and ndk_rev_minor >= 1) or (ndk_rev_major >= 15))
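# Illustrative example (assumed revision values parsed from source.properties):
#     NDK r14  -> (14, 0) -> False
#     NDK r14b -> (14, 1) -> True
#     NDK r15+ -> (15, x) -> True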
################################################################
@conf
def load_android_toolchains(conf, search_paths, CC, CXX, AR, STRIP, **addition_toolchains):
"""
Loads the native toolchains from the Android NDK
"""
try:
conf.find_program(CC, var = 'CC', path_list = search_paths, silent_output = True)
conf.find_program(CXX, var = 'CXX', path_list = search_paths, silent_output = True)
conf.find_program(AR, var = 'AR', path_list = search_paths, silent_output = True)
# for debug symbol stripping
conf.find_program(STRIP, var = 'STRIP', path_list = search_paths, silent_output = True)
# optional linker override
if 'LINK_CC' in addition_toolchains and 'LINK_CXX' in addition_toolchains:
conf.find_program(addition_toolchains['LINK_CC'], var = 'LINK_CC', path_list = search_paths, silent_output = True)
conf.find_program(addition_toolchains['LINK_CXX'], var = 'LINK_CXX', path_list = search_paths, silent_output = True)
else:
conf.env['LINK_CC'] = conf.env['CC']
conf.env['LINK_CXX'] = conf.env['CXX']
conf.env['LINK'] = conf.env['LINK_CC']
# common cc settings
conf.cc_load_tools()
conf.cc_add_flags()
# common cxx settings
conf.cxx_load_tools()
conf.cxx_add_flags()
# common link settings
conf.link_add_flags()
except:
Logs.error('[ERROR] Failed to find the Android NDK standalone toolchain(s) in search path %s' % search_paths)
return False
return True
################################################################
@conf
def load_android_tools(conf):
"""
Loads the necessary build tools from the Android SDK
"""
android_sdk_home = conf.env['ANDROID_SDK_HOME']
build_tools_version = conf.get_android_build_tools_version()
build_tools_dir = os.path.join(android_sdk_home, 'build-tools', build_tools_version)
try:
conf.find_program('aapt', var = 'AAPT', path_list = [ build_tools_dir ], silent_output = True)
conf.find_program('aidl', var = 'AIDL', path_list = [ build_tools_dir ], silent_output = True)
conf.find_program('dx', var = 'DX', path_list = [ build_tools_dir ], silent_output = True)
conf.find_program('zipalign', var = 'ZIPALIGN', path_list = [ build_tools_dir ], silent_output = True)
except:
Logs.error('[ERROR] The desired Android SDK build-tools version - {} - appears to be incomplete. Please use Android SDK Manager to validate the build-tools version installation '
'or change BUILD_TOOLS_VER in _WAF_/android/android_settings.json to either a version installed or "latest" and run the configure command again.'.format(build_tools_version))
return False
return True
################################################################
@conf
def get_android_cache_node(conf):
return conf.get_bintemp_folder_node().make_node(ANDROID_CACHE_FOLDER)
################################################################
@conf
def add_to_android_cache(conf, path_to_resource):
"""
Adds resource files from outside the engine folder into a local cache directory so they can be used by WAF tasks.
Returns the path of the new resource file relative the cache root.
"""
cache_node = get_android_cache_node(conf)
cache_node.mkdir()
dest_node = cache_node
if conf.env['ANDROID_ARCH']:
dest_node = cache_node.make_node(conf.env['ANDROID_ARCH'])
dest_node.mkdir()
file_name = os.path.basename(path_to_resource)
files_node = dest_node.make_node(file_name)
files_node.delete()
shutil.copy2(path_to_resource, files_node.abspath())
files_node.chmod(FILE_PERMISSIONS)
rel_path = files_node.path_from(cache_node)
Logs.debug('android: Adding resource - {} - to Android cache'.format(rel_path))
return rel_path
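# Hedged usage sketch (path and arch are hypothetical):
#
#     rel = conf.add_to_android_cache('/opt/thirdparty/libfoo.so')
#     # rel == 'libfoo.so', or '<ANDROID_ARCH>/libfoo.so' when ANDROID_ARCH is
#     # set, relative to the AndroidCache folder under BinTemp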
################################################################
def process_json(conf, json_data, curr_node, root_node, template, copied_files):
for elem in json_data:
if elem == 'NO_OP':
continue
if os.path.isabs(elem):
source_curr = conf.root.make_node(elem)
else:
source_curr = root_node.make_node(elem)
target_curr = curr_node.make_node(elem)
if isinstance(json_data, dict):
# resolve name overrides for the copy, if specified
if isinstance(json_data[elem], unicode) or isinstance(json_data[elem], str):
target_curr = curr_node.make_node(json_data[elem])
# otherwise continue processing the tree
else:
target_curr.mkdir()
process_json(conf, json_data[elem], target_curr, root_node, template, copied_files)
continue
# leaf handling
if imghdr.what(source_curr.abspath()) in ( 'rgb', 'gif', 'pbm', 'ppm', 'tiff', 'rast', 'xbm', 'jpeg', 'bmp', 'png' ):
shutil.copyfile(source_curr.abspath(), target_curr.abspath())
else:
transformed_text = string.Template(source_curr.read()).substitute(template)
target_curr.write(transformed_text)
target_curr.chmod(FILE_PERMISSIONS)
copied_files.append(target_curr.abspath())
################################################################
def copy_and_patch_android_libraries(conf, source_node, android_root):
"""
Copy the libraries that need to be patched and do the patching of the files.
"""
class _Library:
def __init__(self, name, path):
self.name = name
self.path = path
self.patch_files = []
def add_file_to_patch(self, file):
self.patch_files.append(file)
class _File:
def __init__(self, path):
self.path = path
self.changes = []
def add_change(self, change):
self.changes.append(change)
class _Change:
def __init__(self, line, old, new):
self.line = line
self.old = old
self.new = new
lib_src_file = source_node.make_node(ANDROID_LIBRARY_FILES)
json_data = conf.parse_json_file(lib_src_file)
if not json_data:
conf.fatal('[ERROR] Android library settings (%s) not found or invalid.' % ANDROID_LIBRARY_FILES)
return False
# Collect the libraries that need to be patched
libs_to_patch = []
for libName, value in json_data.iteritems():
# The library is in different places depending on the revision, so we must check multiple paths.
srcDir = None
for path in value['srcDir']:
path = string.Template(path).substitute(conf.env)
if os.path.exists(path):
srcDir = path
break
if not srcDir:
conf.fatal('[ERROR] Failed to find library - %s - in path(s) [%s]. Please download the library from the Android SDK Manager and run the configure command again.'
% (libName, ", ".join(string.Template(path).substitute(conf.env) for path in value['srcDir'])))
return False
if 'patches' in value:
lib_to_patch = _Library(libName, srcDir)
for patch in value['patches']:
file_to_patch = _File(patch['path'])
for change in patch['changes']:
lineNum = change['line']
oldLines = change['old']
newLines = change['new']
for oldLine in oldLines[:-1]:
change = _Change(lineNum, oldLine, (newLines.pop() if newLines else None))
file_to_patch.add_change(change)
lineNum += 1
else:
change = _Change(lineNum, oldLines[-1], ('\n'.join(newLines) if newLines else None))
file_to_patch.add_change(change)
lib_to_patch.add_file_to_patch(file_to_patch)
libs_to_patch.append(lib_to_patch)
# Patch the libraries
for lib in libs_to_patch:
cur_path = conf.path.abspath()
rel_path = conf.get_android_patched_libraries_relative_path()
dest_path = os.path.join(cur_path, rel_path, lib.name)
shutil.rmtree(dest_path, ignore_errors=True, onerror=remove_readonly)
shutil.copytree(lib.path, dest_path)
for file in lib.patch_files:
inputFilePath = os.path.join(lib.path, file.path)
outputFilePath = os.path.join(dest_path, file.path)
with open(inputFilePath) as inputFile:
lines = inputFile.readlines()
with open(outputFilePath, 'w') as outFile:
for replace in file.changes:
lines[replace.line] = string.replace(lines[replace.line], replace.old, (replace.new if replace.new else ""), 1)
outFile.write(''.join([line for line in lines if line]))
return True
################################################################
@conf
def create_and_add_android_launchers_to_build(conf):
"""
This function will generate the bare minimum android project
and include the new android launcher(s) in the build path.
So no Android Studio gradle files will be generated.
"""
android_root = conf.path.make_node(conf.get_android_project_relative_path())
android_root.mkdir()
if conf.is_engine_local():
source_node = conf.path.make_node(BUILDER_DIR)
else:
source_node = conf.root.make_node(os.path.abspath(os.path.join(conf.engine_path,BUILDER_DIR)))
builder_file_src = source_node.make_node(BUILDER_FILES)
builder_file_dest = conf.path.get_bld().make_node(BUILDER_DIR)
if not os.path.exists(builder_file_src.abspath()):
conf.fatal('[ERROR] Failed to find the Android project builder - %s - in path %s. Verify file exists and run the configure command again.' % (BUILDER_FILES, BUILDER_DIR))
return False
created_directories = []
for project in conf.get_enabled_game_project_list():
# make sure the project has android options specified
if conf.get_android_settings(project) == None:
Logs.warn('[WARN] Android settings not found in %s/project.json, skipping.' % project)
continue
proj_root = android_root.make_node(conf.get_executable_name(project))
proj_root.mkdir()
created_directories.append(proj_root.path_from(android_root))
proj_src_path = os.path.join(proj_root.abspath(), 'src')
if os.path.exists(proj_src_path):
shutil.rmtree(proj_src_path, ignore_errors=True, onerror=remove_readonly)
# setup the macro replacement map for the builder files
activity_name = '%sActivity' % project
transformed_package = conf.get_android_package_name(project).replace('.', '/')
template = {
'ANDROID_PACKAGE' : conf.get_android_package_name(project),
'ANDROID_PACKAGE_PATH' : transformed_package,
'ANDROID_APP_NAME' : conf.get_launcher_product_name(project), # external facing name
'ANDROID_PROJECT_NAME' : project, # internal facing name
'ANDROID_PROJECT_ACTIVITY' : activity_name,
'ANDROID_LAUNCHER_NAME' : conf.get_executable_name(project), # first native library to load from java
'ANDROID_VERSION_NUMBER' : conf.get_android_version_number(project),
'ANDROID_VERSION_NAME' : conf.get_android_version_name(project),
'ANDROID_SCREEN_ORIENTATION' : conf.get_android_orientation(project),
'ANDROID_APP_PUBLIC_KEY' : conf.get_android_app_public_key(project),
'ANDROID_APP_OBFUSCATOR_SALT' : conf.get_android_app_obfuscator_salt(project),
'ANDROID_USE_MAIN_OBB' : conf.get_android_use_main_obb(project),
'ANDROID_USE_PATCH_OBB' : conf.get_android_use_patch_obb(project),
'ANDROID_ENABLE_KEEP_SCREEN_ON' : conf.get_android_enable_keep_screen_on(project),
'ANDROID_MIN_SDK_VERSION' : conf.env['ANDROID_NDK_PLATFORM_NUMBER'],
'ANDROID_TARGET_SDK_VERSION' : conf.env['ANDROID_SDK_VERSION_NUMBER'],
}
# update the builder file with the correct package name
transformed_node = builder_file_dest.find_or_declare('%s_builder.json' % project)
transformed_text = string.Template(builder_file_src.read()).substitute(template)
transformed_node.write(transformed_text)
# process the builder file and create project
copied_files = []
json_data = conf.parse_json_file(transformed_node)
process_json(conf, json_data, proj_root, source_node, template, copied_files)
# resolve the application icon overrides
resource_node = proj_root.make_node(['src', 'main', 'res'])
icon_overrides = conf.get_android_app_icons(project)
if icon_overrides is not None:
mipmap_path_prefix = 'mipmap'
# if a default icon is specified, then copy it into the generic mipmap folder
default_icon = icon_overrides.get('default', None)
if default_icon is not None:
default_icon_source_node = construct_source_path(conf, project, default_icon)
default_icon_target_dir = resource_node.make_node(mipmap_path_prefix)
default_icon_target_dir.mkdir()
dest_file = os.path.join(default_icon_target_dir.abspath(), APP_ICON_NAME)
shutil.copyfile(default_icon_source_node, dest_file)
os.chmod(dest_file, FILE_PERMISSIONS)
copied_files.append(dest_file)
else:
Logs.debug('android: No default icon override specified for %s' % project)
# process each of the resolution overrides
for resolution in RESOLUTION_SETTINGS:
target_directory = resource_node.make_node(mipmap_path_prefix + '-' + resolution)
# get the current resolution icon override
icon_source = icon_overrides.get(resolution, default_icon)
if icon_source is default_icon:
# if both the resolution and the default are unspecified, warn the user but do nothing
if icon_source is None:
Logs.warn('[WARN] No icon override found for "%s". Either supply one for "%s" or a "default" in the android_settings "icon" section of the project.json file for %s' % (resolution, resolution, project))
# if only the resolution is unspecified, remove the resolution specific version from the project
else:
Logs.debug('android: Default icon being used for "%s" in %s' % (resolution, project))
remove_file_and_empty_directory(target_directory.abspath(), APP_ICON_NAME)
continue
icon_source_node = construct_source_path(conf, project, icon_source)
icon_target_node = target_directory.make_node(APP_ICON_NAME)
shutil.copyfile(icon_source_node, icon_target_node.abspath())
icon_target_node.chmod(FILE_PERMISSIONS)
copied_files.append(icon_target_node.abspath())
# resolve the application splash screen overrides
splash_overrides = conf.get_android_app_splash_screens(project)
if splash_overrides is not None:
drawable_path_prefix = 'drawable-'
for orientation in ('land', 'port'):
orientation_path_prefix = drawable_path_prefix + orientation
oriented_splashes = splash_overrides.get(orientation, {})
# if a default splash image is specified for this orientation, then copy it into the generic drawable-<orientation> folder
default_splash_img = oriented_splashes.get('default', None)
if default_splash_img is not None:
default_splash_img_source_node = construct_source_path(conf, project, default_splash_img)
default_splash_img_target_dir = resource_node.make_node(orientation_path_prefix)
default_splash_img_target_dir.mkdir()
dest_file = os.path.join(default_splash_img_target_dir.abspath(), APP_SPLASH_NAME)
shutil.copyfile(default_splash_img_source_node, dest_file)
os.chmod(dest_file, FILE_PERMISSIONS)
copied_files.append(dest_file)
else:
Logs.debug('android: No default splash screen override specified for "%s" orientation in %s' % (orientation, project))
# process each of the resolution overrides
for resolution in RESOLUTION_SETTINGS:
# The xxxhdpi resolution is only for application icons, it's overkill to include them for drawables... for now
if resolution == 'xxxhdpi':
continue
target_directory = resource_node.make_node(orientation_path_prefix + '-' + resolution)
# get the current resolution splash image override
splash_img_source = oriented_splashes.get(resolution, default_splash_img)
if splash_img_source is default_splash_img:
# if both the resolution and the default are unspecified, warn the user but do nothing
if splash_img_source is None:
section = "%s-%s" % (orientation, resolution)
Logs.warn('[WARN] No splash screen override found for "%s". Either supply one for "%s" or a "default" in the android_settings "splash_screen-%s" section of the project.json file for %s' % (section, resolution, orientation, project))
# if only the resolution is unspecified, remove the resolution specific version from the project
else:
Logs.debug('android: Default splash screen being used for "%s-%s" in %s' % (orientation, resolution, project))
remove_file_and_empty_directory(target_directory.abspath(), APP_SPLASH_NAME)
continue
splash_img_source_node = construct_source_path(conf, project, splash_img_source)
splash_img_target_node = target_directory.make_node(APP_SPLASH_NAME)
shutil.copyfile(splash_img_source_node, splash_img_target_node.abspath())
splash_img_target_node.chmod(FILE_PERMISSIONS)
copied_files.append(splash_img_target_node.abspath())
# additional optimization to only include the splash screens for the available orientations allowed by the manifest
requested_orientation = conf.get_android_orientation(project)
if requested_orientation in ('landscape', 'reverseLandscape', 'sensorLandscape', 'userLandscape'):
Logs.debug('android: Clearing the portrait assets from %s' % project)
clear_splash_assets(resource_node, 'drawable-port')
elif requested_orientation in ('portrait', 'reversePortrait', 'sensorPortrait', 'userPortrait'):
Logs.debug('android: Clearing the landscape assets from %s' % project)
clear_splash_assets(resource_node, 'drawable-land')
# delete all files from the destination folder that were not copied by the script
all_files = proj_root.ant_glob("**", excl=['wscript', 'build.gradle', 'assets_for_apk/*'])
files_to_delete = [path for path in all_files if path.abspath() not in copied_files]
for file in files_to_delete:
file.delete()
# add all the projects to the root wscript
android_wscript = android_root.make_node('wscript')
with open(android_wscript.abspath(), 'w') as wscript_file:
w = wscript_file.write
w(AUTO_GEN_HEADER_PYTHON)
w('SUBFOLDERS = [\n')
w('\t\'%s\'\n]\n\n' % '\',\n\t\''.join(created_directories))
w('def build(bld):\n')
w('\tvalid_subdirs = [x for x in SUBFOLDERS if bld.path.find_node("%s/wscript" % x)]\n')
w('\tbld.recurse(valid_subdirs)\n')
# Some Android SDK libraries have bugs, so we need to copy them locally and patch them.
if not copy_and_patch_android_libraries(conf, source_node, android_root):
return False
return True
################################################################
@conf
def is_module_for_game_project(self, module_name, game_project, project_name):
"""
Check to see if the task generator is part of the build for a particular game project.
The following rules apply:
1. It is a gem requested by the game project
2. It is the game project / project's launcher
3. It is part of the general modules list
"""
enabled_game_projects = self.get_enabled_game_project_list()
if self.is_gem(module_name):
gem_name_list = [gem.name for gem in self.get_game_gems(game_project)]
return (True if module_name in gem_name_list else False)
elif module_name == game_project or game_project == project_name:
return True
elif module_name not in enabled_game_projects and project_name is None:
return True
return False
################################################################
def collect_source_paths(android_task, src_path_tag):
game_project = android_task.game_project
bld = android_task.bld
platform = bld.env['PLATFORM']
config = bld.env['CONFIGURATION']
search_tags = [
'android_{}'.format(src_path_tag),
'android_{}_{}'.format(config, src_path_tag),
'{}_{}'.format(platform, src_path_tag),
'{}_{}_{}'.format(platform, config, src_path_tag),
]
source_paths = []
for group in bld.groups:
for task_generator in group:
if not isinstance(task_generator, TaskGen.task_gen):
continue
Logs.debug('android: Processing task %s' % task_generator.name)
if not (getattr(task_generator, 'posted', None) and getattr(task_generator, 'link_task', None)):
Logs.debug('android: -> Task is NOT posted, Skipping...')
continue
project_name = getattr(task_generator, 'project_name', None)
if not bld.is_module_for_game_project(task_generator.name, game_project, project_name):
Logs.debug('android: -> Task is NOT part of the game project, Skipping...')
continue
raw_paths = []
for tag in search_tags:
raw_paths += getattr(task_generator, tag, [])
Logs.debug('android: -> Raw Source Paths %s' % raw_paths)
for path in raw_paths:
if os.path.isabs(path):
path = bld.root.make_node(path)
else:
path = task_generator.path.make_node(path)
source_paths.append(path)
return source_paths
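# Illustrative example (platform/configuration values assumed): for platform
# 'android_armv7_clang', configuration 'debug' and src_path_tag 'java_src_path',
# the search_tags built above would be:
#     ['android_java_src_path',
#      'android_debug_java_src_path',
#      'android_armv7_clang_java_src_path',
#      'android_armv7_clang_debug_java_src_path']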
################################################################
def get_resource_compiler_path(ctx):
if Utils.unversioned_sys_platform() == "win32":
paths = ['Bin64vc140', 'Bin64vc120', 'Bin64']
else:
paths = ['BinMac64', 'Bin64']
rc_search_paths = [os.path.join(ctx.path.abspath(), path, 'rc') for path in paths]
try:
return ctx.find_program('rc', path_list = rc_search_paths, silent_output = True)
except:
ctx.fatal('[ERROR] Failed to find the Resource Compiler in paths: {}'.format(paths))
def get_python_path(ctx):
python_cmd = 'python'
if Utils.unversioned_sys_platform() == "win32":
python_cmd = '"{}"'.format(ctx.path.find_resource('Tools/Python/python.cmd').abspath())
return python_cmd
def set_key_and_store_pass(ctx):
if ctx.get_android_build_environment() == 'Distribution':
key_pass = ctx.options.distro_key_pass
store_pass = ctx.options.distro_store_pass
if not (key_pass and store_pass):
ctx.fatal('[ERROR] Build environment is set to Distribution but --distro-key-pass or --distro-store-pass arguments were not specified or blank')
else:
key_pass = ctx.options.dev_key_pass
store_pass = ctx.options.dev_store_pass
ctx.env['KEYPASS'] = key_pass
ctx.env['STOREPASS'] = store_pass
################################################################
################################################################
class strip_debug_symbols(Task):
"""
Strips the debug symbols from a shared library
"""
color = 'CYAN'
run_str = "${STRIP} --strip-debug -o ${TGT} ${SRC}"
vars = [ 'STRIP' ]
def runnable_status(self):
if super(strip_debug_symbols, self).runnable_status() == ASK_LATER:
return ASK_LATER
src = self.inputs[0].abspath()
tgt = self.outputs[0].abspath()
# If the target file is missing, we need to run
try:
stat_tgt = os.stat(tgt)
except OSError:
return RUN_ME
# Now compare both file stats
try:
stat_src = os.stat(src)
except OSError:
pass
else:
CREATION_TIME_PADDING = 10
# only check timestamps
if stat_src.st_mtime >= (stat_tgt.st_mtime + CREATION_TIME_PADDING):
return RUN_ME
# Everything fine, we can skip this task
return SKIP_ME
################################################################
class aapt_package_base(Task):
"""
General purpose 'package' variant Android Asset Packaging Tool task
"""
color = 'PINK'
vars = [ 'AAPT', 'AAPT_RESOURCES', 'AAPT_INCLUDES', 'AAPT_PACKAGE_FLAGS' ]
def runnable_status(self):
def _to_list(value):
if isinstance(value, list):
return value
else:
return [ value ]
if not self.inputs:
self.inputs = []
aapt_resources = getattr(self.generator, 'aapt_resources', [])
assets = getattr(self, 'assets', [])
apk_layout = getattr(self, 'srcdir', [])
input_paths = _to_list(aapt_resources) + _to_list(assets) + _to_list(apk_layout)
for path in input_paths:
files = path.ant_glob('**/*')
self.inputs.extend(files)
android_manifest = getattr(self.generator, 'main_android_manifest', None)
if android_manifest:
self.inputs.append(android_manifest)
result = super(aapt_package_base, self).runnable_status()
if result == SKIP_ME:
for output in self.outputs:
if not os.path.isfile(output.abspath()):
return RUN_ME
return result
################################################################
class android_code_gen(aapt_package_base):
"""
Generates the R.java files from the Android resources
"""
run_str = '${AAPT} package -f -M ${ANDROID_MANIFEST} ${AAPT_RESOURCE_ST:AAPT_RESOURCES} ${AAPT_INLC_ST:AAPT_INCLUDES} ${AAPT_PACKAGE_FLAGS} -m -J ${OUTDIR}'
################################################################
class package_resources(aapt_package_base):
"""
Packages all the native resources from the Android project
"""
run_str = '${AAPT} package -f ${ANDROID_DEBUG_MODE} -M ${ANDROID_MANIFEST} ${AAPT_RESOURCE_ST:AAPT_RESOURCES} ${AAPT_INLC_ST:AAPT_INCLUDES} ${AAPT_PACKAGE_FLAGS} -F ${TGT}'
################################################################
class build_apk(aapt_package_base):
"""
Generates an unsigned, unaligned Android APK
"""
run_str = '${AAPT} package -f ${ANDROID_DEBUG_MODE} -M ${ANDROID_MANIFEST} ${AAPT_RESOURCE_ST:AAPT_RESOURCES} ${AAPT_INLC_ST:AAPT_INCLUDES} ${AAPT_ASSETS_ST:AAPT_ASSETS} ${AAPT_PACKAGE_FLAGS} -F ${TGT} ${SRCDIR}'
################################################################
class aapt_crunch(Task):
"""
Processes the PNG resources from the Android project
"""
color = 'PINK'
run_str = '${AAPT} crunch ${AAPT_RESOURCE_ST:AAPT_RESOURCES} -C ${TGT}'
vars = [ 'AAPT', 'AAPT_RESOURCES' ]
def runnable_status(self):
if not self.inputs:
self.inputs = []
for resource in self.generator.aapt_resources:
res = resource.ant_glob('**/*')
self.inputs.extend(res)
return super(aapt_crunch, self).runnable_status()
################################################################
class aidl(Task):
"""
Processes the Android interface files
"""
color = 'PINK'
run_str = '${AIDL} ${AIDL_PREPROC_ST:AIDL_PREPROCESSES} ${SRC} ${TGT}'
def runnable_status(self):
result = super(aidl, self).runnable_status()
if result == SKIP_ME:
for output in self.outputs:
if not os.path.isfile(output.abspath()):
return RUN_ME
return result
################################################################
class dex(Task):
"""
Compiles the .class files into the dalvik executable
"""
color = 'PINK'
run_str = '${DX} --dex --output ${TGT} ${JAR_INCLUDES} ${SRCDIR}'
def runnable_status(self):
for tsk in self.run_after:
if not tsk.hasrun:
return ASK_LATER
if not self.inputs:
self.inputs = []
srcdir = self.srcdir
if not isinstance(srcdir, list):
srcdir = [ srcdir ]
for src_node in srcdir:
self.inputs.extend(src_node.ant_glob('**/*.class', remove = False, quiet = True))
result = super(dex, self).runnable_status()
if result == SKIP_ME:
for output in self.outputs:
if not os.path.isfile(output.abspath()):
return RUN_ME
return result
################################################################
class zipalign(Task):
"""
Performs a specified byte alignment on the source file
"""
color = 'PINK'
run_str = '${ZIPALIGN} -f ${ZIPALIGN_SIZE} ${SRC} ${TGT}'
def runnable_status(self):
result = super(zipalign, self).runnable_status()
if result == SKIP_ME:
for output in self.outputs:
if not os.path.isfile(output.abspath()):
return RUN_ME
return result
################################################################
################################################################
@taskgen_method
def create_debug_strip_task(self, source_file, dest_location):
lib_name = os.path.basename(source_file.abspath())
output_node = dest_location.make_node(lib_name)
# For Android Studio we should just copy the libs because stripping is part of the build process.
# But we have issues with long path names that make the stripping process fail in Android Studio.
self.create_task('strip_debug_symbols', source_file, output_node)
################################################################
@feature('c', 'cxx')
@after_method('apply_link')
def add_android_natives_processing(self):
if 'android' not in self.env['PLATFORM']:
return
if not getattr(self, 'link_task', None):
return
if self._type == 'stlib': # Do not copy static libs
return
output_node = self.bld.get_output_folders(self.bld.env['PLATFORM'], self.bld.env['CONFIGURATION'])[0]
project_name = getattr(self, 'project_name', None)
game_projects = self.bld.get_enabled_game_project_list()
for src in self.link_task.outputs:
src_lib = output_node.make_node(src.name)
for game in game_projects:
game_build_native_key = '%s_BUILDER_NATIVES' % game
# If the game is a valid android project, a specific build native value will have been created during
# the project configuration. Only process games with valid android project settings
if not game_build_native_key in self.env:
continue
game_build_native_node = self.bld.root.find_dir(self.env[game_build_native_key])
if self.bld.is_module_for_game_project(self.name, game, project_name):
self.create_debug_strip_task(src_lib, game_build_native_node)
################################################################
@feature('c', 'cxx', 'copy_3rd_party_binaries')
@after_method('apply_link')
def add_3rd_party_library_stripping(self):
"""
Strip and copy 3rd party shared libraries so they are included into the APK.
"""
if 'android' not in self.env['PLATFORM'] or self.bld.spec_monolithic_build():
return
third_party_artifacts = self.env['COPY_3RD_PARTY_ARTIFACTS']
if third_party_artifacts:
game_projects = self.bld.get_enabled_game_project_list()
for source_node in third_party_artifacts:
_, ext = os.path.splitext(source_node.abspath())
# Only care about shared libraries
if ext == '.so':
for game in game_projects:
game_build_native_key = '%s_BUILDER_NATIVES' % game
# If the game is a valid android project, a specific build native value will have been created during
# the project configuration. Only process games with valid android project settings
if not game_build_native_key in self.env:
continue
game_build_native_node = self.bld.root.find_dir(self.env[game_build_native_key])
self.create_debug_strip_task(source_node, game_build_native_node)
################################################################
################################################################
@feature('wrapped_copy_outputs')
@before_method('process_source')
def create_copy_outputs(self):
self.meths.remove('process_source')
self.create_task('copy_outputs', self.source, self.target)
@taskgen_method
def sign_and_align_apk(self, base_apk_name, raw_apk, intermediate_folder, final_output, suffix = ''):
# first sign the apk with jarsigner
apk_name = '{}_unaligned{}.apk'.format(base_apk_name, suffix)
unaligned_apk = intermediate_folder.make_node(apk_name)
self.jarsign_task = jarsign_task = self.create_task('jarsigner', raw_apk, unaligned_apk)
# align the new apk with assets
apk_name = '{}{}.apk'.format(base_apk_name, suffix)
final_apk = final_output.make_node(apk_name)
self.zipalign_task = zipalign_task = self.create_task('zipalign', unaligned_apk, final_apk)
# chain the alignment to happen after signing
zipalign_task.set_run_after(jarsign_task)
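# Illustrative naming example (inputs assumed): for base_apk_name 'MyGame' and
# suffix '_w_assets' (APK_WITH_ASSETS_SUFFIX), the two tasks above produce
#     <intermediate_folder>/MyGame_unaligned_w_assets.apk   (jarsigner output)
#     <final_output>/MyGame_w_assets.apk                    (zipalign output)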
################################################################
################################################################
@conf
def AndroidAPK(ctx, *k, **kw):
project_name = kw['project_name']
env = ctx.env
platform = env['PLATFORM']
configuration = env['CONFIGURATION']
if ctx.cmd in ('configure', 'generate_uber_files', 'generate_module_def_files', 'msvs'):
return
if not (ctx.is_android_platform(platform) or platform =='project_generator'):
return
if project_name not in ctx.get_enabled_game_project_list():
return
Logs.debug('android: ******************************** ')
Logs.debug('android: Processing {}...'.format(project_name))
root_input = ctx.path.get_src().make_node('src')
root_output = ctx.path.get_bld()
apk_layout_dir = root_output.make_node('builder')
# The variant name is constructed in the same fashion as how Gradle generates all its build
# variants. After all the Gradle configurations and product flavors are evaluated, the variants
# are generated in the following lower camel case format {product_flavor}{configuration}.
# Our configuration and Gradle's configuration are a one-to-one mapping of what each describes,
# while our platform is effectively Gradle's product flavor.
gradle_variant = '{}{}'.format(platform, configuration.title())
# copy over the required 3rd party libs that need to be bundled into the apk
abi = env['ANDROID_ARCH']
if not abi:
abi = 'armeabi-v7a'
if ctx.options.from_android_studio:
local_native_libs_node = root_input.make_node([ gradle_variant, 'jniLibs', abi ])
else:
local_native_libs_node = apk_layout_dir.make_node([ 'lib', abi ])
local_native_libs_node.mkdir()
Logs.debug('android: APK builder path (native libs) -> {}'.format(local_native_libs_node.abspath()))
env['{}_BUILDER_NATIVES'.format(project_name)] = local_native_libs_node.abspath()
android_cache = get_android_cache_node(ctx)
libs_to_copy = env['EXT_LIBS']
for lib in libs_to_copy:
src = android_cache.make_node(lib)
lib_name = os.path.basename(lib)
tgt = local_native_libs_node.make_node(lib_name)
ctx(features = 'wrapped_copy_outputs', source = src, target = tgt)
# since we are having android studio building the apk we can kick out early
if ctx.options.from_android_studio:
return
# order of precedence from highest (primary) to lowest (inputs): full variant, build type, product flavor, main
local_variant_dirs = [ gradle_variant, configuration, platform, 'main' ]
java_source_nodes = []
android_manifests = []
resource_nodes = []
for source_dir in local_variant_dirs:
java_node = root_input.find_node([ source_dir, 'java' ])
if java_node:
java_source_nodes.append(java_node)
res_node = root_input.find_node([ source_dir, 'res' ])
if res_node:
resource_nodes.append(res_node)
manifest_node = root_input.find_node([ source_dir, 'AndroidManifest.xml' ])
if manifest_node:
android_manifests.append(manifest_node)
if not android_manifests:
ctx.fatal('[ERROR] Unable to find any AndroidManifest.xml files in project path {}.'.format(ctx.path.get_src().abspath()))
Logs.debug('android: Found local Java source directories {}'.format(java_source_nodes))
Logs.debug('android: Found local resource directories {}'.format(resource_nodes))
Logs.debug('android: Found local manifest files {}'.format(android_manifests))
# get the keystore passwords
set_key_and_store_pass(ctx)
# Private function to add android libraries to the build
def _add_library(folder, libName, source_paths, manifests, package_names, resources):
'''
Collect the resources and package names of the specified library.
'''
if not folder:
Logs.error('[ERROR] Invalid folder for library {}. Please check the path in the {} file.'.format(libName, java_libs_json.abspath()))
return False
src = folder.find_dir('src')
if not src:
Logs.error("[ERROR] Could not find the 'src' folder for library {}. Please check that they are present at {}".format(libName, folder.abspath()))
return False
source_paths.append(src)
manifest = folder.find_node('AndroidManifest.xml')
if not manifest:
Logs.error("[ERROR] Could not find the AndroidManifest.xml folder for library {}. Please check that they are present at {}".format(libName, folder.abspath()))
return False
manifests.append(manifest)
tree = ET.parse(manifest.abspath())
root = tree.getroot()
package = root.get('package')
if not package:
Logs.error("[ERROR] Could not find 'package' node in {}. Please check that the manifest is valid ".format(manifest.abspath()))
return False
package_names.append(package)
res = folder.find_dir('res')
if res:
resources.append(res)
return True
library_packages = []
library_jars = []
java_libs_json = ctx.root.make_node(kw['android_java_libs'])
json_data = ctx.parse_json_file(java_libs_json)
if json_data:
for libName, value in json_data.iteritems():
if 'libs' in value:
# Collect any java lib that is needed so we can add it to the classpath.
for java_lib in value['libs']:
jar_path = string.Template(java_lib['path']).substitute(env)
if os.path.exists(jar_path):
library_jars.append(jar_path)
elif java_lib['required']:
ctx.fatal('[ERROR] Required java lib - {} - was not found'.format(jar_path))
if 'patches' in value:
cur_path = ctx.srcnode.abspath()
rel_path = ctx.get_android_patched_libraries_relative_path()
lib_path = os.path.join(cur_path, rel_path, libName)
else:
# Search the multiple library paths where the library can be
lib_path = None
for path in value['srcDir']:
path = string.Template(path).substitute(env)
if os.path.exists(path):
lib_path = path
break
if not _add_library(ctx.root.make_node(lib_path), libName, java_source_nodes, android_manifests, library_packages, resource_nodes):
ctx.fatal('[ERROR] Could not add the android library - {}'.format(libName))
r_java_out = root_output.make_node('r')
aidl_out = root_output.make_node('aidl')
java_out = root_output.make_node('classes')
crunch_out = root_output.make_node('res')
manifest_merger_out = root_output.make_node('manifest')
game_package = ctx.get_android_package_name(project_name)
executable_name = ctx.get_executable_name(project_name)
Logs.debug('android: ****')
Logs.debug('android: All Java source directories {}'.format(java_source_nodes))
Logs.debug('android: All resource directories {}'.format(resource_nodes))
java_include_paths = java_source_nodes + [ r_java_out, aidl_out ]
java_source_paths = java_source_nodes
uses = kw.get('use', [])
if not isinstance(uses, list):
uses = [ uses ]
################################
# Push all the Android apk packaging into their own build groups with
# lazy posting to ensure they are processed at the end of the build
ctx.post_mode = POST_LAZY
build_group_name = '{}_android_group'.format(project_name)
ctx.add_group(build_group_name)
ctx(
name = '{}_APK'.format(project_name),
target = executable_name,
features = [ 'android', 'android_apk', 'javac', 'use', 'uselib' ],
use = uses,
game_project = project_name,
# java settings
compat = env['JAVA_VERSION'], # java compatibility version number
classpath = library_jars,
srcdir = java_include_paths, # folder containing the sources to compile
outdir = java_out, # folder where to output the classes (in the build directory)
# android settings
android_manifests = android_manifests,
android_package = game_package,
aidl_outdir = aidl_out,
r_java_outdir = r_java_out,
manifest_merger_out = manifest_merger_out,
apk_layout_dir = apk_layout_dir,
apk_native_lib_dir = local_native_libs_node,
apk_output_dir = 'apk',
aapt_assets = [],
aapt_resources = resource_nodes,
aapt_extra_packages = library_packages,
aapt_package_flags = [],
aapt_package_resources_outdir = 'bin',
aapt_crunch_outdir = crunch_out,
)
# reset the build group/mode back to default
ctx.post_mode = POST_AT_ONCE
ctx.set_group('regular_group')
################################################################
@feature('android')
@before('apply_java')
def apply_android_java(self):
"""
Generates the AIDL tasks for all other tasks that may require it, and adds
their Java source directories to the current project's Java source paths
so they all get processed at the same time. Also processes the direct
Android Archive Resource uses.
"""
Utils.def_attrs(
self,
srcdir = [],
classpath = [],
aidl_srcdir = [],
aapt_assets = [],
aapt_includes = [],
aapt_resources = [],
aapt_extra_packages = [],
)
# validate we have some required attributes
apk_native_lib_dir = getattr(self, 'apk_native_lib_dir', None)
if not apk_native_lib_dir:
self.fatal('[ERROR] No "apk_native_lib_dir" specified in Android package task.')
android_manifests = getattr(self, 'android_manifests', None)
if not android_manifests:
self.fatal('[ERROR] No "android_manifests" specified in Android package task.')
manifest_nodes = []
for manifest in android_manifests:
if not isinstance(manifest, Node.Node):
manifest_nodes.append(self.path.get_src().make_node(manifest))
else:
manifest_nodes.append(manifest)
self.android_manifests = manifest_nodes
self.main_android_manifest = manifest_nodes[0] # guaranteed to be the main; manifests are added in order of precedence highest to lowest
# process the uses, only first level uses are supported at this time
libs = self.to_list(getattr(self, 'use', []))
Logs.debug('android: -> Processing Android libs used by APK {}'.format(libs))
input_manifests = []
use_libs_added = []
for lib_name in libs:
try:
task_gen = self.bld.get_tgen_by_name(lib_name)
task_gen.post()
except Exception:
continue
else:
if not hasattr(task_gen, 'aar_task'):
continue
use_libs_added.append(lib_name)
# required entries from the library
append_to_unique_list(self.aapt_extra_packages, task_gen.package)
append_to_unique_list(self.aapt_includes, task_gen.jar_task.outputs[0].abspath())
append_to_unique_list(self.aapt_resources, task_gen.aapt_resources)
append_to_unique_list(input_manifests, task_gen.manifest)
# optional entries from the library
if task_gen.aapt_assets:
append_to_unique_list(self.aapt_assets, task_gen.aapt_assets)
# since classpath is propagated by the java tool, we just need to make sure the jars are propagated to the android specific tools using aapt_includes
if task_gen.classpath:
append_to_unique_list(self.aapt_includes, task_gen.classpath)
if task_gen.native_libs:
native_libs_root = task_gen.native_libs_root
native_libs = task_gen.native_libs
for lib in native_libs:
rel_path = lib.path_from(native_libs_root)
tgt = apk_native_lib_dir.make_node(rel_path)
strip_task = self.create_task('strip_debug_symbols', lib, tgt)
self.bld.add_to_group(strip_task, 'regular_group')
Logs.debug('android: -> Android use libs added {}'.format(use_libs_added))
# generate the task to merge the manifests
manifest_nodes.extend(input_manifests)
if len(manifest_nodes) >= 2:
manifest_merger_out = getattr(self, 'manifest_merger_out', None)
if manifest_merger_out:
if not isinstance(manifest_merger_out, Node.Node):
manifest_merger_out = self.path.get_bld().make_node(manifest_merger_out)
else:
manifest_merger_out = self.path.get_bld().make_node('manifest')
manifest_merger_out.mkdir()
merged_manifest = manifest_merger_out.make_node('AndroidManifest.xml')
self.manifest_task = manifest_task = self.create_task('android_manifest_merger', manifest_nodes, merged_manifest)
manifest_task.env['MAIN_MANIFEST'] = manifest_nodes[0].abspath()
input_manifest_paths = [ manifest.abspath() for manifest in manifest_nodes[1:] ]
if manifest_task.env['MANIFEST_MERGER_OLD_VERSION']:
manifest_task.env['LIBRARY_MANIFESTS'] = input_manifest_paths
else:
manifest_task.env['LIBRARY_MANIFESTS'] = os.pathsep.join(input_manifest_paths)
self.main_android_manifest = merged_manifest
# generate all the aidl tasks
aidl_outdir = getattr(self, 'aidl_outdir', None)
if aidl_outdir:
if not isinstance(aidl_outdir, Node.Node):
aidl_outdir = self.path.get_bld().make_node(aidl_outdir)
else:
aidl_outdir = self.path.get_bld().make_node('aidl')
aidl_outdir.mkdir()
aidl_src_paths = collect_source_paths(self, 'aidl_src_path')
self.aidl_tasks = []
for srcdir in aidl_src_paths:
for aidl_file in srcdir.ant_glob('**/*.aidl'):
rel_path = aidl_file.path_from(srcdir)
java_file = aidl_outdir.make_node('{}.java'.format(os.path.splitext(rel_path)[0]))
aidl_task = self.create_task('aidl', aidl_file, java_file)
self.aidl_tasks.append(aidl_task)
java_src_paths = collect_source_paths(self, 'java_src_path')
append_to_unique_list(self.srcdir, java_src_paths)
jars = collect_source_paths(self, 'jars')
append_to_unique_list(self.classpath, jars)
Logs.debug('android: -> Additional Java source paths found {}'.format(java_src_paths))
################################################################
@feature('android')
@before_method('process_source')
@after_method('apply_java')
def apply_android(self):
"""
Generates the code generation task (produces R.java) and sets up the task chaining
for AIDL, Java and the code gen task
"""
Utils.def_attrs(
self,
classpath = [],
aapt_resources = [],
aapt_includes = [],
aapt_extra_packages = [],
aapt_package_flags = [],
)
main_package = getattr(self, 'android_package', None)
if not main_package:
self.fatal('[ERROR] No "android_package" specified in Android package task.')
javac_task = getattr(self, 'javac_task', None)
if not javac_task:
self.fatal('[ERROR] It seems the "javac" task failed to be generated, unable to complete the Android build process.')
self.code_gen_task = code_gen_task = self.create_task('android_code_gen')
r_java_outdir = getattr(self, 'r_java_outdir', None)
if r_java_outdir:
if not isinstance(r_java_outdir, Node.Node):
r_java_outdir = self.path.get_bld().make_node(r_java_outdir)
else:
r_java_outdir = self.path.get_bld().make_node('r')
r_java_outdir.mkdir()
code_gen_task.env['OUTDIR'] = r_java_outdir.abspath()
android_manifest = self.main_android_manifest
code_gen_task.env['ANDROID_MANIFEST'] = android_manifest.abspath()
# resources
aapt_resources = []
for resource in self.aapt_resources:
if isinstance(resource, Node.Node):
aapt_resources.append(resource.abspath())
else:
aapt_resources.append(resource)
self.aapt_resource_paths = aapt_resources
code_gen_task.env.append_value('AAPT_RESOURCES', aapt_resources)
# included jars
aapt_includes = self.aapt_includes + self.classpath
aapt_include_paths = []
for include_path in aapt_includes:
if isinstance(include_path, Node.Node):
aapt_include_paths.append(include_path.abspath())
else:
aapt_include_paths.append(include_path)
self.aapt_include_paths = aapt_include_paths
code_gen_task.env.append_value('AAPT_INCLUDES', aapt_include_paths)
# additional flags
aapt_package_flags = self.aapt_package_flags
extra_packages = self.aapt_extra_packages
if extra_packages:
aapt_package_flags.extend([ '--extra-packages', ':'.join(extra_packages) ])
code_gen_task.env.append_value('AAPT_PACKAGE_FLAGS', aapt_package_flags)
# outputs (R.java files)
included_packages = [ main_package ] + extra_packages
output_nodes = []
for package in included_packages:
sub_dirs = package.split('.')
dir_path = os.path.join(*sub_dirs)
r_java_path = os.path.join(dir_path, 'R.java')
r_java_node = r_java_outdir.make_node(r_java_path)
output_nodes.append(r_java_node)
code_gen_task.set_outputs(output_nodes)
# task chaining
manifest_task = getattr(self, 'manifest_task', None)
if manifest_task:
code_gen_task.set_run_after(manifest_task)
aidl_tasks = getattr(self, 'aidl_tasks', [])
for aidl_task in aidl_tasks:
code_gen_task.set_run_after(aidl_task)
javac_task.set_run_after(self.code_gen_task)
################################################################
@feature('android_apk')
@after_method('apply_android')
def apply_android_apk(self):
"""
Generates the rest of the tasks necessary for building an APK (dex, crunch, package, build, sign, alignment).
"""
Utils.def_attrs(
self,
aapt_assets = [],
aapt_include_paths = [],
aapt_resource_paths = [],
aapt_package_flags = [],
)
root_input = self.path.get_src()
root_output = self.path.get_bld()
if not hasattr(self, 'target'):
self.target = self.name
executable_name = self.target
aapt_resources = self.aapt_resource_paths
aapt_includes = self.aapt_include_paths
aapt_assets = []
asset_nodes = []
for asset_dir in self.aapt_assets:
if isinstance(asset_dir, Node.Node):
aapt_assets.append(asset_dir.abspath())
asset_nodes.append(asset_dir)
else:
aapt_assets.append(asset_dir)
asset_nodes.append(root_input.make_node(asset_dir))
android_manifest = self.android_manifests[0]
if hasattr(self, 'manifest_task'):
android_manifest = self.manifest_task.outputs[0]
# dex task
apk_layout_dir = getattr(self, 'apk_layout_dir', None)
if apk_layout_dir:
if not isinstance(apk_layout_dir, Node.Node):
apk_layout_dir = self.path.get_bld().make_node(apk_layout_dir)
else:
apk_layout_dir = root_output.make_node('builder')
apk_layout_dir.mkdir()
self.dex_task = dex_task = self.create_task('dex')
self.dex_task.set_outputs(apk_layout_dir.make_node('classes.dex'))
dex_task.env.append_value('JAR_INCLUDES', aapt_includes)
dex_srcdir = self.outdir
dex_task.env['SRCDIR'] = dex_srcdir.abspath()
dex_task.srcdir = dex_srcdir
# crunch task
self.crunch_task = crunch_task = self.create_task('aapt_crunch')
crunch_outdir = getattr(self, 'aapt_crunch_outdir', None)
if crunch_outdir:
if not isinstance(crunch_outdir, Node.Node):
crunch_outdir = root_output.make_node(crunch_outdir)
else:
crunch_outdir = root_output.make_node('res')
crunch_outdir.mkdir()
crunch_task.set_outputs(crunch_outdir)
crunch_task.env.append_value('AAPT_INCLUDES', aapt_includes)
crunch_task.env.append_value('AAPT_RESOURCES', aapt_resources)
# package resources task
self.package_resources_task = package_resources_task = self.create_task('package_resources')
aapt_package_resources_outdir = getattr(self, 'aapt_package_resources_outdir', None)
if aapt_package_resources_outdir:
if not isinstance(aapt_package_resources_outdir, Node.Node):
aapt_package_resources_outdir = root_output.make_node(aapt_package_resources_outdir)
else:
aapt_package_resources_outdir = root_output.make_node('bin')
aapt_package_resources_outdir.mkdir()
package_resources_task.set_outputs(aapt_package_resources_outdir.make_node('{}.ap_'.format(executable_name)))
package_resources_task.env['ANDROID_MANIFEST'] = android_manifest.abspath()
package_resources_task.env.append_value('AAPT_INCLUDES', aapt_includes)
package_resources_task.env.append_value('AAPT_RESOURCES', aapt_resources)
################################
# generate the APK
# Generating the APK has to happen in the right order. This is important for Android store APK uploads,
# if the alignment happens before the signing, then the signing will blow over the alignment and will
# require a realignment before store upload.
# 1. Generate the unsigned, unaligned APK
# 2. Sign the APK
# 3. Align the APK
apk_output_dir = getattr(self, 'apk_output_dir', None)
if apk_output_dir:
if not isinstance(apk_output_dir, Node.Node):
apk_output_dir = root_output.make_node(apk_output_dir)
else:
apk_output_dir = root_output.make_node('apk')
apk_output_dir.mkdir()
# 1. build_apk
self.apk_task = apk_task = self.create_task('build_apk')
apk_task.env['SRCDIR'] = apk_layout_dir.abspath()
apk_task.srcdir = apk_layout_dir
apk_name = '{}_unaligned_unsigned.apk'.format(executable_name)
unsigned_unaligned_apk = apk_output_dir.make_node(apk_name)
apk_task.set_outputs(unsigned_unaligned_apk)
apk_task.env['ANDROID_MANIFEST'] = android_manifest.abspath()
apk_task.assets = asset_nodes
apk_task.env.append_value('AAPT_ASSETS', aapt_assets)
apk_task.env.append_value('AAPT_INCLUDES', aapt_includes)
apk_task.env.append_value('AAPT_RESOURCES', aapt_resources)
# 2. jarsign and 3. zipalign
final_apk_out = self.bld.get_output_folders(self.bld.env['PLATFORM'], self.bld.env['CONFIGURATION'])[0]
self.sign_and_align_apk(
executable_name, # base_apk_name
unsigned_unaligned_apk, # raw_apk
apk_output_dir, # intermediate_folder
final_apk_out # final_output
)
# task chaining
dex_task.set_run_after(self.javac_task)
crunch_task.set_run_after(dex_task)
package_resources_task.set_run_after(crunch_task)
apk_task.set_run_after(package_resources_task)
self.jarsign_task.set_run_after(apk_task)
###############################################################################
###############################################################################
def adb_call(*cmdArgs, **keywords):
'''
Issue an adb command. Positional args are joined into a single space-separated
string; the only supported keyword argument is device, the serial number of a
device reported by adb.
Examples:
adb_call('start-server') results in "adb start-server" being executed
adb_call('push', local_path, remote_path, device='123456') results in "adb -s 123456 push <local_path> <remote_path>" being executed
'''
command = [ 'adb' ]
if 'device' in keywords:
command.extend([ '-s', keywords['device'] ])
command.extend(cmdArgs)
cmdline = ' '.join(command)
Logs.debug('adb_call: running command \'%s\'', cmdline)
try:
output = check_output(cmdline, stderr = subprocess.STDOUT, shell = True)
stripped_output = output.rstrip()
# don't need to dump the output of 'push' or 'install' commands
if not any(cmd for cmd in ('push', 'install') if cmd in cmdArgs):
if '\n' in stripped_output:
# the newline arg is because Logs.debug will replace newlines with spaces
# in the format string before passing it on to the logger
Logs.debug('adb_call: output = %s%s', '\n', stripped_output)
else:
Logs.debug('adb_call: output = %s', stripped_output)
return stripped_output
except Exception as inst:
Logs.debug('adb_call: exception was thrown: ' + str(inst))
return None # return None so the caller can handle the failure gracefully
def adb_ls(path, device_id, args = [], as_root = False):
'''
Special wrapper around calling "adb shell ls <args> <path>". This uses
adb_call under the hood but provides some additional error handling specific
to the "ls" command. Optionally, this command can be run as super user, or
'as_root', which is disabled by default.
Returns:
status - True if the command succeeded, False otherwise
output - the stripped output from the ls command
'''
error_messages = [
'No such file or directory',
'Permission denied'
]
shell_command = [ 'shell' ]
if as_root:
shell_command.extend([ 'su', '-c' ])
shell_command.append('ls')
shell_command.extend(args)
shell_command.append(path)
Logs.debug('adb_ls: {}'.format(shell_command))
raw_output = adb_call(*shell_command, device = device_id)
if raw_output is None:
Logs.debug('adb_ls: No output given')
return False, None
if any([error for error in error_messages if error in raw_output]):
Logs.debug('adb_ls: Error message found')
status = False
else:
Logs.debug('adb_ls: Command was successful')
status = True
return status, raw_output
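# Hypothetical usage sketch for adb_ls (illustrative only; it is not called anywhere by
# the build scripts, and the device serial below is made up):
def _example_adb_ls_usage(device_id = '0123456789ABCDEF'):
    # list the public storage root in long format, without requesting root access
    status, listing = adb_ls('/sdcard/', device_id, args = [ '-l' ], as_root = False)
    if status:
        Logs.debug('adb_ls example: {}'.format(listing))
    return status, listing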
def get_list_of_android_devices():
'''
Gets the connected android devices using the adb command devices and
returns a list of serial numbers of connected devices.
'''
devices = []
devices_output = adb_call("devices")
if devices_output is not None:
devices_output = devices_output.split(os.linesep)
for output in devices_output:
if any(x in output for x in ['List', '*', 'emulator']):
Logs.debug("android: skipping the following line as it has 'List', '*' or 'emulator' in it: %s" % output)
continue
device_serial = output.split()
if device_serial:
if 'unauthorized' in output.lower():
Logs.warn("[WARN] android: device %s is not authorized for development access. Please reconnect the device and check for a confirmation dialog." % device_serial[0])
else:
devices.append(device_serial[0])
Logs.debug("android: found device with serial: " + device_serial[0])
return devices
def get_device_access_type(device_id):
'''
Determines what kind of access level we have on the device
'''
adb_call('root', device = device_id) # this ends up being either a no-op or restarts adbd on device as root
adbd_info = adb_call('shell', '"ps | grep adbd"', device = device_id)
if adbd_info and ('root' in adbd_info):
Logs.debug('adb_call: Device - {} - has adbd root access'.format(device_id))
return ACCESS_ROOT_ADBD
su_test = adb_call('shell', '"su -c echo test"', device = device_id)
if su_test and ('test' in su_test):
Logs.debug('adb_call: Device - {} - has shell su access'.format(device_id))
return ACCESS_SHELL_SU
Logs.debug('adb_call: Unable to verify root access for device {}. Assuming default access mode.'.format(device_id))
return ACCESS_NORMAL
def get_device_file_timestamp(remote_file_path, device_id, as_root = False):
'''
Get the integer timestamp value of a file from a given device. Optionally, this
command can be run as super user, or 'as_root', which is disabled by default.
'''
timestamp_string = ''
device_sdk_version = adb_call('shell', 'getprop', 'ro.build.version.sdk', device = device_id)
if "\n" in device_sdk_version:
device_sdk_version = device_sdk_version[string.rfind(device_sdk_version, "\n"):]
# for devices running Android 5.1.1 or under, use the old 'ls' command for getting the file timestamp
if int(device_sdk_version) <= 22:
ls_status, ls_output = adb_ls(args = [ '-l' ], path = remote_file_path, device_id = device_id, as_root = as_root)
if ls_status:
tgt_ls_fields = ls_output.split()
timestamp_string = '{} {}'.format(tgt_ls_fields[4], tgt_ls_fields[5])
Logs.debug('android_deploy: ls timestamp %s', timestamp_string)
# otherwise for newer devices we can use the 'stat' command
else:
adb_command = [ 'shell' ]
if as_root:
adb_command.extend([ 'su', '-c' ])
adb_command.extend([ 'stat', '-c', '%y', remote_file_path ])
file_timestamp = adb_call(*adb_command, device = device_id)
if file_timestamp and 'No such file or directory' not in file_timestamp:
# strip the seconds and milliseconds from the time format from the stat command
timestamp_string = file_timestamp[:file_timestamp.rfind(':')]
Logs.debug('android_deploy: stat timestamp %s', timestamp_string)
if timestamp_string:
target_time = time.mktime(time.strptime(timestamp_string, "%Y-%m-%d %H:%M"))
Logs.debug('android_deploy: {} time is {}'.format(remote_file_path, target_time))
return target_time
return 0
def auto_detect_device_storage_path(device_id, log_warnings = False):
'''
Uses the device's environment variable "EXTERNAL_STORAGE" to determine the correct
path to public storage that has write permissions
'''
def _log_debug(message):
Logs.debug('adb_call: {}'.format(message))
def _log_warn(message):
Logs.warn('[WARN] {}'.format(message))
log_func = _log_warn if log_warnings else _log_debug
external_storage = adb_call('shell', '"set | grep EXTERNAL_STORAGE"', device = device_id)
if not external_storage:
log_func('Call to get the EXTERNAL_STORAGE environment variable from device {} failed.'.format(device_id))
return ''
storage_path = external_storage.split('=')
if len(storage_path) != 2:
log_func('Received bad data while attempting extract the EXTERNAL_STORAGE environment variable from device {}.'.format(device_id))
return ''
var_path = storage_path[1].strip()
status, _ = adb_ls(var_path, device_id)
if status:
return var_path
else:
Logs.debug('adb_call: The path specified in EXTERNAL_STORAGE seems to have permission issues, attempting to resolve with realpath for device {}.'.format(device_id))
real_path = adb_call('shell', 'realpath', var_path, device = device_id)
if not real_path:
log_func('Something happened while attempting to resolve the path from the EXTERNAL_STORAGE environment variable from device {}.'.format(device_id))
return ''
real_path = real_path.strip()
status, _ = adb_ls(real_path, device_id)
if status:
return real_path
else:
log_func('Unable to validate the resolved EXTERNAL_STORAGE environment variable path from device {}.'.format(device_id))
return ''
def construct_assets_path_for_game_project(ctx, game_project):
'''
Generates the relative path from the root of public storage to the application's specific data folder
'''
return 'Android/data/{}/files'.format(ctx.get_android_package_name(game_project))
def run_rc_job(ctx, job, source, target, game, assets_type, is_obb):
rc_path = get_resource_compiler_path(ctx)
command_args = [
'"{}"'.format(rc_path),
'/job={}'.format(os.path.join('Bin64', 'rc', job)),
'/p={}'.format(assets_type),
'/src="{}"'.format(source),
'/trg="{}"'.format(target),
'/threads={}'.format(ctx.options.max_cores),
'/game={}'.format(game.lower())
]
if is_obb:
package_name = ctx.get_android_package_name(game)
app_version_number = ctx.get_android_version_number(game)
command_args.extend([
'/obb_pak=main.{}.{}.obb'.format(app_version_number, package_name),
'/obb_patch_pak=patch.{}.{}.obb'.format(app_version_number, package_name)
])
command = ' '.join(command_args)
Logs.debug('android_deploy: running RC command - {}'.format(command))
call(command, shell = True)
def build_shader_paks(ctx, game, assets_type, layout_node, shaders_source_paths):
pak_shaders_script = ctx.path.find_resource('Tools/PakShaders/pak_shaders.py')
shaders_pak_dir = ctx.path.make_node("build/{}/{}".format(assets_type, game).lower())
shaders_pak_dir.mkdir()
command_args = [
get_python_path(ctx),
'"{}"'.format(pak_shaders_script.abspath()),
'"{}"'.format(shaders_pak_dir.abspath()),
'-s'
]
command_args.extend([ '{},"{}"'.format(key, path.abspath()) for key, path in shaders_source_paths.iteritems() ])
command = ' '.join(command_args)
Logs.debug('android_deploy: Running command - {}'.format(command))
call(command, shell = True)
shader_paks = shaders_pak_dir.ant_glob('*.pak')
if not shader_paks:
ctx.fatal('[ERROR] No shader pak files were found after running the pak_shaders command')
# copy the shader paks into the layout directory
shader_pak_dest = layout_node.make_node(game.lower())
for pak in shader_paks:
dest_node = shader_pak_dest.make_node(pak.name)
dest_node.delete()
Logs.debug('android_deploy: Copying {} => {}'.format(pak.relpath(), dest_node.relpath()))
shutil.copy2(pak.abspath(), dest_node.abspath())
def pack_assets_in_apk(ctx, executable_name, layout_node):
class command_buffer:
def __init__(self, base_command_args):
self._args_master = base_command_args
self._base_len = len(' '.join(base_command_args))
self.args = self._args_master[:]
self.len = self._base_len
def flush(self):
if len(self.args) > len(self._args_master):
command = ' '.join(self.args)
Logs.debug('android_deploy: Running command - {}'.format(command))
call(command, shell = True)
self.args = self._args_master[:]
self.len = self._base_len
android_cache = get_android_cache_node(ctx)
# create a copy of the existing barebones APK for the assets
variant = getattr(ctx.__class__, 'variant', None)
if not variant:
(platform, configuration) = ctx.get_platform_and_configuration()
variant = '{}_{}'.format(platform, configuration)
raw_apk_path = os.path.join(ctx.get_bintemp_folder_node().name, variant, ctx.get_android_project_relative_path(), executable_name, 'apk')
barebones_apk_path = '{}/{}_unaligned_unsigned.apk'.format(raw_apk_path, executable_name)
if not os.path.exists(barebones_apk_path):
ctx.fatal('[ERROR] Unable to find the barebones APK in path {}. Run the build command for {} again to generate it.'.format(barebones_apk_path, variant))
apk_cache_node = android_cache.make_node('apk')
apk_cache_node.mkdir()
raw_apk_with_asset_node = apk_cache_node.make_node('{}_unaligned_unsigned{}.apk'.format(executable_name, APK_WITH_ASSETS_SUFFIX))
shutil.copy2(barebones_apk_path, raw_apk_with_asset_node.abspath())
# We need to make the 'assets' junction in order to generate the correct pathing structure when adding
# files to an existing APK
asset_dir = 'assets'
asset_junction = android_cache.make_node(asset_dir)
if os.path.exists(asset_junction.abspath()):
remove_junction(asset_junction.abspath())
try:
Logs.debug('android_deploy: Creating assets junction "{}" ==> "{}"'.format(layout_node.abspath(), asset_junction.abspath()))
junction_directory(layout_node.abspath(), asset_junction.abspath())
except:
ctx.fatal("[ERROR] Could not create junction for asset folder {}".format(layout_node.abspath()))
# add the assets to the APK
command = command_buffer([
'"{}"'.format(ctx.env['AAPT']),
'add',
'"{}"'.format(raw_apk_with_asset_node.abspath())
])
command_len_max = get_command_line_limit()
with push_dir(android_cache.abspath()):
ctx.user_message('Packing assets into the APK...')
Logs.debug('android_deploy: -> from {}'.format(os.getcwd()))
assets = asset_junction.ant_glob('**/*')
for asset in assets:
file_path = asset.path_from(android_cache)
file_path = '"{}"'.format(file_path.replace('\\', '/'))
path_len = len(file_path) + 1 # 1 for the space
if (command.len + path_len) >= command_len_max:
command.flush()
command.len += path_len
command.args.append(file_path)
# flush the command buffer one more time
command.flush()
return apk_cache_node, raw_apk_with_asset_node
class adb_copy_output(Task):
'''
Class to handle copying of a single file in the layout to the android
device.
'''
def __init__(self, *k, **kw):
Task.__init__(self, *k, **kw)
self.device = ''
self.target = ''
def set_device(self, device):
'''Sets the android device (serial number from adb devices command) to copy the file to'''
self.device = device
def set_target(self, target):
'''Sets the target file directory (absolute path) and file name on the device'''
self.target = target
def run(self):
# Embed quotes in src/target so that we can correctly handle spaces
src = '"{}"'.format(self.inputs[0].abspath())
tgt = '"{}"'.format(self.target)
Logs.debug('adb_copy_output: performing copy - {} to {} on device {}'.format(src, tgt, self.device))
adb_call('push', src, tgt, device = self.device)
return 0
def runnable_status(self):
if Task.runnable_status(self) == ASK_LATER:
return ASK_LATER
return RUN_ME
@taskgen_method
def adb_copy_task(self, android_device, src_node, output_target):
'''
Create an adb_copy_output task to copy the src_node to the output_target
on the specified device. output_target is the absolute path and file name
of the target file.
'''
copy_task = self.create_task('adb_copy_output', src_node)
copy_task.set_device(android_device)
copy_task.set_target(output_target)
###############################################################################
# create a deployment context for each build variant
for configuration in ['debug', 'profile', 'release']:
for target in ['android_armv7_gcc', 'android_armv7_clang', 'android_armv8_clang']:
build_variant = '{}_{}'.format(target, configuration)
class DeployAndroidContext(Build.BuildContext):
fun = 'deploy'
variant = build_variant
def get_asset_deploy_mode(self):
if not hasattr(self, 'asset_deploy_mode'):
asset_deploy_mode = self.options.deploy_android_asset_mode.lower()
configuration = self.env['CONFIGURATION']
if configuration == 'release':
# force release mode to use the project's settings
asset_deploy_mode = ASSET_DEPLOY_PROJECT_SETTINGS
if asset_deploy_mode not in ASSET_DEPLOY_MODES:
self.fatal('[ERROR] Unable to determine the asset deployment mode. Valid options for --deploy-android-asset-mode are limited to: {}'.format(ASSET_DEPLOY_MODES))
self.asset_deploy_mode = asset_deploy_mode
setattr(self.options, 'deploy_android_asset_mode', asset_deploy_mode)
return self.asset_deploy_mode
def use_vfs(self):
if not hasattr(self, 'cached_use_vfs'):
self.cached_use_vfs = (self.get_bootstrap_vfs() == '1')
return self.cached_use_vfs
def use_obb(self):
if not hasattr(self, 'cached_use_obb'):
game = self.get_bootstrap_game()
use_main_obb = (self.get_android_use_main_obb(game).lower() == 'true')
use_patch_obb = (self.get_android_use_patch_obb(game).lower() == 'true')
self.cached_use_obb = (use_main_obb or use_patch_obb)
return self.cached_use_obb
def get_asset_cache_path(self):
if not hasattr(self, 'asset_cache_path'):
game = self.get_bootstrap_game()
assets = self.get_bootstrap_assets("android").lower()
self.asset_cache_path = "Cache/{}/{}".format(game, assets)
return self.asset_cache_path
def get_asset_cache(self):
if not hasattr(self, 'asset_cache'):
self.asset_cache = self.path.find_dir(self.get_asset_cache_path())
return self.asset_cache
def get_layout_node(self):
if not hasattr(self, 'android_layout_node'):
game = self.get_bootstrap_game()
asset_deploy_mode = self.get_asset_deploy_mode()
if asset_deploy_mode == ASSET_DEPLOY_LOOSE:
self.android_layout_node = self.get_asset_cache()
elif asset_deploy_mode == ASSET_DEPLOY_PAKS:
self.android_layout_node = self.path.make_node('AndroidLayoutPak/{}'.format(game))
elif asset_deploy_mode == ASSET_DEPLOY_PROJECT_SETTINGS:
layout_folder_name = 'AndroidLayoutObb' if self.use_obb() else 'AndroidLayoutPak'
self.android_layout_node = self.path.make_node('{}/{}'.format(layout_folder_name, game))
# just in case get_layout_node is called before deploy_android_asset_mode has been validated, which
# could mean android_layout_node never gets set
try:
return self.android_layout_node
except:
self.fatal('[ERROR] Unable to determine the asset layout node for Android.')
def user_message(self, message):
Logs.pprint('CYAN', message)
class DeployAndroid(DeployAndroidContext):
after = ['build_' + build_variant]
cmd = 'deploy_' + build_variant
features = ['deploy_android_prepare']
class DeployAndroidDevices(DeployAndroidContext):
after = ['deploy_' + build_variant]
cmd = 'deploy_devices_' + build_variant
features = ['deploy_android_devices']
@taskgen_method
@feature('deploy_android_prepare')
def prepare_to_deploy_android(tsk_gen):
'''
Prepare the deploy process by generating the necessary asset layout
directories, pak / obb files and packing assets in the APK if necessary.
'''
bld = tsk_gen.bld
platform = bld.env['PLATFORM']
configuration = bld.env['CONFIGURATION']
# handle a few non-fatal early out cases
if not bld.is_android_platform(platform) or not bld.is_option_true('deploy_android') or bld.options.from_android_studio:
bld.user_message('Skipping Android Deployment...')
return
# make sure the adb server is running first before we execute any other commands
# special case how these error messages are handled so they are only truly fatal when
# executed from the editor
log_error = Logs.error
if bld.options.from_editor_deploy:
log_error = bld.fatal
if adb_call('start-server') is None:
log_error('[ERROR] Failed to start adb server, unable to perform the deploy')
return
devices = get_list_of_android_devices()
if len(devices) == 0:
adb_call('kill-server')
log_error('[ERROR] No Android devices detected, unable to deploy')
return
# determine the selected asset deploy mode
asset_deploy_mode = bld.get_asset_deploy_mode()
Logs.debug('android_deploy: Using asset mode - {}'.format(asset_deploy_mode))
if bld.get_asset_cache() is None:
asset_dir = bld.get_asset_cache_path()
bld.fatal('[ERROR] There is no asset cache to read from at {}. Please run AssetProcessor / AssetProcessorBatch from '
'the Bin64vc120 / Bin64vc140 / BinMac64 directory with "es3" assets enabled in the AssetProcessorPlatformConfig.ini'.format(asset_dir))
game = bld.get_bootstrap_game()
executable_name = bld.get_executable_name(game)
assets = bld.get_bootstrap_assets("android")
# handle the asset pre-processing
if (asset_deploy_mode == ASSET_DEPLOY_PAKS) or (asset_deploy_mode == ASSET_DEPLOY_PROJECT_SETTINGS):
if bld.use_vfs():
bld.fatal('[ERROR] Cannot use VFS when the --deploy-android-asset-mode is set to "{}", please set remote_filesystem=0 in bootstrap.cfg'.format(asset_deploy_mode))
asset_cache = bld.get_asset_cache()
layout_node = bld.get_layout_node()
# generate the pak/obb files
is_obb = (asset_deploy_mode == ASSET_DEPLOY_PROJECT_SETTINGS) and bld.use_obb()
rc_job = bld.get_android_rc_obb_job(game) if is_obb else bld.get_android_rc_pak_job(game)
bld.user_message('Generating the necessary pak files...')
run_rc_job(bld, rc_job, asset_cache.relpath(), layout_node.relpath(), game, assets, is_obb)
# handles the shaders
if asset_deploy_mode == ASSET_DEPLOY_PROJECT_SETTINGS:
shadercachestartup_pak = '{}/shadercachestartup.pak'.format(game).lower()
shaderscache_pak = '{}/shadercache.pak'.format(game).lower()
if layout_node.find_resource(shadercachestartup_pak) and layout_node.find_resource(shaderscache_pak):
bld.user_message('Using found shaders paks in the layout folder - {}'.format(layout_node.relpath()))
else:
bld.user_message('Searching for cached shaders locally and on connected devices...')
shader_types = [ 'gles3_0', 'gles3_1' ]
shaders_source_paths = dict()
relative_assets_path = construct_assets_path_for_game_project(bld, game)
for shader_flavor in shader_types:
shader_cache_path = 'user/cache/shaders/cache/{}'.format(shader_flavor).lower()
# Check if we already have the shader's source files or we need to pull them from the device.
shader_user_node = asset_cache.find_dir(shader_cache_path)
if shader_user_node:
shaders_source_paths[shader_flavor] = shader_user_node
Logs.debug('android_deploy: Skipping pulling shaders of type {} from device. Using "user" folder instead.'.format(shader_flavor))
# pull the shaders
else:
pull_shaders_folder = bld.path.make_node("build/temp/{}/{}/{}".format(assets, game, shader_flavor).lower())
pull_shaders_folder.mkdir()
if os.path.exists(pull_shaders_folder.abspath()):
try:
shutil.rmtree(pull_shaders_folder.abspath(), ignore_errors = True)
except:
Logs.warn('[WARN] Failed to delete {} folder to copy shaders'.format(pull_shaders_folder.relpath()))
for android_device in devices:
storage_path = auto_detect_device_storage_path(android_device)
if not storage_path:
continue
device_folder = '{}/{}/{}'.format(storage_path, relative_assets_path, shader_cache_path)
ls_status, _ = adb_ls(device_folder, android_device)
if not ls_status:
continue
command = [
'pull',
device_folder,
'"{}"'.format(pull_shaders_folder.abspath()),
]
adb_call(*command, device = android_device)
shaders_source_paths[shader_flavor] = pull_shaders_folder
break
found_shader_types = shaders_source_paths.keys()
if sorted(found_shader_types) != sorted(shader_types):
message = 'Unable to find all shader types needed for shader pak generation. Found {}, Expected {}'.format(found_shader_types, shader_types)
if configuration == 'release':
bld.fatal('[ERROR] android_deploy: {}'.format(message))
else:
Logs.warn('[WARN] android_deploy: {}'.format(message))
if shaders_source_paths:
bld.user_message('Generating the shader pak files...')
build_shader_paks(bld, game, assets, layout_node, shaders_source_paths)
# get the keystore passwords
set_key_and_store_pass(bld)
# generate the new apk with assets in it
apk_cache_node, raw_apk_with_asset = pack_assets_in_apk(bld, executable_name, layout_node)
# sign and align the apk
final_apk_out = bld.get_output_folders(platform, configuration)[0]
tsk_gen.sign_and_align_apk(
executable_name, # base_apk_name
raw_apk_with_asset, # raw_apk
apk_cache_node, # intermediate_folder
final_apk_out, # final_output
APK_WITH_ASSETS_SUFFIX # suffix
)
Options.commands.append('deploy_devices_' + platform + '_' + configuration)
@taskgen_method
@feature('deploy_android_devices')
def deploy_to_devices(tsk_gen):
'''
Installs the project APK and copies the layout directory to all the
android devices that are connected to the host.
'''
def should_copy_file(src_file_node, target_time):
should_copy = False
try:
stat_src = os.stat(src_file_node.abspath())
should_copy = stat_src.st_mtime >= target_time
except OSError:
pass
return should_copy
bld = tsk_gen.bld
platform = bld.env['PLATFORM']
configuration = bld.env['CONFIGURATION']
game = bld.get_bootstrap_game()
executable_name = bld.get_executable_name(game)
asset_deploy_mode = bld.get_asset_deploy_mode()
# get location of APK either from command line option or default build location
if Options.options.apk_path == '':
suffix = ''
if asset_deploy_mode == ASSET_DEPLOY_PROJECT_SETTINGS:
suffix = APK_WITH_ASSETS_SUFFIX
output_folder = bld.get_output_folders(platform, configuration)[0]
apk_name = '{}/{}{}.apk'.format(output_folder.abspath(), executable_name, suffix)
else:
apk_name = Options.options.apk_path
do_clean = bld.is_option_true('deploy_android_clean_device')
deploy_executable = bld.is_option_true('deploy_android_executable')
deploy_assets = (asset_deploy_mode == ASSET_DEPLOY_LOOSE) or (asset_deploy_mode == ASSET_DEPLOY_PAKS)
layout_node = bld.get_layout_node()
Logs.debug('android_deploy: deploy options: do_clean {}, deploy_exec {}, deploy_assets {}'.format(do_clean, deploy_executable, deploy_assets))
# Some checks before we start the deploy process.
if deploy_executable and not os.path.exists(apk_name):
bld.fatal('[ERROR] Could not find the Android executable (APK) in path - {} - necessary for deployment. Run the build command for {}_{} to generate it'.format(apk_name, platform, configuration))
return
deploy_libs = bld.options.deploy_android_attempt_libs_only and not (do_clean or bld.options.from_editor_deploy) and not (asset_deploy_mode == ASSET_DEPLOY_PROJECT_SETTINGS)
Logs.debug('android_deploy: The option to attempt library only deploy is %s', 'ENABLED' if deploy_libs else 'DISABLED')
variant = '{}_{}'.format(platform, configuration)
apk_builder_path = os.path.join( variant, bld.get_android_project_relative_path(), executable_name, 'builder' )
apk_builder_node = bld.get_bintemp_folder_node().make_node(apk_builder_path)
abi_func = getattr(bld, 'get_%s_target_abi' % platform, None)
lib_paths = ['lib'] + ([ abi_func() ] if abi_func else []) # since we don't support 'fat' apks it's ok to not have the abi specifier but it's still preferred
stripped_libs_node = apk_builder_node.make_node(lib_paths)
game_package = bld.get_android_package_name(game)
device_install_path = '/data/data/{}'.format(game_package)
apk_stat = os.stat(apk_name)
apk_size = apk_stat.st_size
relative_assets_path = construct_assets_path_for_game_project(bld, game)
# This is the name of a file that we will use as a marker/timestamp. We
# will get the timestamp of the file off the device and compare that with
# asset files on the host machine to determine if the host machine asset
# file is newer than what the device has, and if so copy it to the device.
timestamp_file_name = 'engineroot.txt'
host_timestamp_file = bld.path.find_node(timestamp_file_name)
deploy_count = 0
devices = get_list_of_android_devices()
for android_device in devices:
bld.user_message('Starting to deploy to android device ' + android_device)
storage_path = auto_detect_device_storage_path(android_device, log_warnings = True)
if not storage_path:
continue
output_target = '{}/{}'.format(storage_path, relative_assets_path)
device_timestamp_file = '{}/{}'.format(output_target, timestamp_file_name)
if do_clean:
bld.user_message('Cleaning target before deployment...')
adb_call('shell', 'rm', '-rf', output_target, device = android_device)
bld.user_message('Target Cleaned...')
package_name = bld.get_android_package_name(game)
if len(package_name) != 0 and deploy_executable:
bld.user_message('Uninstalling package ' + package_name)
adb_call('uninstall', package_name, device = android_device)
################################
if deploy_libs:
access_type = get_device_access_type(android_device)
if access_type == ACCESS_NORMAL:
Logs.warn('[WARN] android_deploy: Unable to perform the library only copy on device {}'.format(android_device))
elif access_type in (ACCESS_ROOT_ADBD, ACCESS_SHELL_SU):
device_file_staging_path = '{}/LY_Staging'.format(storage_path)
device_lib_timestamp_file = '{}/files/{}'.format(device_install_path, timestamp_file_name)
def _adb_push(source_node, dest, device_id):
adb_call('push', '"{}"'.format(source_node.abspath()), dest, device = device_id)
def _adb_shell(source_node, dest, device_id):
temp_dest = '{}/{}'.format(device_file_staging_path, source_node.name)
adb_call('push', '"{}"'.format(source_node.abspath()), temp_dest, device = device_id)
adb_call('shell', 'su', '-c', 'cp', temp_dest, dest, device = device_id)
if access_type == ACCESS_ROOT_ADBD:
adb_root_push_func = _adb_push
elif access_type == ACCESS_SHELL_SU:
adb_root_push_func = _adb_shell
adb_call('shell', 'mkdir', device_file_staging_path, device = android_device)
install_check = adb_call('shell', '"pm list packages | grep {}"'.format(game_package), device = android_device)
if install_check:
target_time = get_device_file_timestamp(device_lib_timestamp_file, android_device, True)
# cases for early out in favor of re-installing the APK:
# If target_time is zero, the file wasn't found, which would indicate we haven't attempted to push just the libs before
# The dalvik executable is newer than the last time we deployed to this device
if target_time == 0 or should_copy_file(apk_builder_node.make_node('classes.dex'), target_time):
bld.user_message('A new APK needs to be installed instead for device {}'.format(android_device))
# otherwise attempt to copy the libs directly
else:
bld.user_message('Scanning which libraries need to be copied...')
libs_to_add = []
total_libs_size = 0
fallback_to_apk = False
libs = stripped_libs_node.ant_glob('**/*.so')
for lib in libs:
if should_copy_file(lib, target_time):
lib_stat = os.stat(lib.abspath())
total_libs_size += lib_stat.st_size
libs_to_add.append(lib)
if total_libs_size >= apk_size:
bld.user_message('Too many libraries changed, falling back to installing a new APK on {}'.format(android_device))
fallback_to_apk = True
break
if not fallback_to_apk:
for lib in libs_to_add:
final_target_dir = '{}/lib/{}'.format(device_install_path, lib.name)
adb_root_push_func(lib, final_target_dir, android_device)
adb_call('shell', 'su', '-c', 'chown', LIB_OWNER_GROUP, final_target_dir, device = android_device)
adb_call('shell', 'su', '-c', 'chmod', LIB_FILE_PERMISSIONS, final_target_dir, device = android_device)
deploy_executable = False
# update the timestamp file
adb_root_push_func(host_timestamp_file, device_lib_timestamp_file, android_device)
adb_call('shell', 'su', '-c', 'touch', device_lib_timestamp_file, device = android_device)
# clean up the staging directory
if access_type == ACCESS_SHELL_SU:
adb_call('shell', 'rm', '-rf', device_file_staging_path, device = android_device)
################################
if deploy_executable:
install_options = getattr(Options.options, 'deploy_android_install_options')
replace_package = ''
if bld.is_option_true('deploy_android_replace_apk'):
replace_package = '-r'
bld.user_message('Installing ' + apk_name)
install_result = adb_call('install', install_options, replace_package, '"{}"'.format(apk_name), device = android_device)
if not install_result or 'success' not in install_result.lower():
Logs.warn('[WARN] android deploy: failed to install APK on device %s.' % android_device)
if install_result:
# The error msg is the last non empty line of the output.
error_msg = next(error for error in reversed(install_result.splitlines()) if error)
Logs.warn('[WARN] android deploy: %s' % error_msg)
continue
if deploy_assets:
target_time = get_device_file_timestamp(device_timestamp_file, android_device)
if do_clean or target_time == 0:
bld.user_message('Copying all assets to the device {}. This may take some time...'.format(android_device))
# There is a chance that if someone ran VFS before this deploy on an empty directory, the output_target directory will
# already exist when we do the push command. In that case, executing the adb push command will create an ES3 directory
# and put everything there, causing the deploy to be 'successful' but the game will crash as it won't be able to find the
# assets. Since we detected a "clean" build, wipe out the output_target folder if it exists first, then do the push and
# everything will be just fine.
ls_status, _ = adb_ls(output_target, android_device)
if ls_status:
adb_call('shell', 'rm', '-rf', output_target, device = android_device)
push_status = adb_call('push', '"{}"'.format(layout_node.abspath()), output_target, device = android_device)
if not push_status:
# Clean up any files we may have pushed to make the next run success rate better
adb_call('shell', 'rm', '-rf', output_target, device = android_device)
bld.fatal('[ERROR] The ADB command to push all the files to the device failed.')
continue
else:
layout_files = layout_node.ant_glob('**/*')
bld.user_message('Scanning {} files to determine which ones need to be copied...'.format(len(layout_files)))
for src_file in layout_files:
# Faster to check if we should copy now rather than in the copy_task
if should_copy_file(src_file, target_time):
final_target_dir = '{}/{}'.format(output_target, string.replace(src_file.path_from(layout_node), '\\', '/'))
tsk_gen.adb_copy_task(android_device, src_file, final_target_dir)
# Push the timestamp_file_name last so that it has a timestamp that we can use on the next deploy to know which files to
# upload to the device
adb_call('push', '"{}"'.format(host_timestamp_file.abspath()), device_timestamp_file, device = android_device)
deploy_count = deploy_count + 1
if deploy_count == 0:
bld.fatal('[ERROR] Failed to deploy the build to any connected devices.')
|
the-stack_0_24788
|
import numpy as np
from scipy.spatial import distance_matrix
def epsilon_greedy_policy(epsilon, state, q_values, options):
valid = np.array(options)
if np.random.random() > epsilon:
max_value = np.max(q_values[state][valid])
max_actions = np.intersect1d(valid, np.where(q_values[state] == max_value))
action = np.random.choice(max_actions)
else:
action = np.random.choice(valid)
return action
def update_qvalues(q_values, distances, state, action, alpha, gamma):
next_value = q_values[action].max()
reward = -distances[state, action]
q_values[state, action] *= 1 - alpha
q_values[state, action] += alpha * (reward + gamma * next_value)
if state != 0:
next_value = q_values[0].max()
zreward = -distances[state, 0]
q_values[state, 0] *= 1 - (alpha / 100)
q_values[state, 0] += (alpha / 100) * (zreward + gamma * next_value)
return q_values, reward
class QModel:
def __init__(self, points):
np.random.seed(42)
self.points = points
self.n = len(points)
self.distances = distance_matrix(self.points, self.points)
def learn(self, distances, epochs=100, epsilon0=1.0, alpha0=0.1, gamma=0.97, decay=0.0):
rewards = []
q_values = np.zeros([self.n, self.n])
q_values[range(self.n), range(self.n)] = -np.inf
for i in range(epochs):
total_reward = 0
state = 0
path = [state]
options = list(range(self.n))
alpha = alpha0 / (1 + i * decay)
epsilon = epsilon0 / (1 + i * decay)
while len(options) > 1:
options.remove(state)
action = epsilon_greedy_policy(epsilon, state, q_values, options)
q_values, reward = update_qvalues(
q_values, distances, state, action, alpha, gamma
)
total_reward += reward
path.append(action)
state = action
# back to start
action = 0
q_values, reward = update_qvalues(
q_values, distances, state, action, alpha, gamma
)
total_reward += reward
path.append(action)
rewards.append(total_reward)
if i % 200 == 0:
print("reward", reward)
return q_values
def solve(self):
q_values = self.learn(self.distances, epochs=400, epsilon0=1, gamma=-1, alpha0=1, decay=0.05)
state = 0
path = [state]
options = list(range(self.n))
distance = 0
while len(options) > 1:
options.remove(state)
action = epsilon_greedy_policy(0, state, q_values, options)
distance += self.distances[state, action]
path.append(action)
state = action
path.append(0)
distance += self.distances[state, 0]
return {
"ordered_points": np.array(self.points)[path].tolist(),
"distance": distance,
}
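# Minimal usage sketch (illustrative only; the point set below is hypothetical):
if __name__ == "__main__":
    demo_points = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0], [2.0, 0.5]]
    solution = QModel(demo_points).solve()
    print("tour length:", solution["distance"])
    print("visit order:", solution["ordered_points"])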
|
the-stack_0_24789
|
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import models
from torch.autograd import Function
from torch.nn.utils.rnn import pack_padded_sequence
from copy import deepcopy as copy
from pdb import set_trace
VOCAB_SIZE = 2
class BatchNormConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BatchNormConv2d, self).__init__()
self.conv2d = nn.Conv2d(in_channels, out_channels, **kwargs)
self.batch_norm = nn.BatchNorm2d(out_channels, eps=1e-3)
def forward(self, x):
x = self.conv2d(x)
x = self.batch_norm(x)
return F.relu(x, inplace=True)
class Dense(nn.Module):
def __init__(self, in_features, out_features, activation=None):
super(Dense, self).__init__()
self.linear = nn.Linear(in_features, out_features)
self.activation = activation
def forward(self, x):
x = self.linear(x)
if self.activation is not None:
x = self.activation(x, inplace=True)
return x
class EmbeddingNet(nn.Module):
def normalize(self, x):
buffer = torch.pow(x, 2)
normp = torch.sum(buffer, 1).add_(1e-10)
normalization_constant = torch.sqrt(normp)
output = torch.div(x, normalization_constant.view(-1, 1).expand_as(x))
return output
class PosNet(EmbeddingNet):
def __init__(self):
super(PosNet, self).__init__()
# Input 1
self.Conv2d_1a = nn.Conv2d(3, 64, bias=False, kernel_size=10, stride=2)
self.Conv2d_2a = BatchNormConv2d(64, 32, bias=False, kernel_size=3, stride=1)
self.Conv2d_3a = BatchNormConv2d(32, 32, bias=False, kernel_size=3, stride=1)
self.Conv2d_4a = BatchNormConv2d(32, 32, bias=False, kernel_size=2, stride=1)
self.Dense1 = Dense(6 * 6 * 32, 32)
self.alpha = 10
def forward(self, input_batch):
# 128 x 128 x 3
x = self.Conv2d_1a(input_batch)
# 60 x 60 x 64
x = self.Conv2d_2a(x)
# 58 x 58 x 64
x = F.max_pool2d(x, kernel_size=2, stride=2)
# 29 x 29 x 32
x = self.Conv2d_3a(x)
# 27 x 27 x 32
x = F.max_pool2d(x, kernel_size=2, stride=2)
# 13 x 13 x 32
x = self.Conv2d_4a(x)
# 12 x 12 x 32
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = x.view(x.size()[0], -1)
# 6 x 6 x 32
x = self.Dense1(x)
# 32
return self.normalize(x) * self.alpha
class DenseClassifier(nn.Module):
def __init__(self, num_classes, h1=20, h2=30):
super(DenseClassifier, self).__init__()
self.Conv2d_1a_3x3 = BatchNormConv2d(100, 10, kernel_size=3, stride=1)
self.SpatialSoftmax = nn.Softmax2d()
self.FullyConnected7a = Dense(33 * 33 * 100, VOCAB_SIZE)
self.FullyConnected7b = Dense(128, VOCAB_SIZE)
self._Conv2d_1a_3x3 = BatchNormConv2d(100, 10, kernel_size=3, stride=1)
self._SpatialSoftmax = nn.Softmax2d()
self._FullyConnected7a = Dense(33 * 33 * 100, VOCAB_SIZE)
self._FullyConnected7b = Dense(128, VOCAB_SIZE)
def forward(self, x):
# x = torch.mean(x, axis=0, keepdims=True)
# 31 x 31 x 20
# out1 = self.Conv2d_1a_3x3(x)
out1 = self.SpatialSoftmax(x)
out1 = self.FullyConnected7a(out1.view(out1.size()[0], -1))
# out1 = self.FullyConnected7b(out1)
# out2 = self._Conv2d_1a_3x3(x)
out2 = self._SpatialSoftmax(x)
out2 = self._FullyConnected7a(out2.view(out2.size()[0], -1))
# out2 = self._FullyConnected7b(out2)
return out1, out2
class TCNModel(EmbeddingNet):
def __init__(self, inception):
super(TCNModel, self).__init__()
self.transform_input = True
self.Conv2d_1a_3x3 = inception.Conv2d_1a_3x3
self.Conv2d_2a_3x3 = inception.Conv2d_2a_3x3
self.Conv2d_2b_3x3 = inception.Conv2d_2b_3x3
self.Conv2d_3b_1x1 = inception.Conv2d_3b_1x1
self.Conv2d_4a_3x3 = inception.Conv2d_4a_3x3
self.Mixed_5b = inception.Mixed_5b
self.Mixed_5c = inception.Mixed_5c
self.Mixed_5d = inception.Mixed_5d
self.Mixed_6a = inception.Mixed_6a
self.Mixed_6b = inception.Mixed_6b
self.Mixed_6c = inception.Mixed_6c
self.Mixed_6d = inception.Mixed_6d
self.Mixed_6e = inception.Mixed_6e
self.Mixed_7a = inception.Mixed_7a
self.Mixed_7b = inception.Mixed_7b
self.Mixed_7c = inception.Mixed_7c
self.Conv2d_6a_3x3 = BatchNormConv2d(288, 100, kernel_size=3, stride=1)
self.Conv2d_6b_3x3 = BatchNormConv2d(100, 20, kernel_size=3, stride=1)
self.SpatialSoftmax = nn.Softmax2d()
self.FullyConnected7a = Dense(31 * 31 * 20, 32)
self.FullyConnected7b = Dense(31 * 31 * 20, 4)
self.alpha = 10.0
def forward(self, x):
if self.transform_input:
if x.shape[1] == 4:
x = x[:, :-1].clone()
else:
x = x.clone()
x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.Mixed_5b(x)
# 35 x 35 x 256
x = self.Mixed_5c(x)
# 35 x 35 x 288
y = self.Mixed_5d(x)
# 33 x 33 x 100
x = self.Conv2d_6a_3x3(y)
# 31 x 31 x 20
x = self.Conv2d_6b_3x3(x)
# 31 x 31 x 20
xx = self.SpatialSoftmax(x)
# 32
x = self.FullyConnected7a(xx.view(xx.size()[0], -1))
aux = self.FullyConnected7b(xx.view(xx.size()[0], -1))
aux = self.normalize(aux)
# get 2,048d feature from inception backbone
# 35 x 35 x 288
y = self.Mixed_6a(y)
# 17 x 17 x 768
y = self.Mixed_6b(y)
# 17 x 17 x 768
y = self.Mixed_6c(y)
# 17 x 17 x 768
y = self.Mixed_6d(y)
# 17 x 17 x 768
y = self.Mixed_6e(y)
# 17 x 17 x 768
y = self.Mixed_7a(y)
# 8 x 8 x 1280
y = self.Mixed_7b(y)
# 8 x 8 x 2048
y = self.Mixed_7c(y)
# 8 x 8 x 2048
y = F.avg_pool2d(y, kernel_size=8)
# 1 x 1 x 2048
y = y.view(y.size(0), -1)
# Normalize output such that output lives on unit sphere.
# Multiply by alpha as in https://arxiv.org/pdf/1703.09507.pdf
return self.normalize(x) * self.alpha, x, aux
def define_model(pretrained=True):
return TCNModel(models.inception_v3(pretrained=pretrained))
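# Hypothetical smoke test for the TCN embedding model (not part of the original file).
# It assumes torchvision still accepts models.inception_v3(pretrained=...) and uses
# randomly initialized weights so no download is needed.
def _tcn_smoke_test(batch_size=2):
    model = define_model(pretrained=False).eval()
    frames = torch.randn(batch_size, 3, 299, 299)   # fake RGB frames at the expected input size
    with torch.no_grad():
        embedding, raw_embedding, aux = model(frames)
    assert embedding.shape == (batch_size, 32)      # unit-sphere embedding scaled by alpha
    return embedding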
class TCNDepthModel(EmbeddingNet):
def __init__(self, inception):
super(TCNDepthModel, self).__init__()
self.transform_input = True
self.Conv2d_1a_3x3 = inception.Conv2d_1a_3x3
self.Conv2d_2a_3x3 = inception.Conv2d_2a_3x3
self.Conv2d_2b_3x3 = inception.Conv2d_2b_3x3
self.Conv2d_3b_1x1 = inception.Conv2d_3b_1x1
self.Conv2d_4a_3x3 = inception.Conv2d_4a_3x3
self.Mixed_5b = inception.Mixed_5b
self.Mixed_5c = inception.Mixed_5c
self.Mixed_5d = inception.Mixed_5d
self.Conv2d_6a_3x3 = BatchNormConv2d(288, 100, kernel_size=3, stride=1)
self.Conv2d_6b_3x3 = BatchNormConv2d(100, 20, kernel_size=3, stride=1)
self.SpatialSoftmax = nn.Softmax2d()
self.FullyConnected7a = Dense(31 * 31 * 20, 32)
# Depth layers
self.Conv2d_depth_1a_3x3 = BatchNormConv2d(1, 64, kernel_size=3, stride=2)
self.Conv2d_depth_1b_3x3 = BatchNormConv2d(64, 32, kernel_size=3, stride=1)
self.Conv2d_depth_1c_3x3 = BatchNormConv2d(32, 10, kernel_size=3, stride=1)
self.SpatialSoftmax_depth = nn.Softmax2d()
self.FullyConnected2a_depth = Dense(72 * 72 * 10, 10)
self.alpha = 10.0
def forward(self, input):
# Numbers indicate dimensions AFTER passing through layer below the numbers
x = input[:, :-1] # RGB
d = input[:, -1] # Depth
d.unsqueeze_(1)
if self.transform_input:
x = copy(x)
x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.Mixed_5b(x)
# 35 x 35 x 256
x = self.Mixed_5c(x)
# 35 x 35 x 288
x = self.Mixed_5d(x)
# 33 x 33 x 100
y = self.Conv2d_6a_3x3(x)
# 31 x 31 x 20
x = self.Conv2d_6b_3x3(y)
# 31 x 31 x 20
x = self.SpatialSoftmax(x)
# 31 x 31 x 20
x = self.FullyConnected7a(x.view(x.size()[0], -1))
# 32
# Depth
# 299 x 299 x1
d = self.Conv2d_depth_1a_3x3(d)
#
d = self.Conv2d_depth_1b_3x3(d)
#
d = self.Conv2d_depth_1c_3x3(d)
# 145 x 145 x 10
d = F.max_pool2d(d, kernel_size=3, stride=2)
# 72 x 72 x 10
d = self.SpatialSoftmax_depth(d)
# 72 x 72 x 10
d = self.FullyConnected2a_depth(d.view(d.size()[0], -1))
# 10
out = torch.cat([x, d], 1)
# 42
# Normalize output such that output lives on unit sphere.
# Multiply by alpha as in https://arxiv.org/pdf/1703.09507.pdf
return self.normalize(out) * self.alpha, out, y
class LTCNDepthModel(EmbeddingNet):
def __init__(self):
super(LTCNDepthModel, self).__init__()
self.transform_input = True
self.Conv2d_6a_3x3 = BatchNormConv2d(256, 100, kernel_size=2, stride=1)
# self.Conv2d_6b_3x3 = BatchNormConv2d(100, 20, kernel_size=3, stride=1)
self.SpatialSoftmax = nn.Softmax2d()
self.FullyConnected5a = Dense(6 * 6 * 100, 1000)
self.FullyConnected6a = Dense(1000, 500)
self.FullyConnected7a = Dense(500, 32)
# Depth layers
self.Conv2d_depth_1a_3x3 = BatchNormConv2d(1, 64, kernel_size=3, stride=2)
self.Conv2d_depth_1b_3x3 = BatchNormConv2d(64, 32, kernel_size=3, stride=1)
self.Conv2d_depth_1c_3x3 = BatchNormConv2d(32, 10, kernel_size=3, stride=1)
self.SpatialSoftmax_depth = nn.Softmax2d()
self.FullyConnected2a_depth = Dense(72 * 72 * 10, 10)
self.alpha = 10.0
def forward(self, frames, features):
# Numbers indicate dimensions AFTER passing through layer below the numbers
x = features # RGB
d = frames[:, -1, :, :] # Depth
d.unsqueeze_(1)
# 33 x 33 x 100
y = self.Conv2d_6a_3x3(x)
# 31 x 31 x 20
# x = self.Conv2d_6b_3x3(y)
# 31 x 31 x 20
x = self.SpatialSoftmax(y)
# 31 x 31 x 20
x = self.FullyConnected5a(x.view(x.size()[0], -1))
# 32
x = self.FullyConnected6a(x)
x = self.FullyConnected7a(x)
# Depth
# 299 x 299 x1
d = self.Conv2d_depth_1a_3x3(d)
#
d = self.Conv2d_depth_1b_3x3(d)
#
d = self.Conv2d_depth_1c_3x3(d)
# 145 x 145 x 10
d = F.max_pool2d(d, kernel_size=3, stride=2)
# 72 x 72 x 10
d = self.SpatialSoftmax_depth(d)
# 72 x 72 x 10
d = self.FullyConnected2a_depth(d.view(d.size()[0], -1))
# 10
out = torch.cat([x, d], 1)
# 42
# Normalize output such that output lives on unit sphere.
# Multiply by alpha as in https://arxiv.org/pdf/1703.09507.pdf
return self.normalize(out) * self.alpha, out, y
class LTCNModel(EmbeddingNet):
def __init__(self):
super(LTCNModel, self).__init__()
self.transform_input = True
self.Conv2d_6a_3x3 = BatchNormConv2d(256, 100, kernel_size=2, stride=1)
# self.Conv2d_6b_3x3 = BatchNormConv2d(100, 20, kernel_size=3, stride=1)
self.SpatialSoftmax = nn.Softmax2d()
self.FullyConnected4a = Dense(7 * 7 * 256, 1000)
self.FullyConnected5a = Dense(6 * 6 * 100, 1000)
self.FullyConnected6a = Dense(1000, 500)
self.FullyConnected7a = Dense(1000, 32)
self.alpha = 10.0
def forward(self, features):
# Numbers indicate dimensions AFTER passing through layer below the numbers
x = features # RGB
# 33 x 33 x 100
# y = self.Conv2d_6a_3x3(x)
# 31 x 31 x 20
# x = self.Conv2d_6b_3x3(y)
# 31 x 31 x 20
x = self.SpatialSoftmax(x)
# 31 x 31 x 20
x = self.FullyConnected4a(x.view(x.size()[0], -1))
# 32
# x = self.FullyConnected6a(x)
x = self.FullyConnected7a(x)
y = x
# Normalize output such that output lives on unit sphere.
# Multiply by alpha as in https://arxiv.org/pdf/1703.09507.pdf
return self.normalize(x) * self.alpha, x, y
def define_model_depth(pretrained=True):
return TCNDepthModel(models.inception_v3(pretrained=pretrained))
def define_model_ltcn_depth():
return LTCNDepthModel()
def define_model_ltcn():
return LTCNModel()
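# Illustrative sketch (added; not part of the original module) of how the
# factory functions above might be used. The input shape is an assumption
# inferred from FullyConnected4a = Dense(7 * 7 * 256, 1000): LTCNModel
# expects pre-extracted feature maps of shape (batch, 256, 7, 7).
#
#     model = define_model_ltcn()
#     features = torch.randn(8, 256, 7, 7)
#     embedding, unnormalized, y = model(features)
#     # embedding: (8, 32), scaled by model.alpha onto a sphere of radius 10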
class EncoderRNN(nn.Module):
def __init__(self, feature_size, hidden_size, vocab_size, num_layers, max_seq_length=12):
"""Set the hyper-parameters and build the layers."""
super(EncoderRNN, self).__init__()
self.embed = nn.Embedding(vocab_size, feature_size)
self.lstm = nn.LSTM(feature_size, hidden_size, num_layers, batch_first=True)
self.linear = nn.Linear(hidden_size, vocab_size)
self.sigmoid = nn.Sigmoid()
self.max_seg_length = max_seq_length
def forward(self, features, lengths):
"""Decode image feature vectors and generates captions."""
packed = pack_padded_sequence(features, lengths, batch_first=True)
hiddens, _ = self.lstm(packed)
outputs = self.linear(hiddens[0][-1])
outputs = self.sigmoid(outputs)
# binary cross entropy - model each output as sigmoid
return outputs
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
"""Load the pretrained ResNet-152 and replace top fc layer."""
super(EncoderCNN, self).__init__()
inception = models.inception_v3(pretrained=True)
modules = list(inception.children())[:-1] # delete the last fc layer.
self.inception = nn.Sequential(*modules)
self.linear = nn.Linear(inception.fc.in_features, embed_size)
self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
def forward(self, shared_features):
"""Extract feature vectors from input images."""
# with torch.no_grad():
# features = self.inception(images)
features = shared_features.reshape(shared_features.size(0), -1)
features = self.bn(self.linear(features))
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers, max_seq_length=12):
"""Set the hyper-parameters and build the layers."""
super(DecoderRNN, self).__init__()
self.embed = nn.Embedding(vocab_size, embed_size)
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
self.linear = nn.Linear(hidden_size, vocab_size)
self.max_seg_length = max_seq_length
def forward(self, features, captions, lengths):
"""Decode image feature vectors and generates captions."""
embeddings = self.embed(captions)
embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)
packed = pack_padded_sequence(embeddings, lengths, batch_first=True)
hiddens, _ = self.lstm(packed)
outputs = self.linear(hiddens[0])
return outputs
def sample(self, features, states=None):
"""Generate captions for given image features using greedy search."""
sampled_ids = []
inputs = features.unsqueeze(1)
for i in range(self.max_seg_length):
hiddens, states = self.lstm(inputs, states) # hiddens: (batch_size, 1, hidden_size)
outputs = self.linear(hiddens.squeeze(1)) # outputs: (batch_size, vocab_size)
_, predicted = outputs.max(1) # predicted: (batch_size)
sampled_ids.append(predicted)
inputs = self.embed(predicted) # inputs: (batch_size, embed_size)
inputs = inputs.unsqueeze(1) # inputs: (batch_size, 1, embed_size)
sampled_ids = torch.stack(sampled_ids, 1) # sampled_ids: (batch_size, max_seq_length)
return sampled_ids
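# Minimal usage sketch appended for illustration (not part of the original
# file). It assumes the torch import at the top of this module and exercises
# DecoderRNN's greedy sampling with random feature vectors; all sizes below
# are arbitrary.
if __name__ == "__main__":
    import torch

    decoder = DecoderRNN(embed_size=256, hidden_size=512, vocab_size=1000, num_layers=1)
    features = torch.randn(4, 256)  # (batch_size, embed_size)
    with torch.no_grad():
        sampled_ids = decoder.sample(features)
    print(sampled_ids.shape)  # torch.Size([4, 12]) with the default max_seq_length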
|
the-stack_0_24790
|
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2020 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_portalbase.network`
================================================================================
Base Library for the Portal-style libraries.
* Author(s): Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import os
import time
import gc
from micropython import const
from adafruit_io.adafruit_io import IO_HTTP, AdafruitIO_RequestError
from adafruit_fakerequests import Fake_Requests
try:
import rtc
except ImportError:
rtc = None
try:
from secrets import secrets
except ImportError:
print(
"""WiFi settings are kept in secrets.py, please add them there!
the secrets dictionary must contain 'ssid' and 'password' at a minimum"""
)
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PortalBase.git"
# pylint: disable=line-too-long, too-many-lines
# you'll need to pass in an io username and key
TIME_SERVICE = (
"https://io.adafruit.com/api/v2/%s/integrations/time/strftime?x-aio-key=%s"
)
# our strftime is %Y-%m-%d %H:%M:%S.%L %j %u %z %Z see http://strftime.net/ for decoding details
# See https://apidock.com/ruby/DateTime/strftime for full options
TIME_SERVICE_FORMAT = "%Y-%m-%d %H:%M:%S.%L %j %u %z %Z"
LOCALFILE = "local.txt"
# pylint: enable=line-too-long
STATUS_NO_CONNECTION = (100, 0, 0)
STATUS_CONNECTING = (0, 0, 100)
STATUS_FETCHING = (200, 100, 0)
STATUS_DOWNLOADING = (0, 100, 100)
STATUS_CONNECTED = (0, 100, 0)
STATUS_DATA_RECEIVED = (0, 0, 100)
STATUS_OFF = (0, 0, 0)
CONTENT_TEXT = const(1)
CONTENT_JSON = const(2)
CONTENT_IMAGE = const(3)
class HttpError(Exception):
"""HTTP Specific Error"""
class NetworkBase:
"""Network Base Class for the Portal-style libraries.
:param wifi_module: An initialized WiFi Module that encapsulates the WiFi communications
:param bool extract_values: If true, single-length fetched values are automatically extracted
from lists and tuples. Defaults to ``True``.
:param debug: Turn on debug print outs. Defaults to False.
:param list secrets_data: An optional list in place of the data contained in the secrets.py file
"""
# pylint: disable=too-many-instance-attributes, too-many-locals, too-many-branches, too-many-statements
def __init__(
self, wifi_module, *, extract_values=True, debug=False, secrets_data=None
):
self._wifi = wifi_module
self._debug = debug
self.json_transform = []
self._extract_values = extract_values
self._json_types = [
"application/json",
"application/javascript",
"application/geo+json",
]
if secrets_data is not None:
self._secrets = secrets_data
else:
self._secrets = secrets
self.requests = None
try:
os.stat(LOCALFILE)
self.uselocal = True
except OSError:
self.uselocal = False
self._io_client = None
gc.collect()
def neo_status(self, value):
"""The status NeoPixel.
:param value: The color to change the NeoPixel.
"""
self._wifi.neo_status(value)
@staticmethod
def json_traverse(json, path):
"""
Traverse down the specified JSON path and return the value or values
:param json: JSON data to traverse
:param list path: The path that we want to follow
"""
value = json
if not isinstance(path, (list, tuple)):
raise ValueError(
"The json_path parameter should be enclosed in a list or tuple."
)
for x in path:
try:
value = value[x]
except (TypeError, KeyError, IndexError) as error:
raise ValueError(
"The specified json_path was not found in the results."
) from error
gc.collect()
return value
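# Illustrative note (added; not in the original library): json_traverse walks
# a nested structure using a list or tuple of keys and indexes, e.g.
#
#     data = {"feeds": [{"last_value": "42"}]}
#     NetworkBase.json_traverse(data, ["feeds", 0, "last_value"])  # -> "42"
#
# Passing a bare string instead of a list/tuple raises ValueError, as does a
# path element that is missing from the data.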
def add_json_transform(self, json_transform):
"""Add a function that is applied to JSON data when data is fetched
:param json_transform: A function or a list of functions to call with the parsed JSON.
Changes and additions are permitted for the ``dict`` object.
"""
if callable(json_transform):
self.json_transform.append(json_transform)
else:
self.json_transform.extend(filter(callable, json_transform))
@staticmethod
def url_encode(url):
"""
A function to perform minimal URL encoding
"""
url = url.replace(" ", "+")
url = url.replace("%", "%25")
url = url.replace(":", "%3A")
return url
def get_strftime(self, time_format, location=None):
"""
Fetch a custom strftime relative to your location.
:param str time_format: The strftime format string to request from the time service.
:param str location: Your timezone, e.g. ``"America/New_York"``.
"""
# pylint: disable=line-too-long
self.connect()
api_url = None
reply = None
try:
aio_username = self._secrets["aio_username"]
aio_key = self._secrets["aio_key"]
except KeyError:
raise KeyError(
"\n\nOur time service requires a login/password to rate-limit. Please register for a free adafruit.io account and place the user/key in your secrets file under 'aio_username' and 'aio_key'" # pylint: disable=line-too-long
) from KeyError
if location is None:
location = self._secrets.get("timezone", location)
if location:
print("Getting time for timezone", location)
api_url = (TIME_SERVICE + "&tz=%s") % (aio_username, aio_key, location)
else: # we'll try to figure it out from the IP address
print("Getting time from IP address")
api_url = TIME_SERVICE % (aio_username, aio_key)
api_url += "&fmt=" + self.url_encode(time_format)
try:
response = self._wifi.requests.get(api_url, timeout=10)
if response.status_code != 200:
print(response)
error_message = (
"Error connecting to Adafruit IO. The response was: "
+ response.text
)
raise RuntimeError(error_message)
if self._debug:
print("Time request: ", api_url)
print("Time reply: ", response.text)
reply = response.text
except KeyError:
raise KeyError(
"Was unable to lookup the time, try setting secrets['timezone'] according to http://worldtimeapi.org/timezones" # pylint: disable=line-too-long
) from KeyError
# now clean up
response.close()
response = None
gc.collect()
return reply
def get_local_time(self, location=None):
# pylint: disable=line-too-long
"""
Fetch and "set" the local time of this microcontroller to the local time at the location, using an internet time API.
:param str location: Your timezone, e.g. ``"America/New_York"``.
"""
reply = self.get_strftime(TIME_SERVICE_FORMAT, location=location)
if reply:
times = reply.split(" ")
the_date = times[0]
the_time = times[1]
year_day = int(times[2])
week_day = int(times[3])
is_dst = None # no way to know yet
year, month, mday = [int(x) for x in the_date.split("-")]
the_time = the_time.split(".")[0]
hours, minutes, seconds = [int(x) for x in the_time.split(":")]
now = time.struct_time(
(year, month, mday, hours, minutes, seconds, week_day, year_day, is_dst)
)
if rtc is not None:
rtc.RTC().datetime = now
return reply
def wget(self, url, filename, *, chunk_size=12000):
"""Download a url and save to filename location, like the command wget.
:param url: The URL from which to obtain the data.
:param filename: The name of the file to save the data to.
:param chunk_size: how much data to read/write at a time.
"""
print("Fetching stream from", url)
self.neo_status(STATUS_FETCHING)
response = self._wifi.requests.get(url, stream=True)
headers = {}
for title, content in response.headers.items():
headers[title.lower()] = content
if response.status_code == 200:
print("Reply is OK!")
self.neo_status((0, 0, 100)) # green = got data
else:
if self._debug:
if "content-length" in headers:
print("Content-Length: {}".format(int(headers["content-length"])))
if "date" in headers:
print("Date: {}".format(headers["date"]))
self.neo_status((100, 0, 0)) # red = http error
raise HttpError(
"Code {}: {}".format(
response.status_code, response.reason.decode("utf-8")
)
)
if self._debug:
print(response.headers)
if "content-length" in headers:
content_length = int(headers["content-length"])
else:
raise RuntimeError("Content-Length missing from headers")
remaining = content_length
print("Saving data to ", filename)
stamp = time.monotonic()
with open(filename, "wb") as file:
for i in response.iter_content(min(remaining, chunk_size)): # huge chunks!
self.neo_status(STATUS_DOWNLOADING)
remaining -= len(i)
file.write(i)
if self._debug:
print(
"Read %d bytes, %d remaining"
% (content_length - remaining, remaining)
)
else:
print(".", end="")
if not remaining:
break
self.neo_status(STATUS_FETCHING)
response.close()
stamp = time.monotonic() - stamp
print(
"Created file of %d bytes in %0.1f seconds" % (os.stat(filename)[6], stamp)
)
self.neo_status(STATUS_OFF)
if not content_length == os.stat(filename)[6]:
raise RuntimeError
def connect(self, max_attempts=10):
"""
Connect to WiFi using the settings found in secrets.py
:param max_attempts: The maximum number of attempts to connect to WiFi before
failing or use None to disable. Defaults to 10.
"""
self._wifi.neo_status(STATUS_CONNECTING)
attempt = 1
while not self._wifi.is_connected:
# secrets dictionary must contain 'ssid' and 'password' at a minimum
print("Connecting to AP", self._secrets["ssid"])
if (
self._secrets["ssid"] == "CHANGE ME"
or self._secrets["password"] == "CHANGE ME"
):
change_me = "\n" + "*" * 45
change_me += "\nPlease update the 'secrets.py' file on your\n"
change_me += "CIRCUITPY drive to include your local WiFi\n"
change_me += "access point SSID name in 'ssid' and SSID\n"
change_me += "password in 'password'. Then save to reload!\n"
change_me += "*" * 45
raise OSError(change_me)
self._wifi.neo_status(STATUS_NO_CONNECTION) # red = not connected
try:
self._wifi.connect(self._secrets["ssid"], self._secrets["password"])
self.requests = self._wifi.requests
except RuntimeError as error:
if max_attempts is not None and attempt >= max_attempts:
raise OSError(
"Maximum number of attempts reached when trying to connect to WiFi"
) from error
print("Could not connect to internet", error)
print("Retrying in 3 seconds...")
attempt += 1
time.sleep(3)
gc.collect()
def _get_io_client(self):
if self._io_client is not None:
return self._io_client
self.connect()
try:
aio_username = self._secrets["aio_username"]
aio_key = self._secrets["aio_key"]
except KeyError:
raise KeyError(
"Adafruit IO secrets are kept in secrets.py, please add them there!\n\n"
) from KeyError
self._io_client = IO_HTTP(aio_username, aio_key, self._wifi.requests)
return self._io_client
def push_to_io(self, feed_key, data, metadata=None, precision=None):
"""Push data to an adafruit.io feed
:param str feed_key: Name of feed key to push data to.
:param data: data to send to feed
:param dict metadata: Optional metadata associated with the data
:param int precision: Optional amount of precision points to send with floating point data
"""
io_client = self._get_io_client()
while True:
try:
feed_id = io_client.get_feed(feed_key)
except AdafruitIO_RequestError:
# If no feed exists, create one
feed_id = io_client.create_new_feed(feed_key)
except RuntimeError as exception:
print("An error occured, retrying! 1 -", exception)
continue
break
while True:
try:
io_client.send_data(feed_id["key"], data, metadata, precision)
except RuntimeError as exception:
print("An error occured, retrying! 2 -", exception)
continue
except NameError as exception:
print(feed_id["key"], data, exception)
continue
break
def get_io_feed(self, feed_key, detailed=False):
"""Return the Adafruit IO Feed that matches the feed key
:param str feed_key: Name of feed key to match.
:param bool detailed: Whether to return additional detailed information
"""
io_client = self._get_io_client()
while True:
try:
return io_client.get_feed(feed_key, detailed=detailed)
except RuntimeError as exception:
print("An error occured, retrying! 1 -", exception)
continue
break
def get_io_group(self, group_key):
"""Return the Adafruit IO Group that matches the group key
:param str group_key: Name of group key to match.
"""
io_client = self._get_io_client()
while True:
try:
return io_client.get_group(group_key)
except RuntimeError as exception:
print("An error occured, retrying! 1 -", exception)
continue
break
def get_io_data(self, feed_key):
"""Return all values from Adafruit IO Feed Data that matches the feed key
:param str feed_key: Name of feed key to receive data from.
"""
io_client = self._get_io_client()
while True:
try:
return io_client.receive_all_data(feed_key)
except RuntimeError as exception:
print("An error occured, retrying! 1 -", exception)
continue
break
def fetch(self, url, *, headers=None, timeout=10):
"""Fetch data from the specified url and return a response object
:param str url: The URL to fetch from.
:param dict headers: Extra headers to include in the request.
:param int timeout: The timeout period in seconds.
"""
gc.collect()
response = None
if self.uselocal:
print("*** USING LOCALFILE FOR DATA - NOT INTERNET!!! ***")
response = Fake_Requests(LOCALFILE)
if not response:
self.connect()
# great, lets get the data
print("Retrieving data...", end="")
self.neo_status(STATUS_FETCHING) # yellow = fetching data
gc.collect()
response = self._wifi.requests.get(url, headers=headers, timeout=timeout)
gc.collect()
return response
def add_json_content_type(self, content_type):
"""
Add a JSON content type
:param str content_type: The content JSON type, e.g. 'application/json'
"""
if isinstance(content_type, str):
self._json_types.append(content_type)
def _detect_content_type(self, headers):
if "content-type" in headers:
if "image/" in headers["content-type"]:
return CONTENT_IMAGE
for json_type in self._json_types:
if json_type in headers["content-type"]:
return CONTENT_JSON
return CONTENT_TEXT
def check_response(self, response):
"""
Check the response object status code, change the lights, and return content type
:param response: The response object from a network call
"""
headers = self._get_headers(response)
if self._debug:
print("Headers:", headers)
if response.status_code == 200:
print("Reply is OK!")
self.neo_status(STATUS_DATA_RECEIVED) # green = got data
content_type = self._detect_content_type(headers)
else:
if self._debug:
if "content-length" in headers:
print("Content-Length: {}".format(int(headers["content-length"])))
if "date" in headers:
print("Date: {}".format(headers["date"]))
self.neo_status((100, 0, 0)) # red = http error
raise HttpError(
"Code {}: {}".format(
response.status_code, response.reason.decode("utf-8")
)
)
return content_type
@staticmethod
def _get_headers(response):
headers = {}
for title, content in response.headers.items():
headers[title.lower()] = content
gc.collect()
return headers
def fetch_data(
self,
url,
*,
headers=None,
json_path=None,
regexp_path=None,
timeout=10,
):
"""Fetch data from the specified url and perfom any parsing
:param str url: The URL to fetch from.
:param dict headers: Extra headers to include in the request.
:param json_path: The path to drill down into the JSON data.
:param regexp_path: The path formatted as a regular expression to search
the text data.
:param int timeout: The timeout period in seconds.
"""
response = self.fetch(url, headers=headers, timeout=timeout)
return self._parse_data(response, json_path=json_path, regexp_path=regexp_path)
def _parse_data(
self,
response,
*,
json_path=None,
regexp_path=None,
):
json_out = None
content_type = self.check_response(response)
if content_type == CONTENT_JSON:
if json_path is not None:
# Drill down to the json path and set json_out as that node
if isinstance(json_path, (list, tuple)) and (
not json_path or not isinstance(json_path[0], (list, tuple))
):
json_path = (json_path,)
try:
gc.collect()
json_out = response.json()
if self._debug:
print(json_out)
gc.collect()
except ValueError: # failed to parse?
print("Couldn't parse json: ", response.text)
raise
except MemoryError as error:
raise MemoryError(
"{} (data is likely too large)".format(error)
) from error
if content_type == CONTENT_JSON:
values = self.process_json(json_out, json_path)
elif content_type == CONTENT_TEXT:
values = self.process_text(response.text, regexp_path)
# Clean up
json_out = None
response = None
if self._extract_values and len(values) == 1:
values = values[0]
gc.collect()
return values
@staticmethod
def process_text(text, regexp_path):
"""
Process text content
:param str text: The entire text content
:param regexp_path: The path formatted as a regular expression to search
the text data.
"""
values = []
if regexp_path:
import re # pylint: disable=import-outside-toplevel
for regexp in regexp_path:
values.append(re.search(regexp, text).group(1))
else:
values = text
return values
def process_json(self, json_data, json_path):
"""
Process JSON content
:param dict json_data: The JSON data as a dict
:param json_path: The path to drill down into the JSON data.
"""
values = []
# optional JSON post processing, apply any transformations
# these MAY change/add element
for idx, json_transform in enumerate(self.json_transform):
try:
json_transform(json_data)
except Exception as error:
print("Exception from json_transform: ", idx, error)
raise
# extract desired text/values from json
if json_data is not None and json_path:
for path in json_path:
try:
values.append(self.json_traverse(json_data, path))
except KeyError:
print(json_data)
raise
else:
# No path given, so return JSON as string for compatibility
import json # pylint: disable=import-outside-toplevel
values = json.dumps(json_data)
return values
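# Hypothetical usage sketch appended for illustration only (not part of the
# library). The `wifi` object is an assumption: any board-specific wrapper
# exposing connect(), is_connected, requests and neo_status() can be passed
# to NetworkBase, and the URL/json_path below are placeholders.
#
#     network = NetworkBase(wifi)
#     network.connect()  # uses secrets['ssid'] and secrets['password']
#     text, author = network.fetch_data(
#         "https://www.adafruit.com/api/quotes.php",
#         json_path=([0, "text"], [0, "author"]),
#     )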
|
the-stack_0_24792
|
"""
State Trie
^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
The state trie is the structure responsible for storing
`eth1spec.eth_types.Account` objects.
"""
import copy
from dataclasses import dataclass, field
from typing import (
Callable,
Dict,
Generic,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
TypeVar,
Union,
cast,
)
from ethereum.crypto.hash import keccak256
from ethereum.utils.ensure import ensure
from ethereum.utils.hexadecimal import hex_to_bytes
from .. import rlp
from ..base_types import U256, Bytes, Uint, slotted_freezable
from .eth_types import (
Account,
Address,
Receipt,
Root,
Transaction,
encode_account,
)
# note: an empty trie (regardless of whether it is secured) has root:
#
# keccak256(RLP(b''))
# ==
# 56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421 # noqa: E501,SC10
#
# also:
#
# keccak256(RLP(()))
# ==
# 1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347 # noqa: E501,SC10
#
# which is the sha3Uncles hash in block header with no uncles
EMPTY_TRIE_ROOT = Root(
hex_to_bytes(
"56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
)
)
Node = Union[Account, Bytes, Transaction, Receipt, Uint, U256, None]
K = TypeVar("K", bound=Bytes)
V = TypeVar(
"V",
Optional[Account],
Optional[Bytes],
Bytes,
Optional[Transaction],
Optional[Receipt],
Uint,
U256,
)
@slotted_freezable
@dataclass
class LeafNode:
"""Leaf node in the Merkle Trie"""
rest_of_key: Bytes
value: rlp.RLP
@slotted_freezable
@dataclass
class ExtensionNode:
"""Extension node in the Merkle Trie"""
key_segment: Bytes
subnode: rlp.RLP
@slotted_freezable
@dataclass
class BranchNode:
"""Branch node in the Merkle Trie"""
subnodes: List[rlp.RLP]
value: rlp.RLP
InternalNode = Union[LeafNode, ExtensionNode, BranchNode]
def encode_internal_node(node: Optional[InternalNode]) -> rlp.RLP:
"""
Encodes a Merkle Trie node into its RLP form. The RLP will then be
serialized into a `Bytes` and hashed unless it is less that 32 bytes
when serialized.
This function also accepts `None`, representing the absence of a node,
which is encoded to `b""`.
Parameters
----------
node : Optional[InternalNode]
The node to encode.
Returns
-------
encoded : `rlp.RLP`
The node encoded as RLP.
"""
unencoded: rlp.RLP
if node is None:
unencoded = b""
elif isinstance(node, LeafNode):
unencoded = (
nibble_list_to_compact(node.rest_of_key, True),
node.value,
)
elif isinstance(node, ExtensionNode):
unencoded = (
nibble_list_to_compact(node.key_segment, False),
node.subnode,
)
elif isinstance(node, BranchNode):
unencoded = node.subnodes + [node.value]
else:
raise AssertionError(f"Invalid internal node type {type(node)}!")
encoded = rlp.encode(unencoded)
if len(encoded) < 32:
return unencoded
else:
return keccak256(encoded)
def encode_node(node: Node, storage_root: Optional[Bytes] = None) -> Bytes:
"""
Encode a Node for storage in the Merkle Trie.
Currently mostly an unimplemented stub.
"""
if isinstance(node, Account):
assert storage_root is not None
return encode_account(node, storage_root)
elif isinstance(node, (Transaction, Receipt, U256)):
return rlp.encode(cast(rlp.RLP, node))
elif isinstance(node, Bytes):
return node
else:
raise AssertionError(
f"encoding for {type(node)} is not currently implemented"
)
@dataclass
class Trie(Generic[K, V]):
"""
The Merkle Trie.
"""
secured: bool
default: V
_data: Dict[K, V] = field(default_factory=dict)
def copy_trie(trie: Trie[K, V]) -> Trie[K, V]:
"""
Create a copy of `trie`. Since only frozen objects may be stored in tries,
the contents are reused.
Parameters
----------
trie: `Trie`
Trie to copy.
Returns
-------
new_trie : `Trie[K, V]`
A copy of the trie.
"""
return Trie(trie.secured, trie.default, copy.copy(trie._data))
def trie_set(trie: Trie[K, V], key: K, value: V) -> None:
"""
Stores an item in a Merkle Trie.
This method deletes the key if `value == trie.default`, because the Merkle
Trie represents the default value by omitting it from the trie.
Parameters
----------
trie: `Trie`
Trie to store in.
key : `Bytes`
Key to lookup.
value : `V`
Node to insert at `key`.
"""
if value == trie.default:
if key in trie._data:
del trie._data[key]
else:
trie._data[key] = value
def trie_get(trie: Trie[K, V], key: K) -> V:
"""
Gets an item from the Merkle Trie.
This method returns `trie.default` if the key is missing.
Parameters
----------
trie:
Trie to lookup in.
key :
Key to lookup.
Returns
-------
node : `V`
Node at `key` in the trie.
"""
return trie._data.get(key, trie.default)
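# Illustrative example (added; not part of the specification): storing the
# default value removes the key, so a Trie only materialises non-default
# entries.
#
#     t: Trie[Bytes, Bytes] = Trie(secured=False, default=b"")
#     trie_set(t, b"dog", b"puppy")
#     trie_get(t, b"dog")        # b"puppy"
#     trie_get(t, b"cat")        # b"" (the default)
#     trie_set(t, b"dog", b"")   # equivalent to deleting the key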
def common_prefix_length(a: Sequence, b: Sequence) -> int:
"""
Find the longest common prefix of two sequences.
"""
for i in range(len(a)):
if i >= len(b) or a[i] != b[i]:
return i
return len(a)
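# Example (added): common_prefix_length(b"dog", b"door") == 2, and if `a` is
# a prefix of `b` the full length of `a` is returned.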
def nibble_list_to_compact(x: Bytes, is_leaf: bool) -> Bytes:
"""
Compresses nibble-list into a standard byte array with a flag.
A nibble-list is a list of byte values no greater than `15`. The flag is
encoded in the high nibble of the highest byte. The flag nibble can be broken
down into two single-bit flags.
Highest nibble::
+---+---+----------+--------+
| _ | _ | is_leaf | parity |
+---+---+----------+--------+
3 2 1 0
The lowest bit of the nibble encodes the parity of the length of the
remaining nibbles -- `0` when even and `1` when odd. The second lowest bit
is used to distinguish leaf and extension nodes. The other two bits are not
used.
Parameters
----------
x :
Array of nibbles.
is_leaf :
True if this is part of a leaf node, or false if it is an extension
node.
Returns
-------
compressed : `bytearray`
Compact byte array.
"""
compact = bytearray()
if len(x) % 2 == 0: # ie even length
compact.append(16 * (2 * is_leaf))
for i in range(0, len(x), 2):
compact.append(16 * x[i] + x[i + 1])
else:
compact.append(16 * ((2 * is_leaf) + 1) + x[0])
for i in range(1, len(x), 2):
compact.append(16 * x[i] + x[i + 1])
return Bytes(compact)
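# Worked example (added for illustration): the odd-length leaf key
# [0x1, 0x2, 0x3] becomes b"\x31\x23" -- the flag nibble 0b0011
# (is_leaf=1, parity=1) is packed with the first key nibble, and the
# remaining nibbles are packed two per byte. The even-length extension key
# [0xa, 0xb] becomes b"\x00\xab" (flag byte 0x00, then the packed nibbles).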
def bytes_to_nibble_list(bytes_: Bytes) -> Bytes:
"""
Converts a `Bytes` into to a sequence of nibbles (bytes with value < 16).
Parameters
----------
bytes_:
The `Bytes` to convert.
Returns
-------
nibble_list : `Bytes`
The `Bytes` in nibble-list format.
"""
nibble_list = bytearray(2 * len(bytes_))
for byte_index, byte in enumerate(bytes_):
nibble_list[byte_index * 2] = (byte & 0xF0) >> 4
nibble_list[byte_index * 2 + 1] = byte & 0x0F
return Bytes(nibble_list)
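# Example (added): bytes_to_nibble_list(b"\x12\xaf") == b"\x01\x02\x0a\x0f";
# each input byte is split into its high and low nibbles.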
def _prepare_trie(
trie: Trie[K, V],
get_storage_root: Callable[[Address], Root] = None,
) -> Mapping[Bytes, Bytes]:
"""
Prepares the trie for root calculation. Removes values that are empty,
hashes the keys (if `secured == True`) and encodes all the nodes.
Parameters
----------
trie :
The `Trie` to prepare.
get_storage_root :
Function to get the storage root of an account. Needed to encode
`Account` objects.
Returns
-------
out : `Mapping[eth1spec.base_types.Bytes, Node]`
Object with keys mapped to nibble-byte form.
"""
mapped: MutableMapping[Bytes, Bytes] = {}
for (preimage, value) in trie._data.items():
if isinstance(value, Account):
assert get_storage_root is not None
address = Address(preimage)
encoded_value = encode_node(value, get_storage_root(address))
else:
encoded_value = encode_node(value)
# Empty values are represented by their absence
ensure(encoded_value != b"", AssertionError)
key: Bytes
if trie.secured:
# "secure" tries hash keys once before construction
key = keccak256(preimage)
else:
key = preimage
mapped[bytes_to_nibble_list(key)] = encoded_value
return mapped
def root(
trie: Trie[K, V],
get_storage_root: Callable[[Address], Root] = None,
) -> Root:
"""
Computes the root of a modified merkle patricia trie (MPT).
Parameters
----------
trie :
`Trie` to get the root of.
get_storage_root :
Function to get the storage root of an account. Needed to encode
`Account` objects.
Returns
-------
root : `eth1spec.eth_types.Root`
MPT root of the underlying key-value pairs.
"""
obj = _prepare_trie(trie, get_storage_root)
root_node = encode_internal_node(patricialize(obj, Uint(0)))
if len(rlp.encode(root_node)) < 32:
return keccak256(rlp.encode(root_node))
else:
assert isinstance(root_node, Bytes)
return Root(root_node)
def patricialize(
obj: Mapping[Bytes, Bytes], level: Uint
) -> Optional[InternalNode]:
"""
Structural composition function.
Used to recursively patricialize and merkleize a dictionary. Includes
memoization of the tree structure and hashes.
Parameters
----------
obj :
Underlying trie key-value pairs, with keys in nibble-list format.
level :
Current trie level.
Returns
-------
node : `eth1spec.base_types.Bytes`
Root node of `obj`.
"""
if len(obj) == 0:
return None
arbitrary_key = next(iter(obj))
# if leaf node
if len(obj) == 1:
leaf = LeafNode(arbitrary_key[level:], obj[arbitrary_key])
return leaf
# prepare for extension node check by finding the longest prefix_length such
# that all keys in obj share the same key[level:level + prefix_length]
substring = arbitrary_key[level:]
prefix_length = len(substring)
for key in obj:
prefix_length = min(
prefix_length, common_prefix_length(substring, key[level:])
)
# finished searching, found another key at the current level
if prefix_length == 0:
break
# if extension node
if prefix_length > 0:
prefix = arbitrary_key[level : level + prefix_length]
return ExtensionNode(
prefix,
encode_internal_node(patricialize(obj, level + prefix_length)),
)
branches: List[MutableMapping[Bytes, Bytes]] = []
for _ in range(16):
branches.append({})
value = b""
for key in obj:
if len(key) == level:
# shouldn't ever have an account or receipt in an internal node
if isinstance(obj[key], (Account, Receipt, Uint)):
raise AssertionError
value = obj[key]
else:
branches[key[level]][key] = obj[key]
return BranchNode(
[
encode_internal_node(patricialize(branches[k], level + 1))
for k in range(16)
],
value,
)
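# Illustrative example (added for clarity; not part of the specification),
# assuming this module is imported within the ethereum package so that the
# relative imports above resolve:
#
#     t: Trie[Bytes, Bytes] = Trie(secured=False, default=b"")
#     assert root(t) == EMPTY_TRIE_ROOT
#     trie_set(t, b"do", b"verb")
#     trie_set(t, b"dog", b"puppy")
#     new_root = root(t)  # 32-byte keccak256 commitment to both entries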
|
the-stack_0_24793
|
import os
import numpy as np
from msibi.potentials import tail_correction
from msibi.utils.exceptions import UnsupportedEngine
from msibi.workers import run_query_simulations
class MSIBI(object):
"""Management class for orchestrating an MSIBI optimization.
Parameters
----------
rdf_cutoff : float
The upper cutoff value for the RDF calculation.
n_rdf_points : int
The number of radius values used in RDF calculations.
max_frames : int, default 10
The maximum number of frames to include at once in RDF calculation
The RDF calculation will accumulate an average RDF over the range
[-max_frames:-1] of the trajectory. This is also how a target_rdf
will be calculated in Pair.add_state
pot_cutoff : float, default None
The upper cutoff value for the potential. If None is provided,
rdf_cutoff is used.
r_switch : float, default None
The radius after which a tail correction is applied. If None is
provided, pot_r[-5] is used.
rdf_exclude_bonded : bool, default False
Whether the RDF calculation should exclude correlations between bonded
species.
smooth_rdfs : bool, default False
Whether to use a smoothing function to reduce the noise in the RDF data.
verbose : bool, default False
Whether to provide more information for debugging.
Attributes
----------
states : list of States
All states to be used in the optimization procedure.
pairs : list of Pairs
All pairs to be used in the optimization procedure.
bonds : list of Bonds
All bonds to be used in the optimization procedure.
angles : list of Angles
All angles to be used in the optimization procedure.
n_iterations : int
The number of MSIBI iterations to perform.
max_frames : int
The maximum number of frames to include at once in RDF calculation
rdf_cutoff : float
The upper cutoff value for the RDF calculation.
n_rdf_points : int
The number of radius values used in the RDF calculation.
dr : float
The spacing of radius values.
rdf_exclude_bonded : bool
Whether the RDF calculation should exclude correlations between bonded
species.
pot_cutoff : float
The upper cutoff value for the potential.
pot_r : np.ndarray, shape=int((rdf_cutoff + dr) / dr)
The radius values at which the potential is computed.
r_switch : float
The radius after which a tail correction is applied.
"""
def __init__(
self,
rdf_cutoff,
n_rdf_points,
max_frames=10,
pot_cutoff=None,
r_switch=None,
rdf_exclude_bonded=False,
smooth_rdfs=False,
verbose=False
):
rmin = 1e-4
self.verbose = verbose
self.states = []
self.pairs = []
self.bonds = []
self.angles = []
self.n_iterations = 10 # Can be overridden in optimize().
self.max_frames = max_frames
self.rdf_cutoff = rdf_cutoff
self.n_rdf_points = n_rdf_points
self.dr = rdf_cutoff / (n_rdf_points - 1)
self.rdf_exclude_bonded = rdf_exclude_bonded
self.smooth_rdfs = smooth_rdfs
self.rdf_r_range = np.array([rmin, self.rdf_cutoff + self.dr])
self.rdf_n_bins = self.n_rdf_points
# Sometimes the pot_cutoff and rdf_cutoff have different ranges,
# e.g. to look at long-range correlations
if not pot_cutoff:
pot_cutoff = rdf_cutoff
self.pot_cutoff = pot_cutoff
self.pot_r = np.arange(rmin, self.pot_cutoff + self.dr, self.dr)
if not r_switch:
r_switch = self.pot_r[-5]
self.r_switch = r_switch
def add_state(self, state):
state._opt = self
self.states.append(state)
def add_pair(self, pair):
self.pairs.append(pair)
def add_bond(self, bond):
self.bonds.append(bond)
def add_angle(self, angle):
self.angles.append(angle)
def optimize(
self,
integrator,
integrator_kwargs,
dt,
gsd_period,
n_iterations=10,
start_iteration=0,
n_steps=1e6,
engine="hoomd",
_dir=None
):
"""Optimize the pair potentials
Parameters
----------
integrator : str, required
The integrator to use in the query simulation.
See hoomd-blue.readthedocs.io/en/v2.9.6/module-md-integrate.html
integrator_kwargs : dict, required
The args and their values required by the integrator chosen
dt : float, required
The time step delta
gsd_period : int, required
The number of frames between snapshots written to query.gsd
n_iterations : int, default 10
Number of iterations.
start_iteration : int, default 0
Start optimization at start_iteration, useful for restarting.
n_steps : int, default=1e6
How many steps to run the query simulations.
engine : str, default "hoomd"
Engine that runs the simulations.
References
----------
Please cite the following paper:
.. [1] T.C. Moore et al., "Derivation of coarse-grained potentials via
multistate iterative Boltzmann inversion," Journal of Chemical
Physics, vol. 140, pp. 224104, 2014.
"""
if integrator == "hoomd.md.integrate.nve":
raise ValueError("The NVE ensemble is not supported with MSIBI")
for pair in self.pairs:
for state in self.states:
pair._add_state(state, smooth=self.smooth_rdfs)
if self.bonds:
for bond in self.bonds:
for state in self.states:
bond._add_state(state)
if self.angles:
for angle in self.angles:
for state in self.states:
angle._add_state(state)
if engine == "hoomd":
import hoomd
HOOMD_VERSION = 2
else:
HOOMD_VERSION = None
if self.verbose:
print(f"Using HOOMD version {HOOMD_VERSION}.")
for state in self.states:
state.HOOMD_VERSION = HOOMD_VERSION
self.n_iterations = n_iterations
self._initialize(
engine=engine,
n_steps=int(n_steps),
integrator=integrator,
integrator_kwargs=integrator_kwargs,
dt=dt,
gsd_period=gsd_period,
potentials_dir=_dir)
for n in range(start_iteration + self.n_iterations):
print(f"-------- Iteration {n} --------")
run_query_simulations(self.states, engine=engine)
self._update_potentials(n, engine)
def _update_potentials(self, iteration, engine):
"""Update the potentials for each pair. """
for pair in self.pairs:
self._recompute_rdfs(pair, iteration)
pair.update_potential(self.pot_r, self.r_switch, self.verbose)
pair.save_table_potential(
self.pot_r, self.dr, iteration=iteration, engine=engine
)
def _recompute_rdfs(self, pair, iteration):
"""Recompute the current RDFs for every state used for a given pair."""
for state in self.states:
pair.compute_current_rdf(
state,
smooth=self.smooth_rdfs,
verbose=self.verbose
)
pair.save_current_rdf(state, iteration=iteration, dr=self.dr)
print(
"pair {0}, state {1}, iteration {2}: {3:f}".format(
pair.name,
state.name,
iteration,
pair._states[state]["f_fit"][iteration]
)
)
def _initialize(
self,
engine,
n_steps,
integrator,
integrator_kwargs,
dt,
gsd_period,
potentials_dir
):
"""Create initial table potentials and the simulation input scripts.
Parameters
----------
engine : str, default 'hoomd'
Engine used to run simulations
potentials_dir : path, default None
Directory to store potential files. If None is given, a "potentials"
folder in the current working directory is used.
"""
if potentials_dir is None:
self.potentials_dir = os.path.join(os.getcwd(), "potentials")
else:
self.potentials_dir = potentials_dir
if not os.path.isdir(self.potentials_dir):
os.mkdir(self.potentials_dir)
if not os.path.isdir("rdfs"):
os.mkdir("rdfs")
table_potentials = []
bonds = None
angles = None
for pair in self.pairs:
potential_file = os.path.join(
self.potentials_dir, f"pot.{pair.name}.txt"
)
pair.potential_file = potential_file
table_potentials.append((pair.type1, pair.type2, potential_file))
V = tail_correction(self.pot_r, pair.potential, self.r_switch)
pair.potential = V
# This file is written for viewing of how the potential evolves.
pair.save_table_potential(
self.pot_r, self.dr, iteration=0, engine=engine
)
# This file is overwritten at each iteration and actually used for
# performing the query simulations.
pair.save_table_potential(self.pot_r, self.dr, engine=engine)
if self.bonds:
bonds = self.bonds
if self.angles:
angles = self.angles
for state in self.states:
state.save_runscript(
n_steps=n_steps,
integrator=integrator,
integrator_kwargs=integrator_kwargs,
dt=dt,
gsd_period=gsd_period,
table_potentials=table_potentials,
table_width=len(self.pot_r),
engine=engine,
bonds=bonds,
angles=angles
)
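# Hypothetical usage sketch appended for illustration only (not part of the
# package). `state` and `pair` are assumptions standing in for configured
# msibi State and Pair objects, and the integrator settings are arbitrary
# HOOMD 2.x values.
#
#     opt = MSIBI(rdf_cutoff=5.0, n_rdf_points=101, pot_cutoff=5.0)
#     opt.add_state(state)
#     opt.add_pair(pair)
#     opt.optimize(
#         integrator="hoomd.md.integrate.nvt",
#         integrator_kwargs={"tau": 0.1, "kT": 1.0},
#         dt=0.001,
#         gsd_period=1000,
#         n_iterations=10,
#     )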
|
the-stack_0_24794
|
from __future__ import absolute_import, division, print_function
import sys
import math
from datetime import datetime
from libtbx.program_template import ProgramTemplate
#from libtbx.utils import null_out
from libtbx import group_args, phil
from libtbx.str_utils import make_sub_header
from libtbx.utils import Sorry
import mmtbx
import mmtbx_probe_ext as probeExt
from mmtbx.probe import Helpers
from iotbx import pdb
from iotbx.pdb import common_residue_names_get_class
version = "0.8.0"
master_phil_str = '''
run_tests = False
.type = bool
.help = Run unit tests before doing the requested operations
source_selection = "(altid a or altid '' or altid ' ') and occupancy > 0.33"
.type = atom_selection
.help = Source selection description
target_selection = None
.type = atom_selection
.help = Target selection description ('=' means same as source)
use_neutron_distances = False
.type = bool
.help = Use neutron distances (-nuclear in probe)
approach = *self both once surface count_atoms
.type = choice
.help = self (src -> src) both (src <=> targ) once (src -> targ) surface (VdW surface) count_atoms (count atoms)
excluded_bond_chain_length = 4
.type = int
.help = Exclude chain of atoms bonded to source for this many hops (-4H, -3, -2, -1 in probe)
drop_non_selected_atoms = False
.type = bool
.help = Drop non selected atoms (-drop in probe)
use_polar_hydrogens = True
.type = bool
.help = Use polar hydrogens (-usepolarh in probe)
minimum_polar_hydrogen_occupancy = 0.7
.type = float
.help = Minimum occupancy for polar hydrogens (0.25 in probe)
maximum_polar_hydrogen_b = 40.0
.type = float
.help = Maximum b-factor for polar hydrogens (80.0 in probe)
include_mainchain_mainchain = True
.type = bool
.help = Include mainchain -> mainchain interactions (-mc in probe)
include_water_water = False
.type = bool
.help = Include water-to-water interactions (-wat2wat in probe)
keep_unselected_atoms = True
.type = bool
.help = Include atoms that are not selected in the collision neighbor lists (-keep, -drop, -scsurface, -exposed, -asurface, -access in probe)
atom_radius_scale = 1.0
.type = float
.help = Atom radius = (r*atom_radius_scale)+atom_radius_offset (-scalevds, -vswscale in probe)
atom_radius_offset = 0.0
.type = float
.help = Atom radius = (r*atom_radius_scale)+atom_radius_offset (-addvdw in probe)
probe
.style = menu_item auto_align
{
radius = 0.25
.type = float
.help = Probe radius (half distance between touched atoms) (-radius in probe)
density = 16.0
.type = float
.help = Probe dots per square angstrom on atom surface (-density in probe)
overlap_scale_factor = 0.5
.type = float
.help = Fraction of overlap assigned to each atom (-spike in probe)
minimum_occupancy = 0.02
.type = float
.help = Minimum occupancy for a source atom (-minoccupancy in probe)
worse_clash_cutoff = 0.5
.type = float
.help = Cutoff for worse clashes, a positive number (-divworse in probe)
clash_cutoff = 0.4
.type = float
.help = Cutoff for the clashes, a positive number (-divlow in probe)
contact_cutoff = 0.25
.type = float
.help = Cutoff for the contact (-divhigh in probe)
uncharged_hydrogen_cutoff = 0.6
.type = float
.help = Cutoff for uncharged hydrogen overlap (-hbregular in probe)
charged_hydrogen_cutoff = 0.8
.type = float
.help = Cutoff for charged hydrogen overlap (-hbcharged in probe)
bump_weight = 10.0
.type = float
.help = Weight applied to bump score (-bumpweight in probe)
hydrogen_bond_weight = 4.0
.type = float
.help = Weight applied to hydrogen bond score (-hbweight in probe)
gap_weight = 0.25
.type = float
.help = Weight applied to gap score (-gapweight in probe)
allow_weak_hydrogen_bonds = False
.type = bool
.help = Separately account for weak hydrogen bonds (-LweakHbonds in probe)
implicit_hydrogens = False
.type = bool
.help = Use implicit hydrogens, no water proxies (-implicit in probe)
use_original_probe_tables = False
.type = bool
.help = Use the original Probe tables rather than CCTBX tables by default (for regression tests)
}
output
.style = menu_item auto_align
{
file_name = None
.type = str
.short_caption = Output file name
.help = Output file name
format = *standard raw oneline
.type = choice
.help = Type of output to write (-oneline -unformated -kinemage in probe)
contact_summary = False
.type = bool
.help = Report summary of contacts (-oneline, -summary in probe)
condensed = False
.type = bool
.help = Condensed output format (-condense, -kinemage in probe)
count_dots = False
.type = bool
.help = Count dots rather than listing all contacts (-countdots in probe)
hydrogen_bond_output = True
.type = bool
.help = Output hydrogen-bond contacts (-nohbout in probe)
record_added_hydrogens = False
.type = bool
.help = Output hydrogen-bond contacts (-dumph2o in probe)
clash_output = True
.type = bool
.help = Output clash contacts (-noclashout in probe)
vdw_output = True
.type = bool
.help = Output van der Waals contacts (-novdwout in probe)
separate_worse_clashes = False
.type = bool
.help = Separately report worse clashes (-sepworse in probe)
group_name = ""
.type = str
.help = Specify the group name (-name in probe)
add_group_name_master_line = False
.type = bool
.help = Add a master=name line on lists (-dotmaster in probe)
add_group_line = True
.type = bool
.help = Add a group line on kinemage output (-nogroup in probe)
add_kinemage_keyword = False
.type = bool
.help = Add kinemage 1 to beginning of kin file (-kinemage in probe)
add_lens_keyword = False
.type = bool
.help = Add lens keyword to kin file (-lens, -nolens in probe)
add_group_statement = True
.type = bool
.help = Add group statement to kin file (-nogroup in probe)
color_by_dna_base = False
.type = bool
.help = Color by DNA base (-basecolor, -colorbase in probe)
group_label = ""
.type = str
.help = Label for the surface-dots group (-name, -scsurface, -exposed, -asurface, -access in probe)
bin_gaps = False
.type = bool
.help = Bin the gaps (-gapbins in probe)
merge_contacts = True
.type = bool
.help = Combine wide and close contacts (True in probe)
report_hydrogen_bonds = True
.type = bool
.help = Report hydrogen bonds (-nohbout in probe)
report_clashes = True
.type = bool
.help = Report clashes (-noclashout in probe)
report_vdws = True
.type = bool
.help = Report van der Waals contacts (-novdwout in probe)
only_report_bad_clashes = False
.type = bool
.help = Only report bad clashes (-onlybadout in probe)
atoms_are_masters = False
.type = bool
.help = Atoms are listed as masters (-element in probe)
color_by_gap = True
.type = bool
.help = Assign a color to reported gaps (-atomcolor, -gapcolor, -basecolor in probe)
default_point_color = "gray"
.type = str
.help = Default color for output points (-outcolor in probe)
compute_scores = True
.type = bool
.help = Compute scores rather than just counting dots (-spike, -nospike in probe)
}
'''
program_citations = phil.parse('''
citation {
authors = Word, et. al.
journal = J. Mol. Biol.
volume = 285
pages = 1711-1733
year = 1999
external = True
}
''')
# ------------------------------------------------------------------------------
class Program(ProgramTemplate):
description = '''
Probe2 version {}
Compute the MolProbity Probe score for a file, or a subset of the file.
Produce summaries or lists of all contacts, in Kinemage or raw format, depending
on PHIL parameters.
Inputs:
PDB or mmCIF file containing atomic model
Ligand CIF file, if needed
Output:
Kinemage file describing the score and other information, depending on the parameters.
Note:
Some approaches require the target_selection parameter. Setting the
target_selection to "=" will re-use the source for the target. In all
other cases, the string passed in will be used as a CCTBX selection on
the model to select a subset of its atoms.
The original Probe program had two ways to specify whether HET atoms were included
and whether water atoms were included: in the selection description and as separate
command-line arguments. The command-line arguments are not present in Probe2; these
choices must be specified as part of the selection criteria. Also, Probe2 does not break out
aromatic Carbons as Car in a separate category when counting dots; they are treated
as C for reporting purposes.
The most simple dotkin:
mmtbx.probe2 approach=self source_selection="all" output.file_name=out.kin input.pdb
Equivalent PHIL arguments for original Probe command-line options:
-defaults:
source_selection="(altid a or altid '' or altid ' ') and occupancy > 0.33"
approach=self
excluded_bond_chain_length=4
include_mainchain_mainchain=True
-kinemage:
output.add_kinemage_keyword=True
output.count_dots=False
output.format=standard
output.condensed=False
-scsurface:
approach=surface
source_selection="not water"
keep_unselected_atoms=False
probe.radius=1.4
group_name="SCS"
-exposed:
approach=surface
source_selection="(altid a or altid '' or altid ' ') and occupancy > 0.33"
keep_unselected_atoms=False
probe.radius=1.4
group_name="SCS"
-asurface:
approach=surface
source_selection="not water"
keep_unselected_atoms=False
probe.radius=0.0
group_name="AS"
-access:
approach=surface
source_selection="not water"
keep_unselected_atoms=False
atom_radius_offset=1.4
probe.radius=0.0
group_name="AS"
-scan 0:
source_selection="(altid a or altid '' or altid ' ') and bfactor < 40 occupancy > 0.33"
approach=self
excluded_bond_chain_length=4
include_mainchain_mainchain=True
-scan 1:
approach=once
excluded_bond_chain_length=4
source_selection="(altid a or altid '' or altid ' ') and bfactor < 40 and occupancy > 0.33" blt40 ogt65
target_selection="((altid a or altid '' or altid ' ') and bfactor < 40 and occupancy > 0.65) or (not water and occupancy > 0.33)"
'''.format(version)
datatypes = ['model', 'restraint', 'phil']
master_phil_str = master_phil_str
data_manager_options = ['model_skip_expand_with_mtrix']
citations = program_citations
epilog = '''
For additional information and help, see http://kinemage.biochem.duke.edu/software/probe
and http://molprobity.biochem.duke.edu
'''
# ------------------------------------------------------------------------------
def _totalInteractionCount(self, chainCounts):
'''
Find the total count of interactions of any type for the specified chain-pair type.
:param chainCounts: One of the structures that hold the counts of interaction
types for a given pair of chain types: _MCMCCount, _SCSCCount, _MCSCCount, _otherCount,
or _sumCount.
:return: Sum of results across all interaction types.
'''
ret = 0
for v in chainCounts.values():
ret += v
return ret
# ------------------------------------------------------------------------------
def _scaled_atom_radius(self, a):
'''
Find the scaled and offset radius for the specified atom. This will be called on each
atom after their extra information has been loaded to determine the scaled and offset
value to use for the remainder of the program.
:param a: Atom whose radius is to be scaled
:return: Scaled and offset radius of the atom.
'''
rad = self._extraAtomInfo.getMappingFor(a).vdwRadius
if rad <= 0:
alt = a.parent().altloc
resName = a.parent().resname.strip().upper()
resID = str(a.parent().parent().resseq_as_int())
chainID = a.parent().parent().parent().id
myFullName = "chain "+str(chainID)+" "+resName+" "+resID+" "+a.name+" "+alt
raise Sorry("Invalid radius for atom look-up: "+myFullName+"; rad = "+str(rad))
return self.params.atom_radius_offset + (rad * self.params.atom_radius_scale)
# ------------------------------------------------------------------------------
def _atom_class_for(self, a):
'''
Assign the atom class for a specified atom.
:param a: Atom whose class is to be specified
:return: If our parameters have been set to color and sort by NA base,
then it returns the appropriate base name. Otherwise, it returns the
element of the atom.
'''
if not self.params.output.color_by_dna_base:
return a.element
else:
resName = a.parent().resname
cl = common_residue_names_get_class(name = resName)
if cl == "common_rna_dna" or cl == "modified_rna_dna":
cleanName = resName.upper().strip()
if cleanName in ['U','URA','UTP','UDP','UMP','UR',
'T','THY','TTP','TDP','TMP','5MU','DT','TR']:
return 't/u'
elif cleanName in ['A','ADE','ATP','ADP','AMP','1MA','RIA','T6A','DA','AR']:
return 'a'
elif cleanName in ['C','CYT','CTP','CDP','CMP','5MC','OMC','DC','CR']:
return 'c'
elif cleanName in ['G','GUA','GTP','GDP','GMP','GSP','1MG','2MG','M2G','7MG','OMG','DG','GR']:
return 'g'
return 'other na'
else:
return "nonbase"
# ------------------------------------------------------------------------------
def _color_for_gap(self, gap, interactionType):
'''
Report the color associated with a gap (and interaction type).
:param gap: Size of the gap in Angstroms.
:param interactionType: InteractionType of the dot.
:return: Kinemage name of the color associated with the class.
'''
if interactionType == probeExt.InteractionType.StandardHydrogenBond:
return "greentint "
elif gap > 0.35:
return "blue "
elif gap > 0.25:
return "sky "
elif gap > 0.15:
return "sea "
elif gap > 0.0:
return "green "
elif gap > -0.1:
return "yellowtint "
elif gap > -0.2:
return "yellow "
elif gap > -0.3:
return "orange "
elif gap > -0.4:
return "red "
else:
return "hotpink "
# ------------------------------------------------------------------------------
def _color_for_atom_class(self, c):
'''
Report the color associated with an atom class.
Based on atomprops.h:INIT_ATOM_TABLE from original probe.
:param c: Class of the atom.
:return: Kinemage name of the color associated with the class.
'''
# Make sure the atom class is one that we know about
if not c in self._allAtomClasses:
return 'magenta'
# Check to see if this atom belongs to one of the special colors.
if c in ['C','Ag','other na']:
return 'white'
elif c in ['N','He','t/u']:
return 'sky'
elif c in ['O']:
return 'red'
elif c in ['P','Ne','a']:
return 'pink'
elif c in ['S','c']:
return 'yellow'
elif c in ['Se','F','Cl']:
return 'green'
elif c in ['Br','I']:
return 'brown'
elif c in ['Co']:
return 'blue'
elif c in ['Cu','Ar']:
return 'orange'
elif c in ['Au']:
return 'gold'
elif c in ['Kr']:
return 'greentint'
elif c in ['Xe']:
return 'magenta'
elif c in ['Rn']:
return 'pinktint'
elif c in ['g']:
return 'sea'
# Most atom types, the default.
return 'grey'
# ------------------------------------------------------------------------------
class DotInfo:
# Dot class storing information about an individual dot.
def __init__(self, src, target, loc, spike, overlapType, gap, ptmaster, angle):
self.src = src # Source atom for the interaction
self.target = target # Target atom for the interactions
self.loc = loc # Location of the dot start
self.spike = spike # Location of the dot end
self.overlapType = overlapType # Type of overlap the interaction represents
self.gap = gap # Gap between the atoms
self.ptmaster = ptmaster # Main/side chain interaction type
self.angle = angle # Angle associated with the bump
self.dotCount = 1 # Used by _condense and raw output to count dots on the same source + target
# ------------------------------------------------------------------------------
def _save_dot(self, src, target, atomClass, loc, spike, overlapType, gap, ptmaster, angle):
'''
Generate and store a DotInfo entry with the specified parameters. It will be stored
into the self._results data structure.
:param src: Source atom for the dot.
:param target: Target atom for the dot, if any.
:param atomClass: Atom class of this dot, indicates where to store.
:param loc: Location of the dot start.
:param spike: Location of the dot end.
:param overlapType: Type of overlap for the dot.
:param gap: Gap spacing for the dot.
:param ptmaster: ptmaster entry for the dot.
:param angle: angle for the dot.
:return: As a side effect, this will add a new entry into one of the lists in the
self._results data structure.
'''
self._results[atomClass][self._dotScorer.interaction_type(overlapType,gap)].append(
self.DotInfo(src, target, loc, spike, overlapType, gap, ptmaster, angle)
)
# ------------------------------------------------------------------------------
def _generate_interaction_dots(self, sourceAtoms, targetAtoms, bondedNeighborLists):
'''
Find all interaction dots for the specified atom.
This does not include locations where the probe is inside a bonded neighbor.
:param sourceAtoms: Atoms that can be the source of an interaction.
:param targetAtoms: Atoms that can be the target of an interaction
(can be the same list as sourceAtoms for some approaches).
:param bondedNeighborLists: List of bonded neighbors for atoms in sourceAtoms.
:return: Side effect: Add dots to the self._results data structure by
atomclass and dot type.
'''
maxRadius = 2*self._maximumVDWRadius + 2 * self.params.probe.radius
for src in sourceAtoms:
# Find out what class of dot we should place for this atom.
atomClass = self._atomClasses[src]
# Generate no dots for ignored atoms.
if atomClass == 'ignore':
continue
# Generate no dots for atoms with too-low occupancy
if src.occ < self.params.probe.minimum_occupancy:
continue
# Find atoms that are close enough that they might touch.
nearby = self._spatialQuery.neighbors(src.xyz, 0.001, maxRadius)
# Find our characteristics
srcMainChain = self._inMainChain[src]
srcSideChain = self._inSideChain[src]
srcHet = self._inHet[src]
srcInWater = self._inWater[src]
srcExtra = self._extraAtomInfo.getMappingFor(src)
# Select those that are actually within the contact distance based on their
# particular radius and which are in the set of target atoms.
# Also verify that the potential target atoms meet our criteria based on parameters.
atomSet = set()
nearbyPhantomHydrogens = set()
for n in nearby:
nMainChain = self._inMainChain[n]
nHet = self._inHet[n]
nInWater = self._inWater[n]
nExtra = self._extraAtomInfo.getMappingFor(n)
# Keep a list of nearby Phantom Hydrogens in case we need to exclude them.
if nExtra.isDummyHydrogen:
nearbyPhantomHydrogens.add(n)
d = (Helpers.rvec3(n.xyz) - Helpers.rvec3(src.xyz)).length()
if ((n in targetAtoms) and
(d <= nExtra.vdwRadius + srcExtra.vdwRadius + 2*self.params.probe.radius)
):
# if both atoms are in the same non-HET chain and on the main chain, then skip
# if we're not allowing mainchain-mainchain interactions.
# The atoms must be on the same chain to be skipped.
if not self.params.include_mainchain_mainchain and (
(srcMainChain and nMainChain) and not (srcHet or nHet) and
(src.parent().parent().parent().id == n.parent().parent().parent().id) # Same chain
):
continue
# Skip atoms that are marked to be ignored
if self._atomClasses[n] == 'ignore':
continue
# Skip atoms with too low occupancy
elif n.occ < self.params.probe.minimum_occupancy:
continue
# Skip water-water interactions unless they are allowed
elif (not self.params.include_water_water) and srcInWater and nInWater:
continue
# Skip atoms that are in non-compatible alternate conformations
elif not Helpers.compatibleConformations(src, n):
continue
atomSet.add(n)
# Check the atoms for interactions
if len(atomSet) > 0:
# Find the atoms that are bonded to the source atom within the specified hop
# count. Limit the length of the chain to 3 if neither the source nor the final
# atom is a Hydrogen.
excluded = Helpers.getAtomsWithinNBonds(src, bondedNeighborLists,
self.params.excluded_bond_chain_length, 3)
# For Phantom Hydrogens, move any non-Acceptor atom in the atom list into the
# excluded list and also add nearby Phantom Hydrogens into the excluded list.
# @todo Consider whether we'd rather handle this by making bonds between the
# Phantoms and their water Oxygens (both directions), which will shield their
# contacts from one another and (1) avoid removing sections of hydrogen bond patterns
# that fall inside atoms that are covalently bonded to acceptors, and (2) remove
# the inner collision of the water Oxygen with atoms in the acceptor that also makes
# a Hydrogen bond with the acceptor.
if srcExtra.isDummyHydrogen:
newExclusions = set()
for a in atomSet:
if not self._extraAtomInfo.getMappingFor(a).isAcceptor:
newExclusions.add(a)
excluded = list(set(excluded).union(newExclusions).union(nearbyPhantomHydrogens))
# Remove all of the excluded atoms from the interaction set so we don't
# put spurious dots on them.
for e in excluded:
atomSet.discard(e)
# Check each dot to see if it interacts with non-bonded nearby target atoms.
srcDots = self._dots[src]
pr = self.params.probe.radius
scale = self.params.probe.overlap_scale_factor
for dotvect in srcDots:
# Find out if there is an interaction
res = self._dotScorer.check_dot(src, dotvect, pr, list(atomSet), excluded, scale)
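          # (Fields inferred from the uses below) res carries overlapType, gap, overlap,
          # annular, and cause -- the atom responsible for the interaction, if any.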
          # Classify the interaction and store appropriate results unless we should
          # ignore the result because there was no valid overlap.
overlapType = res.overlapType
# If the overlap type is NoOverlap, check dot to make sure it is not annular.
# This excludes dots that are further from the contact than dots could be at
# the ideal just-touched contact.
if overlapType == probeExt.OverlapType.NoOverlap and res.annular:
continue
# Handle any dots that should not be ignored.
if overlapType != probeExt.OverlapType.Ignore:
# See whether this dot is allowed based on our parameters.
spo = self.params.output
show = False
interactionType = self._dotScorer.interaction_type(overlapType,res.gap)
if interactionType == probeExt.InteractionType.Invalid:
print('Warning: Invalid interaction type encountered (internal error)', file=self.logger)
continue
            # Main branch: are we reporting interactions other than bad clashes?
if (not spo.only_report_bad_clashes):
# We are reporting other than bad clashes, see if our type is being reported
if spo.report_hydrogen_bonds and overlapType == probeExt.OverlapType.HydrogenBond:
show = True
elif spo.report_clashes and overlapType == probeExt.OverlapType.Clash:
show = True
elif spo.report_vdws and overlapType == probeExt.OverlapType.NoOverlap:
show = True
else:
# We are only reporting bad clashes. See if we're reporting clashes and this is
# a bad one.
if (spo.report_clashes and interactionType in [
probeExt.InteractionType.Bump, probeExt.InteractionType.BadBump]):
show = True
# If we're not showing this one, skip to the next
if not show:
continue
# Determine the ptmaster (main/side chain interaction type) and keep track of
# counts for each type.
causeMainChain = self._inMainChain[res.cause]
causeSideChain = self._inSideChain[res.cause]
causeHet = self._inHet[res.cause]
ptmaster = ' '
if srcMainChain and causeMainChain:
if (not srcHet) and (not causeHet): # This may be a redundant check
ptmaster = 'M'
self._MCMCCount[interactionType] += 1
elif srcSideChain and causeSideChain:
if (not srcHet) and (not causeHet): # This may be a redundant check
ptmaster = 'S'
self._SCSCCount[interactionType] += 1
elif ( (srcMainChain and causeSideChain) or (srcSideChain and causeMainChain) ):
if (not srcHet) and (not causeHet): # This may be a redundant check
ptmaster = 'P'
self._MCSCCount[interactionType] += 1
else:
ptmaster = 'O'
self._otherCount[interactionType] += 1
# Find the locations of the dot and spike by scaling the dot vector by the atom radius and
# the (negative because it is magnitude) overlap.
loc = Helpers.rvec3(src.xyz) + Helpers.rvec3(dotvect)
spikeloc = ( Helpers.rvec3(src.xyz) + Helpers.rvec3(dotvect).normalize() *
(self._extraAtomInfo.getMappingFor(src).vdwRadius - res.overlap) )
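            # Illustrative example: a dot on a 1.6 A-radius atom whose probe overlaps a
            # neighbor by 0.2 A gets its spike end 1.4 A from the atom center along dotvect.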
# Save the dot
self._save_dot(src, res.cause, atomClass, loc, spikeloc, overlapType, res.gap, ptmaster, 0)
# ------------------------------------------------------------------------------
def _generate_surface_dots_for(self, src, nearby):
'''
Find all surface dots for the specified atom.
This does not include locations where the probe is interacting with
a nearby atom, so it is a subset of the skin dots (for which only the
dots themselves are outside of the nearby atoms).
:param src: Atom whose surface dots are to be found.
:param nearby: Atoms that are nearby to src and might block surface dots.
:return: Side effect: Add dots on the surface of the atom to the
self._results data structure by atomclass and dot type.
'''
# Generate no dots for ignored atoms.
if self._atomClasses[src] == 'ignore':
return
# Check all of the dots for the atom and see if they should be
# added to the list.
srcInWater = self._inWater[src]
r = self._extraAtomInfo.getMappingFor(src).vdwRadius
pr = self.params.probe.radius
srcDots = self._dots[src]
for dotvect in srcDots:
# Dot on the surface of the atom, at its radius; both dotloc and spikeloc from original code.
# This is where the probe touches the surface.
dotloc = Helpers.rvec3(src.xyz) + Helpers.rvec3(dotvect)
# Dot that is one probe radius past the surface of the atom, exploring for contact with nearby
# atoms. This is the location of the center of the probe.
exploc = Helpers.rvec3(src.xyz) + Helpers.rvec3(dotvect).normalize() * (r + pr)
# If the exploring dot is within a probe radius + vdW radius of a nearby atom,
# we don't add a dot.
okay = True
for b in nearby:
bInWater = self._inWater[b]
# If we should ignore the nearby atom, we don't check it.
if self._atomClasses[b] == 'ignore':
continue
# If we're ignoring water-water interactions and both src and
# nearby are in a water, we should ignore this as well (unless
# both are hydrogens from the same water, in which case we
# continue on to check.)
elif ((not self.params.include_water_water) and srcInWater and bInWater
and src.parent() != b.parent() ):
continue
# The nearby atom is one that we should check interaction with, see if
# we're in range. If so, mark this dot as not okay because it is inside a
# nearby atom.
if ( (Helpers.rvec3(b.xyz) - exploc).length() <=
pr + self._extraAtomInfo.getMappingFor(b).vdwRadius ):
okay = False
# If this dot is okay, add it to the internal data structure based on its
# atom class and overlap type.
if okay:
self._save_dot(src, None, self._atomClasses[src], dotloc, dotloc,
probeExt.OverlapType.NoOverlap, 0.0, ' ', 0.0)
# ------------------------------------------------------------------------------
def _count_skin_dots_for(self, src, bonded):
'''
Count all skin dots for the specified atom.
    :param src: Atom whose skin dots are to be counted.
    :param bonded: Atoms that are bonded to src by one or more hops.
    :return: Number of skin dots on the atom (dots that do not fall inside any
    of its bonded neighbors). Unlike the dot-generation routines, this does not
    modify the self._results data structure.
'''
# No dots yet...
ret = 0
# Generate no dots for ignored atoms or for phantom hydrogens
if self._atomClasses[src] == 'ignore' or self._extraAtomInfo.getMappingFor(src).isDummyHydrogen:
return 0
# If we should ignore the bonded element, we don't check it.
# Remove any ignored atoms from the list of bonded atoms to pull this check out of
# the inner loop.
srcDots = self._dots[src]
realBonded = []
for b in bonded:
if self._atomClasses[b] != 'ignore' and not self._extraAtomInfo.getMappingFor(b).isDummyHydrogen:
realBonded.append(b)
# Check all of the dots for the atom and see if they should be
# added to the list.
return self._dotScorer.count_surface_dots(src, srcDots, realBonded)
# ------------------------------------------------------------------------------
def _count_skin_dots(self, atoms, bondedNeighborLists):
'''
Count all skin dots for the atoms passed in.
:param atoms: Atoms to check.
    :param bondedNeighborLists: Neighbor list including these atoms.
    :return: Number of skin dots on any of the atoms in the source selection.
    This count is used to normalize output scores.
'''
ret = 0
# Store parameters that are used in the inner loop
excluded_bond_chain_length = self.params.excluded_bond_chain_length
for src in atoms:
# Find the atoms that are bonded to the source atom within the specified hop
# count. Limit the length of the chain to 3 if neither the source nor the final
# atom is a Hydrogen.
neighbors = Helpers.getAtomsWithinNBonds(src, bondedNeighborLists,
excluded_bond_chain_length, 3)
# Count the skin dots for this atom.
ret += self._count_skin_dots_for(src, neighbors)
# Return the total count
return ret
# ------------------------------------------------------------------------------
def _condense(self, dotInfoList):
'''
    Condense the list of dots for use in raw output, sorting them and (when the
    self.params.output.condensed flag is set) removing duplicates.
:param dotInfoList: List of DotInfo structures to condense.
:return: Condensed dotlist.
'''
ret = []
# Handle all of the dots associated with each source atom as a group.
# This will be from curAtomIndex to curAtomEndIndex.
curAtomIndex = 0
while curAtomIndex < len(dotInfoList):
# Find the last dot in the current atom, which may be at the end of the list.
curAtomEndIndex = len(dotInfoList) - 1
for curAtomEndIndex in range(curAtomIndex+1, len(dotInfoList)):
if dotInfoList[curAtomIndex].src != dotInfoList[curAtomEndIndex].src:
curAtomEndIndex -= 1
break
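      # curAtomIndex..curAtomEndIndex now spans the contiguous run of dots sharing the same
      # source atom (dots are appended per source atom, so equal-src entries are adjacent).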
# Sort the dots for the same source atom based on characteristics of their target atom.
# We include the XYZ position in the sort so that we get the same order and grouping each
# time even though the phantom H? atoms are otherwise identical.
# There may be no target atoms specified (they may be None), which will
# cause an attribute error. If that happens, we don't sort.
try:
thisAtom = sorted(
dotInfoList[curAtomIndex:curAtomEndIndex+1],
key=lambda dot: "{}{:4.4s}{}{} {}{} {:.3f} {:.3f} {:.3f}".format(
dot.target.parent().parent().parent().id, # chain
str(dot.target.parent().parent().resseq_as_int()), # residue number
dot.target.parent().parent().icode, # insertion code
dot.target.parent().resname, # residue name
dot.target.name, # atom name
dot.target.parent().altloc, # alternate location
dot.target.xyz[0], dot.target.xyz[1], dot.target.xyz[2]
)
)
except AttributeError:
thisAtom = dotInfoList[curAtomIndex:curAtomEndIndex+1]
# Remove duplicates (same target atom) if we've been asked to.
# We do this by scanning through and accumulating counts as long as the target
# atom is the same and by appending a new entry when the target atom is different.
# The result is a single entry for each target atom with a count of the number of
# dots that were associated with it in the resulting entry.
if self.params.output.condensed and len(thisAtom) > 0:
thisAtom[0].dotCount = 1
condensed = [ thisAtom[0] ]
for i in range(1,len(thisAtom)):
if thisAtom[i-1].target.memory_id() == thisAtom[i].target.memory_id():
condensed[-1].dotCount += 1
else:
thisAtom[i].dotCount = 1
condensed.append(thisAtom[i])
thisAtom = condensed
# Append the sorted and potentially condensed list to the return list
ret.extend(thisAtom)
# Handle the chunk of dots on the next atom
curAtomIndex = curAtomEndIndex + 1
return ret
# ------------------------------------------------------------------------------
def _writeRawOutput(self, groupName, masterName):
'''
Describe raw summary counts for data of various kinds.
:param groupName: Name to give to the group.
:param masterName: Name for the beginning of each line.
:return: String to be added to the output.
'''
ret = ''
# Provide a short name for each interaction type
mast = {}
for t in self._interactionTypes:
mast[t] = probeExt.DotScorer.interaction_type_short_name(t)
# Store values that we will need often
density = self.params.probe.density
gap_weight = self.params.probe.gap_weight
bump_weight = self.params.probe.bump_weight
hydrogen_bond_weight = self.params.probe.hydrogen_bond_weight
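    # Each raw record emitted below is one colon-separated line; an illustrative sketch of
    # the field order (not a format specification) is:
    #   master:group:type:source atom:target atom:[dot count:]gap:dot gap:
    #   spike x:spike y:spike z:center-to-center distance:score:src element:tgt element:
    #   loc x:loc y:loc z:src B:tgt B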
# Go through all atom types and contact types and report the contacts.
for atomClass in self._allAtomClasses:
for interactionType in self._interactionTypes:
# Condensed report all of the dots of this type.
condensed = self._condense(self._results[atomClass][interactionType])
for node in condensed:
ret += "{}:{}:{}:".format(masterName, groupName, mast[interactionType])
# Describe the source atom
a = node.src
resName = a.parent().resname.strip().upper()
resID = str(a.parent().parent().resseq_as_int())
chainID = a.parent().parent().parent().id
iCode = a.parent().parent().icode
alt = a.parent().altloc
ret += "{:>2s}{:>3s} {}{} {}{:1s}:".format(chainID, resID, iCode, resName, a.name, alt)
# Describe the target atom, if it exists
t = node.target
if t is None:
ret += ":::::::"
else:
resName = t.parent().resname.strip().upper()
resID = str(t.parent().parent().resseq_as_int())
chainID = t.parent().parent().parent().id
iCode = t.parent().parent().icode
alt = t.parent().altloc
ret += "{:>2s}{:>4s}{}{} {:<3s}{:1s}:".format(chainID, resID, iCode, resName, t.name, alt)
r1 = self._extraAtomInfo.getMappingFor(a).vdwRadius
r2 = self._extraAtomInfo.getMappingFor(t).vdwRadius
sl = (Helpers.rvec3(a.xyz)-Helpers.rvec3(t.xyz)).length()
gap = sl - (r1 + r2)
dtgp = node.gap
score = 0.0
            if interactionType in [probeExt.InteractionType.WideContact, probeExt.InteractionType.CloseContact]:
scaledGap = dtgp / gap_weight
score = math.exp(-scaledGap*scaledGap)
elif interactionType in [
probeExt.InteractionType.WeakHydrogenBond, # don't know what to do here, because they can be both wc and cc, so will have to check
probeExt.InteractionType.SmallOverlap, # small overlap, doing nothing, as before
probeExt.InteractionType.Bump,
probeExt.InteractionType.BadBump]: # worse overlap, same as bad overlap
              score = - bump_weight * sl
else: # Hydrogen bond
score = hydrogen_bond_weight * sl
if self.params.output.contact_summary:
ret += "{}:".format(node.dotCount)
ret += "{:.3f}:{:.3f}:{:.3f}:{:.3f}:{:.3f}:{:.3f}:{:.4f}".format(gap, dtgp,
node.spike[0], node.spike[1], node.spike[2], sl, score/density)
try:
tName = t.element
tBVal = "{:.2f}".format(t.b)
except Exception:
tName = ""
tBVal = ""
ret += ":{}:{}:{:.3f}:{:.3f}:{:.3f}".format(a.element, tName,
node.loc[0], node.loc[1], node.loc[2])
ret += ":{:.2f}:{}\n".format(a.b, tBVal)
return ret
# ------------------------------------------------------------------------------
def _writeOutput(self, groupName, masterName):
'''
Describe summary counts for data of various kinds.
:param groupName: Name to give to the group.
:param masterName: Name for the master command.
:return: String to be added to the output.
'''
ret = ''
ptm = ' '
color = ''
mast = {}
for t in self._interactionTypes:
# Probe uses spaces in these names for this function but underscores for others, so we replace
# underscores with spaces here.
mast[t] = probeExt.DotScorer.interaction_type_name(t).replace("_"," ")
extraMaster = ''
pointid = ''
lastpointid = ''
ptmast = ''
gapNames = ['z','y','x','w','v','u','t','g','r','q','f','F','Q','R','G','T','U','V','W','X','Y','Z']
    # Standard gap bins cover at least -0.5 to +0.5; the range is wider when the probe
    # radius exceeds the standard 0.25.
gaplimit = int(math.floor(((2*(max(self.params.probe.radius,0.25))+0.5)/0.05)+2))
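    # Illustrative check: with a 0.25 A probe radius, gaplimit = floor(((2*0.25 + 0.5)/0.05) + 2)
    # = 22, matching the 22 single-character gap names listed above.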
gapcounts = [0] * gaplimit
maxgapcounts = 0
strcName = ''
# Rename contacts as needed
if self.params.output.merge_contacts:
mast[probeExt.InteractionType.WideContact] = mast[probeExt.InteractionType.CloseContact] = 'vdw contact'
if self.params.approach == 'surface':
mast[probeExt.InteractionType.CloseContact] = 'surface'
if self.params.output.add_group_name_master_line:
extraMaster = ' master={{{}}}'.format(masterName)
ret += "@subgroup dominant {{{}}}\n".format(groupName)
if self.params.approach == 'surface':
ret += "@master {{{}}}\n".format(mast[1])
else:
if self.params.output.report_vdws and not self.params.output.only_report_bad_clashes:
ret += "@master {{{}}}\n".format(mast[probeExt.InteractionType.WideContact])
if not self.params.output.merge_contacts:
ret += "@master {{{}}}\n".format(mast[probeExt.InteractionType.CloseContact])
if self.params.output.report_clashes or self.params.output.only_report_bad_clashes:
if not self.params.output.only_report_bad_clashes:
ret += "@master {{{}}}\n".format(mast[probeExt.InteractionType.SmallOverlap])
ret += "@master {{{}}}\n".format(mast[probeExt.InteractionType.Bump])
if self.params.output.separate_worse_clashes:
ret += "@master {{{}}}\n".format(mast[probeExt.InteractionType.BadBump])
if self.params.output.report_hydrogen_bonds and not self.params.output.only_report_bad_clashes:
ret += "@master {{{}}}\n".format(mast[probeExt.InteractionType.StandardHydrogenBond])
if self.params.probe.allow_weak_hydrogen_bonds:
ret += "@master {{{}}}\n".format(mast[probeExt.InteractionType.WeakHydrogenBond])
# Report count legend if any counts are nonzero.
if self._totalInteractionCount(self._MCMCCount) > 0:
ret += "@pointmaster 'M' {{McMc contacts}}\n"
if self._totalInteractionCount(self._SCSCCount) > 0:
ret += "@pointmaster 'S' {{ScSc contacts}}\n"
if self._totalInteractionCount(self._MCSCCount) > 0:
ret += "@pointmaster 'P' {{McSc contacts}}\n"
if self._totalInteractionCount(self._otherCount) > 0:
ret += "@pointmaster 'O' {{Hets contacts}}\n"
# Report binned gap legend if we're binning gaps
if self.params.output.bin_gaps:
for i in range(gaplimit):
ret += "@pointmaster '{}' {{gap {:3.2f}}}\n".format(gapNames[i],((i-11.0)/20.0)+0.05)
# Go through all atom types and contact types and report the contacts.
for atomClass in self._allAtomClasses:
for interactionType in self._interactionTypes:
# Write list headers for types that have entries. Do not write one for weak Hydrogen
# bonds unless we're separating them out.
if (len(self._results[atomClass][interactionType]) > 0 and
(self.params.probe.allow_weak_hydrogen_bonds or
interactionType != probeExt.InteractionType.WeakHydrogenBond
)
):
# The formatting of the header depends on the type
# of dot it is and whether atoms are masters. There is a basic line for each, with addition of
# a lens string for some cases. Some entries are dots and others are vectors.
lensDots = ""
listType = '@dotlist'
if interactionType in [probeExt.InteractionType.WideContact, probeExt.InteractionType.CloseContact]:
if self.params.output.add_lens_keyword:
lensDots = " lens"
elif interactionType in [probeExt.InteractionType.SmallOverlap, probeExt.InteractionType.Bump,
probeExt.InteractionType.BadBump]:
listType = '@vectorlist'
elif interactionType == probeExt.InteractionType.StandardHydrogenBond:
# Nothing special
pass
# Write the header based on the settings above and whether atoms are masters.
if self.params.output.atoms_are_masters:
ret += "{} {{x}} color={} master={{{} dots}} master={{{}}}{}{}\n".format(
listType,
self._color_for_atom_class(atomClass), atomClass, mast[interactionType], extraMaster,
lensDots
)
else:
ret += "{} {{x}} color={} master={{{}}}{}{}\n".format(
listType,
self._color_for_atom_class(atomClass), mast[interactionType], extraMaster,
lensDots
)
# Report all of the dots of this type.
for node in self._results[atomClass][interactionType]:
a = node.src
t = node.target
if self.params.output.bin_gaps:
# Include trailing space for a gapbin character (second point master)
ptmast = " '{} ' ".format(node.ptmaster)
elif node.ptmaster == " ":
# Blank means no point master
ptmast = ""
else:
ptmast = " '{}' ".format(node.ptmaster)
pointid = "{}{}{}{:4d}{}{}".format(a.name, a.parent().altloc, a.parent().resname,
a.parent().parent().resseq_as_int(), a.parent().parent().icode,
a.parent().parent().parent().id)
if pointid != lastpointid:
lastpointid = pointid
ret += '{{{}}}'.format(pointid)
else:
ret += '{"}'
if self.params.output.color_by_gap:
if t is not None:
color = self._color_for_gap(node.gap, interactionType)
ret += "{}".format(color)
else:
ret += "{} ".format(self.params.output.default_point_color)
# Handle gap binning if we're doing it
if self.params.output.bin_gaps:
Lgotgapbin = False # until identify which gapbin
for k in range(gaplimit):
# pt master intervals of 0.05 from -0.5 to +0.5
if node.gap < ((k-11.0)/20.0)+0.05:
# Replace the fourth character of ptmast with the appropriate gap name
ptmast = ptmast[:3]+gapNames[k]+ptmast[4:]
gapcounts[k] += 1
maxgapcounts = max(gapcounts[k], maxgapcounts)
if k < gaplimit:
Lgotgapbin = True
break
if not Lgotgapbin:
# assign this node, aka dot, to overflow gapbin
ptmast = ptmast[:3]+gapNames[-1]+ptmast[4:]
gapcounts[-1] += 1
if interactionType in [probeExt.InteractionType.SmallOverlap, probeExt.InteractionType.Bump,
probeExt.InteractionType.BadBump]:
ret += 'P {}{:.3f},{:.3f},{:.3f} {{"}}{} {}{:.3f},{:.3f},{:.3f}\n'.format(
ptmast, node.loc[0], node.loc[1], node.loc[2],
color,
ptmast, node.spike[0], node.spike[1], node.spike[2]
)
else: # Contact or H bond
ret += "{}{:.3f},{:.3f},{:.3f}\n".format(
ptmast, node.loc[0], node.loc[1], node.loc[2]
)
# Print the gap bins if we have computed them.
if self.params.output.bin_gaps:
ret += "@text\n"
for k in range(gaplimit):
ret += "{{{:5.2f}, {:8d} }}\n".format(
(((k-11.0)/20.0)+0.05),gapcounts[k]
)
# kinemage 2
ret += "@kinemage 2\n"
ret += "@group {{gapbins}} dominant\n"
ret += "@vectorlist {gapbins}\n"
for k in range(gaplimit-1):
ret += "{{{:5.2f}, {:8d} }} {:5.2f}, {:8f}, 0.00\n".format(
(((k-11.0)/20.0)+0.05), gapcounts[k],
(((k-11.0)/20.0)+0.05)*maxgapcounts, gapcounts[k]
)
ret += "@labellist {gapbins}\n"
ret += "{0} 0.0, -1.0, 0.0\n"
# LXHvector output in probe had to do with -oneDotEach, so we don't include it here.
return ret
# ------------------------------------------------------------------------------
def _doEnumeration(self, reportSubScores, isSurface, numSkinDots):
'''
Compute summary counts for data of various kinds. Called by _rawEnumerate() and
_enumerate() to do the shared work.
:param reportSubScores: Provide reports on different contact subscores.
:param isSurface: Are these all surface dots?
:param numSkinDots: The number of dots on atom skins. This is used to normalize output scores.
:return: Tuple of values: (string_to_output, tgs, ths, thslen, tbs, tbslen, tsas,
tGscore, tHscore, tBscore, tscore)
'''
retString = ''
# Store values that we will need often
approach = self.params.approach
density = self.params.probe.density
gap_weight = self.params.probe.gap_weight
bump_weight = self.params.probe.bump_weight
hydrogen_bond_weight = self.params.probe.hydrogen_bond_weight
# Compute the counts
tgs = ths = thslen = tbs = tbslen = tsas = 0
tGscore = tHscore = tBscore = tscore = 0
for c in self._allAtomClasses:
for t in self._interactionTypes:
res = self._results[c][t]
if len(res) > 0:
# gs stores all of the values unless reportSubScores is True
gs = hs = hslen = bs = bslen = score = psas = 0
# Print a line describing the atom class and interaction type.
label = "external_dots "
if not isSurface:
label = probeExt.DotScorer.interaction_type_name(t)
retString += "{:>3s} {:14s} ".format(c, label)
for node in self._results[c][t]:
if reportSubScores:
if t in [probeExt.InteractionType.WideContact, probeExt.InteractionType.CloseContact,
probeExt.InteractionType.WeakHydrogenBond]:
gs += 1
dtgp = node.gap
scaledGap = dtgp/gap_weight
scoreValue = math.exp(-scaledGap*scaledGap)
score += scoreValue
tGscore += scoreValue
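                # Illustrative: assuming a gap_weight of 0.25, a dot with a 0.25 A gap
                # contributes exp(-1.0) ~= 0.37 to the contact score.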
elif t in [probeExt.InteractionType.SmallOverlap, probeExt.InteractionType.Bump,
probeExt.InteractionType.BadBump]:
bs += 1
slen = 0.5*abs(node.gap);
bslen += slen
scoreValue = - bump_weight * slen
score += scoreValue
tBscore += scoreValue
else: # Hydrogen bond
hs += 1
slen = 0.5*abs(node.gap)
hslen += slen
scoreValue = hydrogen_bond_weight * slen
score += scoreValue
tHscore += scoreValue
else:
gs += 1
if approach == 'surface':
p_radius = self.params.probe.radius
a_radius = self._extraAtomInfo.getMappingFor(node.src).vdwRadius
psas += (a_radius + p_radius)*(a_radius + p_radius)/(a_radius * a_radius)
# Finish reporting by atom class and interaction type
if reportSubScores:
if t in [probeExt.InteractionType.WideContact, probeExt.InteractionType.CloseContact,
probeExt.InteractionType.WeakHydrogenBond]:
retString += "{:7d} {:5.1f}% {:9.1f} {:9.2f}\n".format(gs, 100.0*gs/numSkinDots, score/density,
1000.0*score/numSkinDots)
elif t in [probeExt.InteractionType.SmallOverlap, probeExt.InteractionType.Bump,
probeExt.InteractionType.BadBump]:
retString += "{:7d} {:5.1f}% {:9.1f} {:9.2f}\n".format(bs, 100.0*bs/numSkinDots, score/density,
1000.0*score/numSkinDots)
else: # Hydrogen bond
retString += "{:7d} {:5.1f}% {:9.1f} {:9.2f}\n".format(hs, 100.0*hs/numSkinDots, score/density,
1000.0*score/numSkinDots)
else:
retString += "{:7d} {:5.1f}%\n".format(gs, 100.0*gs/numSkinDots)
# Done computing for this category, calculate totals
tgs += gs
ths += hs
thslen += hslen
tbs += bs
tbslen += bslen
tscore += score
if approach == 'surface':
tsas += psas # tally the solvent accessible surface
return (retString, tgs, ths, thslen, tbs, tbslen, tsas, tGscore, tHscore, tBscore, tscore)
# ------------------------------------------------------------------------------
def _rawEnumerate(self, groupName, numberSelected, reportSubScores, isSurface, numSkinDots, masterName):
'''
Describe summary counts for data of various kinds.
:param groupName: Name to give to the group.
:param numberSelected: Number of atoms in the selection.
:param reportSubScores: Provide reports on different contact subscores.
:param isSurface: Are these all surface dots?
:param numSkinDots: The number of dots on atom skins. This is used to normalize output scores.
:param masterName: Name for the beginning of each line.
:return: String to be added to the output.
'''
# The C code has a rawName parameter, but it was only nonempty for autobondrot/movingDoCommand
# The C code has a scoreBias parameter, but it was only nonzero for autobondrot/movingDoCommand
ret = ""
# If we have an empty selection, report zero.
if numberSelected <= 0 or numSkinDots <= 0:
ret += "{:9.3f}".format(0.0)
else:
# Compute and report the score. Discard anything from the return string in the count
# routine -- we don't want to print it.
(retString, tgs, ths, thslen, tbs, tbslen, tsas, tGscore, tHscore, tBscore, tscore
) = self._doEnumeration(reportSubScores, isSurface, numSkinDots)
# Output one line of information.
if isSurface:
ret += "{:9.3f}".format( (tgs+tbs+ths)/self.params.probe.density )
elif reportSubScores:
ret += "{:9.3f}".format( tscore/self.params.probe.density )
else:
ret += "{:9.3f}".format( tgs )
# Report the same information at the end of the line whether or not we counted the scores.
if len(groupName) > 0 or len(masterName) > 0:
ret += "#"
if len(masterName) > 0:
ret += " {}".format(masterName)
if len(groupName) > 0:
ret += " {}".format(groupName)
ret += "\n"
return ret
# ------------------------------------------------------------------------------
def _count_summary(self, modeName, completed = True):
'''
    Describe summary counts for chain-vs.-chain interactions.
:param modeName: Description of the mode of operation to report.
:param completed: This is the last iteration, so print the accumulated values.
:return: String to be added to the output.
'''
ret = ''
# Keep a running total of values for each chain-vs.-chain list.
# The first time we're run, fill the values with 0.
# Clear the global counts once they have been added to the running total so we can run a new count.
if not hasattr(self,'_MCMCTotal'):
self._MCMCTotal = {}
self._SCSCTotal = {}
self._MCSCTotal = {}
self._otherTotal = {}
for t in self._interactionTypes:
self._MCMCTotal[t] = 0
self._SCSCTotal[t] = 0
self._MCSCTotal[t] = 0
self._otherTotal[t] = 0
for t in self._interactionTypes:
self._MCMCTotal[t] += self._MCMCCount[t]
self._MCMCCount[t] = 0
self._SCSCTotal[t] += self._SCSCCount[t]
self._SCSCCount[t] = 0
self._MCSCTotal[t] += self._MCSCCount[t]
self._MCSCCount[t] = 0
self._otherTotal[t] += self._otherCount[t]
self._otherCount[t] = 0
# Compute the sum of all subtypes per interaction type.
sumTotal = {}
for t in self._interactionTypes:
sumTotal[t] = self._MCMCTotal[t] + self._SCSCTotal[t] + self._MCSCTotal[t] + self._otherTotal[t]
# If we're at the last pass, fill in our return string.
if completed:
if self.params.output.format == 'oneline':
# Report the file name that was read along with its summary data on one line
ret += ": {} ".format(self.data_manager.get_model_names()[0])
for c in [self._MCMCTotal, self._SCSCTotal, self._MCSCTotal, self._otherTotal]:
for t in self._interactionTypes:
ret += ":{:9d} ".format(c[t])
ret += ":\n"
else:
ret += "@text\n"
ret += "probe: {}\n".format(modeName)
ret += "{}\n".format(self.data_manager.get_model_names()[0])
ret += ":CONTACT: WIDE : CLOSE : weak H-bonds : SMALL : BAD : WORSE : H-BOND :\n"
for (c,name) in [(self._MCMCTotal, "MCMC"), (self._SCSCTotal, "SCSC"),
(self._MCSCTotal, "MCSC"), (self._otherTotal, "OTHER"),
(sumTotal, "SUM")]:
ret += ":{:7s}".format(name)
for t in self._interactionTypes:
ret += ":{:9d} ".format(c[t])
ret += ":\n"
return ret
# ------------------------------------------------------------------------------
def _enumerate(self, groupName, numberSelected, reportSubScores, isSurface, numSkinDots):
'''
Describe summary counts for data of various kinds.
:param groupName: Name to give to the group.
:param numberSelected: Number of atoms in the selection.
:param reportSubScores: Provide reports on different contact subscores.
:param isSurface: Are these all surface dots?
:param numSkinDots: The number of dots on atom skins. This is used to normalize output scores.
:return: String to be added to the output.
'''
ret = ''
# Store values that we will need often
density = self.params.probe.density
ret += " \nsubgroup: {}\n".format(groupName)
ret += "atoms selected: {}\npotential dots: {}\npotential area: {:.1f} A^2\n".format(
numberSelected, numSkinDots, numSkinDots/density)
    if numberSelected <= 0 or numSkinDots <= 0:
      ret += "empty selection\n"
      return ret
if reportSubScores:
ret += " type # % score score/A^2 x 1000\n"
else:
ret += " type # %\n"
# Compute the counts
(retString, tgs, ths, thslen, tbs, tbslen, tsas, tGscore, tHscore, tBscore, tscore
) = self._doEnumeration(reportSubScores, isSurface, numSkinDots)
ret += retString
# Report the counts
if reportSubScores:
ret += "\n tot contact: {:7d} {:5.1f}% {:9.1f} {:9.2f}\n".format(
tgs, 100.0*tgs/numSkinDots, tGscore/density, 1000.0*tGscore/numSkinDots
)
ret += " tot overlap: {:7d} {:5.1f}% {:9.1f} {:9.2f}\n".format(
tbs, 100.0*tbs/numSkinDots, tBscore/density, 1000.0*tBscore/numSkinDots
)
ret += " tot H-bond: {:7d} {:5.1f}% {:9.1f} {:9.2f}\n".format(
ths, 100.0*ths/numSkinDots, tHscore/density, 1000.0*tHscore/numSkinDots
)
ret += "\n grand tot: {:7d} {:5.1f}% {:9.1f} {:9.2f}\n".format(
(tgs+tbs+ths), 100.0*(tgs+tbs+ths)/numSkinDots, tscore/density, 1000.0*tscore/numSkinDots
)
ret += "\ncontact surface area: {:.1f} A^2\n".format((tgs+tbs+ths)/density)
else:
ret += " tot: {:7d} {:5.1f}%\n\n".format(tgs, 100.0*tgs/numSkinDots)
ret += " contact surface area: {:.1f} A^2\n".format(tgs/density)
if self.params.approach == 'surface':
ret += "accessible surface area: {:.1f} A^2\n\n".format(tsas/density)
return ret
# ------------------------------------------------------------------------------
def _describe_selection_and_parameters(self, groupLabel, selectionName):
'''
Describe the selection type and other parameters for a run. Called by various run types.
:param groupLabel: Name to give to the group.
:param selectionName: Name of the selection mode: 'self', 'once'.
:return: String to be added to the output.
'''
ret = ''
ret += "selection: {}\nname: {}\n".format(selectionName, groupLabel)
ret += "density: {:.1f} dots per A^2\nprobeRad: {:.3f} A\nVDWrad: (r * {:.3f}) + {:.3f} A\n".format(
self.params.probe.density, self.params.probe.radius, self.params.atom_radius_scale,
self.params.atom_radius_offset)
ret += "score weights: gapWt={:0g}, bumpWt={:0g}, HBWt={:0g}\n".format(
self.params.probe.gap_weight, self.params.probe.bump_weight, self.params.probe.hydrogen_bond_weight)
return ret
# ------------------------------------------------------------------------------
def _report_single_interaction(self, groupLabel, selectionName, comparisonString, intersectionName,
numModels, modelIndex):
'''
Print information about a single interaction, either self interaction or once interaction.
:param groupLabel: Name to give to the group.
:param selectionName: Name of the selection mode: 'self', 'once'.
    :param comparisonString: String describing the comparison: '1->1', '1->2'.
:param intersectionName: Name of the intersection being done: 'SelfIntersect', 'IntersectOnce'.
:param numModels: Number of models we are running over.
:param modelIndex: Current model we are running.
:return: String to be added to the output.
'''
ret = ''
# Count the dots if we've been asked to do so.
if self.params.output.count_dots:
numSkinDots = self._count_skin_dots(self._source_atoms_sorted, self._allBondedNeighborLists)
if self.params.output.format != 'raw':
ret += self._describe_run("program:","command:")
ret += self._describe_selection_and_parameters(groupLabel, selectionName)
nsel = len(self._source_atoms_sorted)
if self.params.output.format == 'raw':
ret += self._rawEnumerate("", nsel, self.params.output.compute_scores, False, numSkinDots, groupLabel)
else:
ret += self._enumerate("{} dots".format(selectionName), nsel, self.params.output.compute_scores, False, numSkinDots)
else: # Not counting the dots
# Check for various output format types.
# We're not implementing O format or XV format, but we still allow raw and oneline
if self.params.output.format == 'raw':
ret += self._writeRawOutput(comparisonString,groupLabel)
elif self.params.output.format == 'oneline':
ret += self._count_summary(intersectionName)
elif self.params.output.format == 'standard': # Standard/Kinemage format
ret += self._describe_run("@caption"," command:")
if self.params.output.contact_summary:
ret += self._count_summary(intersectionName)
if self.params.output.add_group_line:
if numModels > 1:
# doing one of multiple models of an ensemble
ret += "@group dominant {{{} M{}}} animate\n".format(groupLabel,modelIndex)
else:
ret += "@group dominant {{{}}}\n".format(groupLabel)
ret += self._writeOutput("{} dots".format(selectionName), groupLabel)
else:
raise ValueError("Unrecognized output format: "+self.params.output.format+" (internal error)")
return ret
# ------------------------------------------------------------------------------
def _clear_results(self):
# Initialize the results to empty.
self._results = {}
for c in self._allAtomClasses:
interactionTypeDicts = {}
for i in self._interactionTypes:
interactionTypeDicts[i] = []
self._results[c] = interactionTypeDicts
# ------------------------------------------------------------------------------
def _describe_run(self, header1, header2):
'''
Describe the command-line and other Phil options used for this run so that
it could be reproduced.
:param header1: Header for the first output line (the version/time line).
:param header2: Header for the second output line (the command and its arguments).
:return: String to be added to the output.
'''
global version
ret = '{} probe2 v.{}, run {}\n'.format(header1, version, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
ret += header2
for a in sys.argv:
ret += ' {}'.format(a)
ret += '\n'
return ret
# ------------------------------------------------------------------------------
def validate(self):
self.data_manager.has_models(raise_sorry=True)
if self.params.output.file_name is None:
raise Sorry("Must specify output.file_name")
if self.params.source_selection is None:
raise Sorry("Must specify a source parameter for approach "+self.params.approach)
if self.params.approach in ['once','both'] and self.params.target_selection is None:
raise Sorry("Must specify a target parameter for approach "+self.params.approach)
aScale = self.params.atom_radius_scale
if aScale < 0.0001 or aScale > 1000:
raise Sorry("Invalid atom_radius_scale value: {:0g}".format(aScale))
ao = self.params.atom_radius_offset
if ao < -10 or ao > 1000:
raise Sorry("Invalid atom_radius_offset value: {:0g}".format(ao))
# Ensure consistency among parameters
if self.params.probe.contact_cutoff < self.params.probe.radius:
self.params.probe.contact_cutoff = self.params.probe.radius
# ------------------------------------------------------------------------------
def run(self):
# Run unit tests if we've been asked to
if self.params.run_tests:
make_sub_header('Run unit tests', out=self.logger)
self.Test()
# String that will be output to the specified file.
outString = ''
if (self.params.output.add_kinemage_keyword and not self.params.output.count_dots
and self.params.output.format == 'standard'):
outString += '@kinemage 1\n'
make_sub_header('Interpret Model', out=self.logger)
# Get our model.
self.model = self.data_manager.get_model()
# Fix up bogus unit cell when it occurs by checking crystal symmetry.
cs = self.model.crystal_symmetry()
if (cs is None) or (cs.unit_cell() is None):
self.model = shift_and_box_model(model = self.model)
################################################################################
# Get the bonding information we'll need to exclude our bonded neighbors.
allAtoms = self.model.get_atoms()
make_sub_header('Compute neighbor lists', out=self.logger)
try:
p = mmtbx.model.manager.get_default_pdb_interpretation_params()
p.pdb_interpretation.use_neutron_distances = self.params.use_neutron_distances
self.model.process(make_restraints=True, pdb_interpretation_params=p) # make restraints
geometry = self.model.get_restraints_manager().geometry
sites_cart = self.model.get_sites_cart() # cartesian coordinates
bondProxies, asu = \
geometry.get_all_bond_proxies(sites_cart = sites_cart)
except Exception as e:
raise Sorry("Could not get bonding information for input file: " + str(e))
################################################################################
# Get the bonding information we'll need to exclude our bonded neighbors.
self._allBondedNeighborLists = Helpers.getBondedNeighborLists(allAtoms, bondProxies)
################################################################################
# Get the extra atom information needed to score all of the atoms in the model.
make_sub_header('Compute extra atom information', out=self.logger)
ret = Helpers.getExtraAtomInfo(self.model,useNeutronDistances=self.params.use_neutron_distances,
useImplicitHydrogenDistances=self.params.probe.implicit_hydrogens,
useProbeTablesByDefault=self.params.probe.use_original_probe_tables)
self._extraAtomInfo = ret.extraAtomInfo
if len(ret.warnings) > 0:
print('Warnings returned by getExtraAtomInfo():\n'+ret.warnings, file=self.logger)
# Scale and offset the radius values for all atoms based on our command-line arguments.
for a in allAtoms:
ei = self._extraAtomInfo.getMappingFor(a)
ei.vdwRadius = self._scaled_atom_radius(a)
self._extraAtomInfo.setMappingFor(a, ei)
################################################################################
# Find the maximum VDW radius of any of our atoms, used to limit searches for nearby
# atoms.
self._maximumVDWRadius = 1
for a in allAtoms:
self._maximumVDWRadius = max(self._maximumVDWRadius, self._extraAtomInfo.getMappingFor(a).vdwRadius)
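    # Starting the maximum at 1 above keeps the neighbor-search radius sane even if the
    # extra atom information reports unexpectedly small radii.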
################################################################################
# Get the extra atom information needed to sort all of the atoms in the model
# into proper classes for reporting. These classes may be atom names, when we're
# sorting by atoms and it can be nucleic acid base names when we're sorting by that.
# Comes from newAtom() and dotType() functions in probe.c.
# Rather than a table indexed by type, we directly write the result.
# Handle all atoms, not only selected atoms.
self._atomClasses = {}
for a in allAtoms:
if not a.element_is_hydrogen():
# All elements except hydrogen use their own names.
self._atomClasses[a] = self._atom_class_for(a)
else:
# For hydrogen, assign based on what it is bonded to.
if len(self._allBondedNeighborLists[a]) != 1:
raise Sorry("Found Hydrogen with number of neigbors other than 1: "+
str(len(self._allBondedNeighborLists[a])))
else:
self._atomClasses[a] = self._atom_class_for(self._allBondedNeighborLists[a][0])
################################################################################
# Get the dot sets we will need for each atom. This is the set of offsets from the
# atom center where dots should be placed. We use a cache to reduce the calculation
# time by returning the same answer for atoms that have the same radius.
dotCache = probeExt.DotSphereCache(self.params.probe.density)
self._dots = {}
for a in allAtoms:
self._dots[a] = dotCache.get_sphere(self._extraAtomInfo.getMappingFor(a).vdwRadius).dots()
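    # Atoms that share a VDW radius share the same cached sphere, so the cache typically holds
    # only as many dot spheres as there are distinct radii in the model.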
################################################################################
# Get the other characteristics we need to know about each atom to do our work.
self._inWater = {}
self._inHet = {}
self._inMainChain = {}
self._inSideChain = {}
hetatm_sel = self.model.selection("hetatm")
mainchain_sel = self.model.selection("backbone") # Will NOT include Hydrogen atoms on the main chain
sidechain_sel = self.model.selection("sidechain") # Will include Hydrogen atoms on the side chain
for a in allAtoms:
self._inWater[a] = common_residue_names_get_class(name=a.parent().resname) == "common_water"
self._inHet[a] = hetatm_sel[a.i_seq]
if not a.element_is_hydrogen():
self._inMainChain[a] = mainchain_sel[a.i_seq]
else:
# Check our bonded neighbor to see if it is on the mainchain if we are a Hydrogen
if len(self._allBondedNeighborLists[a]) != 1:
raise Sorry("Found Hydrogen with number of neigbors other than 1: "+
str(len(self._allBondedNeighborLists[a])))
else:
self._inMainChain[a] = mainchain_sel[self._allBondedNeighborLists[a][0].i_seq]
self._inSideChain[a] = sidechain_sel[a.i_seq]
################################################################################
# Ensure that the model we've been passed has at least one Hydrogen bonded to a Carbon
# and at least one polar Hydrogen (bonded to N, O, or S). Otherwise, raise a Sorry.
if not self.params.probe.implicit_hydrogens:
foundCBonded = False
foundPolar = False
for a in allAtoms:
if a.element_is_hydrogen():
if len(self._allBondedNeighborLists[a]) != 1:
raise Sorry("Found Hydrogen with number of neigbors other than 1: "+
str(len(self._allBondedNeighborLists[a])))
else:
neighbor = self._allBondedNeighborLists[a][0]
if neighbor.element in ['N', 'O', 'S']:
foundPolar = True
elif neighbor.element == 'C':
foundCBonded = True
if not (foundCBonded and foundPolar):
raise Sorry("Did not find both polar and non-polar Hydrogens in model. For proper operation, "+
"Probe requires explicit Hydrogens. Run Reduce2 or another placement "+
"program on the model before running Probe.")
################################################################################
# Get the source selection (and target selection if there is one). These will be
# lists of atoms that are in each selection, a subset of the atoms in the model.
# If there is no model_id in the selection criteria, these may include atoms from
# multiple models in the hierarchy.
source_sel = self.model.selection(self.params.source_selection)
allSourceAtoms = set()
for a in allAtoms:
if source_sel[a.i_seq]:
allSourceAtoms.add(a)
allTargetAtoms = set()
if self.params.target_selection is not None:
# If the target selection is "=", that means that it should be the same as the source selection.
if self.params.target_selection == "=":
allTargetAtoms = allSourceAtoms
else:
target_sel = self.model.selection(self.params.target_selection)
for a in allAtoms:
if target_sel[a.i_seq]:
allTargetAtoms.add(a)
################################################################################
# We usually have the selection pick a model, but in the case of SELFINTERSECT with one
# input file and no model specified in the source and target patterns, we loop over all
# models in the file.
# We get lists of all atoms present in each hierarchy model that we're running.
# This is the one selected when one is selected and it is all of the available ones
# when no particular one is selected.
atomLists = [ self.model.get_atoms() ]
if (self.params.approach == 'self' and
(self.params.source_selection is None or 'model_id' not in self.params.source_selection) and
(self.params.target_selection is None or 'model_id' not in self.params.target_selection)):
# Handle the multiple-model case by looping modelID over all models.
numModels = self.model.get_hierarchy().models_size()
atomLists = []
for i in range(numModels):
atomLists.append( self.model.get_hierarchy().models()[i].atoms() )
for modelIndex, atoms in enumerate(atomLists):
################################################################################
# Get the subset of the source selection and target selection for this hierarchy
# model.
source_atoms = set()
for a in allSourceAtoms:
if a in atoms:
source_atoms.add(a)
target_atoms = set()
for a in allTargetAtoms:
if a in atoms:
target_atoms.add(a)
################################################################################
# Find a list of all of the selected atoms with no duplicates
# Get the bonded neighbor lists for the atoms that are in this selection.
all_selected_atoms = source_atoms.union(target_atoms)
bondedNeighborLists = Helpers.getBondedNeighborLists(all_selected_atoms, bondProxies)
################################################################################
# Build a spatial-query structure that tells which atoms are nearby.
# Include all atoms in the structure, not just the ones that have been selected,
# unless we've been asked not to keep them.
make_sub_header('Make spatial-query accelerator', out=self.logger)
if self.params.keep_unselected_atoms:
self._spatialQuery = probeExt.SpatialQuery(atoms)
else:
self._spatialQuery = probeExt.SpatialQuery(list(all_selected_atoms))
################################################################################
# If we're not doing implicit hydrogens, add Phantom hydrogens to waters and mark
# the water oxygens as not being donors in atoms that are in the source or target selection.
# Also clear the donor status of all N, O, S atoms because we have explicit hydrogen donors.
if not self.params.probe.implicit_hydrogens:
make_sub_header('Adjusting for explicit hydrogens', out=self.logger)
if self.params.output.record_added_hydrogens:
outString += '@vectorlist {water H?} color= gray\n'
# Check all selected atoms
for a in all_selected_atoms:
# @todo Look up the radius of a water Hydrogen. This may require constructing a model with
# a single water in it and asking about the hydrogen radius.
phantomHydrogenRadius = 1.0 # @Remove after regression tests are complete.
#phantomHydrogenRadius = 1.05
if self.params.use_neutron_distances:
phantomHydrogenRadius = 1.0
adjustedHydrogenRadius = self.params.atom_radius_offset + (phantomHydrogenRadius * self.params.atom_radius_scale)
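          # Illustrative: with atom_radius_scale = 1.0 and atom_radius_offset = 0.0 the
          # adjusted radius equals phantomHydrogenRadius unchanged.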
# Adjust hydrogen atom class and radius as needed.
if a.element_is_hydrogen():
# If we are in a water, make sure our occupancy and temperature (b) factor are acceptable.
# If they are not, set the class for the atom to 'ignore'.
if self._inWater[a] and (a.occ < self.params.minimum_polar_hydrogen_occupancy or
a.b > self.params.maximum_polar_hydrogen_b):
self._atomClasses[a] = 'ignore'
else:
for n in bondedNeighborLists[a]:
if n.element in ['N','O','S']:
# We may have our radius adjusted
ei = self._extraAtomInfo.getMappingFor(a)
if self.params.use_polar_hydrogens:
ei.vdwRadius = adjustedHydrogenRadius
self._extraAtomInfo.setMappingFor(a, ei)
# If we are the Oxygen in a water, then add phantom hydrogens pointing towards nearby acceptors
elif self._inWater[a] and a.element == 'O':
# We're an acceptor and not a donor.
ei = self._extraAtomInfo.getMappingFor(a)
ei.isDonor = False
ei.isAcceptor = True
self._extraAtomInfo.setMappingFor(a, ei)
# If we don't yet have Hydrogens attached, add phantom hydrogen(s)
if len(bondedNeighborLists[a]) == 0:
newPhantoms = Helpers.getPhantomHydrogensFor(a, self._spatialQuery, self._extraAtomInfo, 0.0, True,
adjustedHydrogenRadius)
phantomHydrogenRadius = 1.05 # @todo Remove these two lines after regression testing is complete.
adjustedHydrogenRadius = self.params.atom_radius_offset + (phantomHydrogenRadius * self.params.atom_radius_scale)
for p in newPhantoms:
# NOTE: The Phantoms have the same i_seq number as their parents. Although this does not
# impact our Probe data structures and algorithms, we'd like to avoid this in case it leaks
# through to some CCTBX-called code.
# This would require us to redo the i_seq numbers on the hierarchy and then recompute
# everything (unfortunately including the selection).
# Add the atom to the spatial-query data structure
self._spatialQuery.add(p)
# Set the extra atom information for this atom
ei = probeExt.ExtraAtomInfo(adjustedHydrogenRadius, False, True, True)
self._extraAtomInfo.setMappingFor(p, ei)
# Set the atomClass and other data based on the parent Oxygen.
self._atomClasses[p] = self._atom_class_for(a)
self._inWater[p] = self._inWater[a]
self._inMainChain[p] = self._inMainChain[a]
self._inSideChain[p] = self._inSideChain[a]
self._inHet[p] = self._inHet[a]
# @todo In the future, we may add these bonds, but that will cause the
# Phantom Hydrogens to mask their water Oxygens from close contacts or
# clashes with the acceptors, which is a change in behavior from the
# original Probe. For now, we separately handle Phantom Hydrogen
# interactions as special cases in the code.
bondedNeighborLists[p] = []
self._allBondedNeighborLists[p] = []
#bondedNeighborLists[p] = [a]
#bondedNeighborLists[a].append(p)
#self._allBondedNeighborLists[p] = [a]
#self._allBondedNeighborLists[a].append(p)
# Generate source dots for the new atom
self._dots[p] = dotCache.get_sphere(self._extraAtomInfo.getMappingFor(p).vdwRadius).dots()
# Add the new atom to any selections that the old atom was in.
if a in source_atoms:
source_atoms.add(p)
if a in target_atoms:
target_atoms.add(p)
# Report on the creation if we've been asked to
if self.params.output.record_added_hydrogens:
resName = a.parent().resname.strip().upper()
resID = str(a.parent().parent().resseq_as_int())
chainID = a.parent().parent().parent().id
iCode = a.parent().parent().icode
alt = a.parent().altloc
outString += '{{{:4s}{:1s}{:>3s}{:>2s}{:>4s}{}}}P {:8.3f}{:8.3f}{:8.3f}\n'.format(
a.name, alt, resName, chainID, resID, iCode,
a.xyz[0], a.xyz[1], a.xyz[2])
resName = p.parent().resname.strip().upper()
resID = str(p.parent().parent().resseq_as_int())
chainID = p.parent().parent().parent().id
iCode = p.parent().parent().icode
alt = p.parent().altloc
outString += '{{{:4s}{:1s}{:>3s}{:>2s}{:>4s}{}}}L {:8.3f}{:8.3f}{:8.3f}\n'.format(
p.name, alt, resName, chainID, resID, iCode,
p.xyz[0], p.xyz[1], p.xyz[2])
# Fix up the donor status for all of the atoms now that we've added the final explicit
# Phantom Hydrogens.
Helpers.fixupExplicitDonors(all_selected_atoms, bondedNeighborLists, self._extraAtomInfo)
################################################################################
# Re-fill all_selected_atoms
all_selected_atoms = source_atoms.union(target_atoms)
################################################################################
# Construct a DotScorer object. This must be done after we've added all Phantom
# Hydrogens and adjusted all of the ExtraAtomInfo.
make_sub_header('Make dot scorer', out=self.logger)
self._dotScorer = probeExt.DotScorer(self._extraAtomInfo, self.params.probe.gap_weight,
self.params.probe.bump_weight, self.params.probe.hydrogen_bond_weight,
self.params.probe.uncharged_hydrogen_cutoff, self.params.probe.charged_hydrogen_cutoff,
self.params.probe.clash_cutoff, self.params.probe.worse_clash_cutoff,
self.params.probe.contact_cutoff, self.params.probe.allow_weak_hydrogen_bonds)
################################################################################
# List of all of the keys for atom classes, including all elements and all
# nucleic acid types. These are in the order that the original Probe reported
# them. Based on atomprops.h:INIT_ATOM_TABLE from original probe.
self._allAtomClasses = ['ignore',
'H','C','N','O','P','S','As','Se','F','Cl','Br','I',
'Li','Na','Al','K','Mg','Ca','Mn','Fe','Co','Ni','Cu','Zn',
'Rb','Sr','Mo','Ag','Cd','In','Cs','Ba','Au','Hg','Tl','Pb',
'V','Cr','Te','Sm','Gd','Yb','W','Pt','U',
'He','Be','B','Ne','Se','Ar','Sc','Ti','Ga','Ge','Kr','Y','Zr',
'Sn','Sb','Xe','La','Ce','Fr','Ra','Th',
'Nb','Tc','Ru','Rh','Pd','Pr','Nd','Pm','Eu','Tb','Dy','Ho','Er',
'Tm','Lu','Hf','Ta','Re','Os','Ir','Bi','Po','At','Rn','Ac','Pa',
'Np','Pu','Am','Cm','Bk','Cf','Es','Fm','Md','No',
'a','c','t/u','g','other na','nonbase']
################################################################################
# Dictionary of dictionaries of lists structure holding lists of DotInfo class objects,
# indexed by atom class and then by interaction type. Fill in empty lists for all of
# the possible classes and types.
self._interactionTypes = [
probeExt.InteractionType.WideContact,
probeExt.InteractionType.CloseContact,
probeExt.InteractionType.WeakHydrogenBond,
probeExt.InteractionType.SmallOverlap,
probeExt.InteractionType.Bump,
probeExt.InteractionType.BadBump,
probeExt.InteractionType.StandardHydrogenBond
]
self._clear_results();
################################################################################
# Sums of interaction types of dots based on whether their source and/or target
# were mainchain, sidechain, both, or neither. There is another place to store
# the sum of multiple passes.
# Each contains an entry for each InteractionType and for the total.
self._MCMCCount = {}
self._SCSCCount = {}
self._MCSCCount = {}
self._otherCount = {}
for t in self._interactionTypes:
self._MCMCCount[t] = 0
self._SCSCCount[t] = 0
self._MCSCCount[t] = 0
self._otherCount[t] = 0
################################################################################
# Generate sorted lists of the selected atoms, so that we run them in the same order
# they appear in the model file. This will group phantom hydrogens with the oxygens
# they are associated with because they share the same sequence ID.
# We add the location to the sorting criteria because the phantom hydrogens have the
# same sequence ID as their parent O and as each other.
self._source_atoms_sorted = sorted(source_atoms, key=lambda atom: "{} {:.3f} {:.3f} {:.3f}".format(
atom.i_seq, atom.xyz[0], atom.xyz[1], atom.xyz[2]))
self._target_atoms_sorted = sorted(target_atoms, key=lambda atom: "{} {:.3f} {:.3f} {:.3f}".format(
atom.i_seq, atom.xyz[0], atom.xyz[1], atom.xyz[2]))
################################################################################
# Find our group label
if self.params.output.format == 'raw':
groupLabel = ""
else:
groupLabel = "dots"
if len(self.params.output.group_label) > 0:
groupLabel = self.params.output.group_label
################################################################################
# Do the calculations; which one depends on the approach and other phil parameters.
# Append the information to the string that will be written to file.
if self.params.approach == 'count_atoms':
make_sub_header('Counting atoms', out=self.logger)
# Report the number of atoms in the source selection
outString += 'atoms selected: '+str(len(self._source_atoms_sorted))+'\n'
elif self.params.approach == 'surface':
make_sub_header('Find surface dots', out=self.logger)
# Produce dots on the surfaces of the selected atoms.
maxRadius = 2*self._maximumVDWRadius + 2 * self.params.probe.radius
for src in self._source_atoms_sorted:
# Find nearby atoms that might come into contact. This greatly speeds up the
# search for touching atoms.
maxRadius = (self._extraAtomInfo.getMappingFor(src).vdwRadius + self._maximumVDWRadius +
2 * self.params.probe.radius)
nearby = self._spatialQuery.neighbors(src.xyz, 0.001, maxRadius)
# Select those that are actually within the contact distance based on their
# particular radius.
atomList = []
for n in nearby:
d = (Helpers.rvec3(n.xyz) - Helpers.rvec3(src.xyz)).length()
if (d <= self._extraAtomInfo.getMappingFor(n).vdwRadius +
self._extraAtomInfo.getMappingFor(src).vdwRadius + 2*self.params.probe.radius):
atomList.append(n)
# Find out what class of dot we should place for this atom.
atomClass = self._atomClasses[src]
# Generate all of the dots for this atom.
self._generate_surface_dots_for(src, atomList)
# Count the dots if we've been asked to do so.
if self.params.output.count_dots:
numSkinDots = self._count_skin_dots(self._source_atoms_sorted, bondedNeighborLists)
if self.params.output.format != 'raw':
outString += self._describe_selection_and_parameters(groupLabel, "external")
nsel = len(self._source_atoms_sorted)
if self.params.output.format == 'raw':
outString += self._rawEnumerate("", nsel, False, True, numSkinDots, groupLabel)
else:
outString += self._describe_run("program:","command:")
outString += self._enumerate("extern dots", nsel, False, True, numSkinDots)
# Otherwise, produce the dots as output
else:
# Check for various output format types other than Kinemage.
# We're not implementing O format or XV format, but we still allow raw and oneline
if self.params.output.format == 'raw':
outString += self._writeRawOutput("1->none",groupLabel)
elif self.params.output.format == 'oneline':
# Do nothing for this mode when computing the surface
pass
elif self.params.output.format == 'standard': # Standard/Kinemage format
outString += self._describe_run("@caption"," command:")
masterName = "dots"
if len(self.params.output.group_name) > 0:
masterName = self.params.output.group_name
if self.params.output.add_group_line:
outString += "@group dominant {{{}}}\n".format(masterName)
outString += self._writeOutput("extern dots", masterName)
else:
raise ValueError("Unrecognized output format: "+self.params.output.format+" (internal error)")
elif self.params.approach == 'self':
make_sub_header('Find self-intersection dots', out=self.logger)
# Generate dots for the source atom set against itself.
self._generate_interaction_dots(self._source_atoms_sorted, self._source_atoms_sorted, bondedNeighborLists)
# Generate our report
outString += self._report_single_interaction(groupLabel, "self", "1->1", "SelfIntersect",
len(atomLists), modelIndex)
elif self.params.approach == 'once':
make_sub_header('Find single-direction intersection dots', out=self.logger)
# Generate dots for the source atom set against the target atom set.
self._generate_interaction_dots(self._source_atoms_sorted, self._target_atoms_sorted, bondedNeighborLists)
# Generate our report
outString += self._report_single_interaction(groupLabel, "once", "1->2", "IntersectOnce",
len(atomLists), modelIndex)
elif self.params.approach == 'both':
make_sub_header('Find both-directions intersection dots', out=self.logger)
# @todo The code below here is similar to -once but is repeated twice and has different string values.
# It is also somewhat re-ordered in terms of where the selection is printed. This keeps us from
# re-using _report_single_interaction() directly without generalizing it.
# Preliminary information before running both intersections.
if self.params.output.count_dots:
if self.params.output.format != 'raw':
outString += self._describe_run("program:","command:")
outString += self._describe_selection_and_parameters(groupLabel, "once")
else: # Not counting the dots
if self.params.output.format == 'raw':
pass
elif self.params.output.format == 'standard':
outString += self._describe_run("@caption"," command:")
if self.params.output.add_group_line:
outString += "@group {{{}}}\n".format(groupLabel)
# =================== First direction ========================
# Generate dots for the source atom set against the target atom set.
self._generate_interaction_dots(self._source_atoms_sorted, self._target_atoms_sorted, bondedNeighborLists)
# Count the dots if we've been asked to do so.
if self.params.output.count_dots:
numSkinDots = self._count_skin_dots(self._source_atoms_sorted, bondedNeighborLists)
nsel = len(self._source_atoms_sorted)
if self.params.output.format == 'raw':
outString += self._rawEnumerate("1->2", nsel, self.params.output.compute_scores, False, numSkinDots, groupLabel)
else:
outString += self._enumerate("1->2", nsel, self.params.output.compute_scores, False, numSkinDots)
else: # Not counting the dots
# Check for various output format types.
# We're not implementing O format or XV format, but we still allow raw and oneline
if self.params.output.format == 'raw':
outString += self._writeRawOutput("1->2",groupLabel)
elif self.params.output.format == 'oneline':
# Accumulate but do not report results
outString += self._count_summary("IntersectBothWays 1->2", False)
elif self.params.output.format == 'standard': # Standard/Kinemage format
outString += self._writeOutput("1->2", groupLabel)
if self.params.output.contact_summary:
# Accumulate but do not report results
outString += self._count_summary("IntersectBothWays 1->2", False)
# =================== Second direction ========================
# Clear the results before running interactions the other direction.
self._clear_results()
# Generate dots for the target atom set against the source atom set.
self._generate_interaction_dots(self._target_atoms_sorted, self._source_atoms_sorted, bondedNeighborLists)
# Count the dots if we've been asked to do so.
if self.params.output.count_dots:
numSkinDots = self._count_skin_dots(self._target_atoms_sorted, bondedNeighborLists)
nsel = len(self._target_atoms_sorted)
if self.params.output.format == 'raw':
outString += self._rawEnumerate("2->1", nsel, self.params.output.compute_scores, False, numSkinDots, groupLabel)
else:
outString += self._enumerate("2->1", nsel, self.params.output.compute_scores, False, numSkinDots)
else: # Not counting the dots
# Check for various output format types.
# We're not implementing O format or XV format, but we still allow raw and oneline
if self.params.output.format == 'raw':
outString += self._writeRawOutput("2->1",groupLabel)
elif self.params.output.format == 'oneline':
# Accumulate and report results
outString += self._count_summary("IntersectBothWays 2->1", True)
elif self.params.output.format == 'standard': # Standard/Kinemage format
outString += self._writeOutput("2->1", groupLabel)
if self.params.output.contact_summary:
# Accumulate and report results
outString += self._count_summary("IntersectBothWays 2->1", True)
else:
raise ValueError("Unrecognized output format: "+self.params.output.format+" (internal error)")
# Write the output to the specified file.
with open(self.params.output.file_name, "w") as of:
of.write(outString)
# ------------------------------------------------------------------------------
def Test(self):
'''
Run tests on the methods of the class. Throw an assertion error if there is a problem with
one of them and return normally if there is not a problem.
'''
#=====================================================================================
# Test the _condense() method.
atoms = [ # Different atoms for different indices
pdb.hierarchy.atom(), pdb.hierarchy.atom(), pdb.hierarchy.atom(), pdb.hierarchy.atom()
]
# Name the atoms distinctly so that they will sort in order.
for i,a in enumerate(atoms):
a.name = str(i)
ag1 = pdb.hierarchy.atom_group()
for a in atoms:
ag1.append_atom(a)
rg1 = pdb.hierarchy.residue_group()
rg1.append_atom_group(ag1)
rg1.resseq = 1
c1 = pdb.hierarchy.chain()
c1.append_residue_group(rg1)
sourceTarget = [ # Index of source atom, target atom pairs to add into the dots list
(1,1), (1,2), (1,1), (1,2),
(2,1),
(3,1), (3,1), (3,1), (3,2), (3,2), (3,2)
]
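# When condensed, the duplicates above collapse to the 5 unique (source, target)
# pairs (1,1), (1,2), (2,1), (3,1) and (3,2), which the assertions below rely on.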
dots = [ # Construct a test dots list based on the sourceTarget tuples.
self.DotInfo(atoms[src],atoms[trg],(0,0,0), (0,0,0), probeExt.OverlapType.Ignore, 0.0, ' ', 0.0)
for (src,trg) in sourceTarget
]
# Store state that we need to put back
stored = self.params.output.condensed
# Test when only sorting
self.params.output.condensed = False
inorder = self._condense(dots)
assert len(inorder) == len(dots), "probe2:Test(): Unexpected length from _condense when not condensing"
assert inorder[0].target == inorder[1].target, "probe2:Test(): Unexpected sorted value from _condense when not condensing"
assert inorder[1].target != inorder[2].target, "probe2:Test(): Unexpected sorted value from _condense when not condensing"
# Test when also condensing
self.params.output.condensed = True
inorder = self._condense(dots)
assert len(inorder) == 5, "probe2:Test(): Unexpected length from _condense when condensing"
assert inorder[0].target != inorder[1].target, "probe2:Test(): Unexpected sorted value from _condense when condensing"
assert inorder[-1].dotCount == 3, "probe2:Test(): Unexpected dot count value from _condense when condensing"
# Restore state
self.params.output.condensed = stored
#=====================================================================================
# Test the _totalInteractionCount() method. We make stand-in dictionaries using a stand-in
# list.
interactionTypes = [0, 1, 2, 3, 4, 5, 6]
MCMCCount = {}
for t in interactionTypes:
MCMCCount[t] = 1
assert self._totalInteractionCount(MCMCCount) == len(interactionTypes), "probe2:Test(): _totalInteractionCount(MCMCCount) failed"
#=====================================================================================
# @todo Unit tests for other methods
# ------------------------------------------------------------------------------
#def get_results(self):
# return group_args(model = self.model)
|
the-stack_0_24795
|
from airflow.models import Variable
import pandas as pd
import sqlalchemy as db
import configparser
import logging
# variables
SOURCE_MYSQL_HOST = Variable.get('SOURCE_MYSQL_HOST')
SOURCE_MYSQL_PORT = Variable.get('SOURCE_MYSQL_PORT')
SOURCE_MYSQL_USER = Variable.get('SOURCE_MYSQL_USER')
SOURCE_MYSQL_PASSWORD = Variable.get('SOURCE_MYSQL_PASSWORD')
SOURCE_MYSQL_ROOT_PASSWORD = Variable.get('SOURCE_MYSQL_ROOT_PASSWORD')
SOURCE_MYSQL_DATABASE = Variable.get('SOURCE_MYSQL_DATABASE')
DW_MYSQL_HOST = Variable.get('DW_MYSQL_HOST')
DW_MYSQL_PORT = Variable.get('DW_MYSQL_PORT')
DW_MYSQL_USER = Variable.get('DW_MYSQL_USER')
DW_MYSQL_PASSWORD = Variable.get('DW_MYSQL_PASSWORD')
DW_MYSQL_ROOT_PASSWORD = Variable.get('DW_MYSQL_ROOT_PASSWORD')
DW_MYSQL_DATABASE = Variable.get('DW_MYSQL_DATABASE')
# Database connection URI
db_conn_url = "mysql+pymysql://{}:{}@{}:{}/{}".format(SOURCE_MYSQL_USER,
SOURCE_MYSQL_PASSWORD,
SOURCE_MYSQL_HOST,
SOURCE_MYSQL_PORT,
SOURCE_MYSQL_DATABASE)
db_engine = db.create_engine(db_conn_url)
# Data warehouse connection URI
dw_conn_url = "mysql+pymysql://{}:{}@{}:{}/{}".format(DW_MYSQL_USER,
DW_MYSQL_PASSWORD,
DW_MYSQL_HOST,
DW_MYSQL_PORT,
DW_MYSQL_DATABASE)
dw_engine = db.create_engine(dw_conn_url)
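# For reference, the URIs built above take the usual SQLAlchemy/PyMySQL shape;
# the values below are purely illustrative, not real credentials:
#   mysql+pymysql://etl_user:secret@warehouse-host:3306/dw_database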
def get_factSales_last_id(db_engine):
"""Function to get last sales_key from fact table `factSales`"""
query = "SELECT max(sales_key) AS last_id FROM factSales"
logging.info("Query={}".format(query))
tdf = pd.read_sql(query, db_engine)
return tdf.iloc[0]['last_id']
def extract_table_payment(last_id, execution_date, db_engine):
"""Function to extract table `payment`"""
if last_id is None:
last_id = -1
query = "SELECT * FROM payment WHERE payment_id > {} AND DATE(payment_date) <= '{}' LIMIT 100000".format(
last_id, execution_date)
logging.info("Query={}".format(query))
return pd.read_sql(query, db_engine)
def lookup_dim_customer(payment_df, db_engine):
"""Function to lookup table `dimCustomer`"""
unique_ids = list(payment_df.customer_id.unique())
unique_ids = list(filter(None, unique_ids))
query = "SELECT * FROM dimCustomer WHERE customer_key IN ({})".format(
','.join(map(str, unique_ids)))
logging.info("Query={}".format(query))
return pd.read_sql(query, db_engine)
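# Illustrative example of the lookup query built above (ids are made up):
#   SELECT * FROM dimCustomer WHERE customer_key IN (1,5,12)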
def lookup_table_rental(payment_df, db_engine):
"""Function to lookup table `rental`"""
payment_df = payment_df.dropna(how='any', subset=['rental_id'])
unique_ids = list(payment_df.rental_id.unique())
query = "SELECT * FROM rental WHERE rental_id IN ({})".format(
','.join(map(str, unique_ids)))
logging.info("Query={}".format(query))
return pd.read_sql(query, db_engine)
def lookup_table_inventory(rental_df, db_engine):
"""Function to lookup table `inventory`"""
rental_df = rental_df.dropna(how='any', subset=['inventory_id'])
unique_ids = list(rental_df.inventory_id.unique())
query = "SELECT * FROM inventory WHERE inventory_id IN ({})".format(
','.join(map(str, unique_ids)))
logging.info("Query={}".format(query))
return pd.read_sql(query, db_engine)
def lookup_dim_movie(inventory_df, db_engine):
"""Function to lookup table `dimMovie`"""
inventory_df = inventory_df.dropna(how='any', subset=['film_id'])
unique_ids = list(inventory_df.film_id.unique())
query = "SELECT * FROM dimMovie WHERE movie_key IN ({})".format(
','.join(map(str, unique_ids)))
logging.info("Query={}".format(query))
return pd.read_sql(query, db_engine)
def lookup_dim_store(inventory_df, db_engine):
"""Function to lookup table `dimStore`"""
inventory_df = inventory_df.dropna(how='any', subset=['store_id'])
unique_ids = list(inventory_df.store_id.unique())
query = "SELECT * FROM dimStore WHERE store_key IN ({})".format(
','.join(map(str, unique_ids)))
logging.info("Query={}".format(query))
return pd.read_sql(query, db_engine)
def join_payment_dimCustomer(payment_df, dimCustomer_df):
"""Transformation: join table `payment` and `dim_dimCustomer`"""
logging.info('dimCustomer_df=\n{}'.format(dimCustomer_df))
joined_df = pd.merge(payment_df, dimCustomer_df, left_on='customer_id',
right_on='customer_key', how='left', validate="m:1")
logging.info('joined_df=\n{}'.format(joined_df))
payment_df = payment_df[['payment_id', 'customer_id',
'rental_id', 'amount', 'payment_date']]
return joined_df
def join_payment_rental(payment_df, rental_df):
"""Transformation: join table `payment` and `rental`"""
payment_df = pd.merge(payment_df, rental_df, left_on='rental_id',
right_on='rental_id', how='left', validate="1:1")
payment_df = payment_df[['payment_id', 'customer_key',
'inventory_id', 'amount', 'payment_date',]]
return payment_df
def join_payment_inventory(payment_df, inventory_df):
"""Transformation: join table `payment` and `inventory`"""
payment_df = pd.merge(payment_df, inventory_df, left_on='inventory_id',
right_on='inventory_id', how='left', validate="1:1")
payment_df = payment_df[['payment_id', 'customer_key',
'film_id', 'store_id', 'amount', 'payment_date',]]
return payment_df
def join_payment_dimMovie(payment_df, dimMovie_df):
"""Transformation: join table `payment` and `dimMovie`"""
payment_df = pd.merge(payment_df, dimMovie_df, left_on='film_id',
right_on='movie_key', how='left', validate="m:1")
payment_df = payment_df[['payment_id', 'customer_key',
'film_id', 'store_id', 'amount', 'payment_date',]]
return payment_df
def join_payment_dimStore(payment_df, dimStore_df):
"""Transformation: join table `payment` and `dimStore`"""
payment_df = pd.merge(payment_df, dimStore_df, left_on='store_id',
right_on='store_key', how='left', validate="m:1")
payment_df = payment_df[['payment_id', 'customer_key',
'film_id', 'store_id', 'amount', 'payment_date']]
return payment_df
def add_date_key(payment_df):
"""Add date_key smart key"""
payment_df['date_key'] = payment_df.payment_date.dt.strftime('%Y%m%d')
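# Example: a payment_date of 2005-07-31 14:30:00 yields the smart key '20050731'.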
return payment_df
def rename_remove_columns(payment_df):
"""Rename and remove columns"""
payment_df = payment_df.rename({
'payment_id': 'sales_key',
'amount': 'sales_amount',
'film_id': 'movie_key',
'store_id': 'store_key',
'customer_id': 'customer_key'}, axis=1)
payment_df = payment_df[['sales_key', 'date_key', 'customer_key', 'movie_key', 'store_key', 'sales_amount']]
return payment_df
def validate(source_df, destination_df):
"""Function to validate transformation result"""
# make sure row count is equal between source and destination
source_row_count = source_df.shape[0]
destination_row_count = destination_df.shape[0]
if source_row_count != destination_row_count:
raise ValueError(
'Transformation result is not valid: row count is not equal (source={}; destination={})'.format(
source_row_count, destination_row_count))
# make sure there is no null value in any dimension key
if destination_df['customer_key'].hasnans:
raise ValueError(
'Transformation result is not valid: column customer_key has NaN value')
return destination_df
def load_dim_payment(destination_df):
"""Load to data warehouse"""
destination_df.to_sql('factSales', dw_engine,
if_exists='append', index=False)
def run_job(**kwargs):
execution_date = kwargs["execution_date"].date()
logging.info("Execution datetime={}".format(execution_date))
############################################
# EXTRACT
############################################
# Get last payment_id from factSales data warehouse
last_id = get_factSales_last_id(dw_engine)
logging.info('last id={}'.format(last_id))
# Extract the payment table into a pandas DataFrame
payment_df = extract_table_payment(last_id, execution_date, db_engine)
logging.info('payment_df=\n{}'.format(payment_df))
# If no records fetched, then exit
if payment_df.shape[0] == 0:
logging.info('No new record in source table')
else:
# Extract lookup table `dimCustomer`
dimCustomer_df = lookup_dim_customer(payment_df, dw_engine)
logging.info('dimCustomer_df=\n{}'.format(dimCustomer_df))
# Extract lookup table `rental`
rental_df = lookup_table_rental(payment_df, db_engine)
logging.info('rental_df=\n{}'.format(rental_df))
# Extract lookup table `inventory`
inventory_df = lookup_table_inventory(rental_df, db_engine)
logging.info('inventory_df=\n{}'.format(inventory_df))
# Extract lookup table `dimMovie`
dimMovie_df = lookup_dim_movie(inventory_df, dw_engine)
logging.info('dimMovie_df=\n{}'.format(dimMovie_df))
# Extract lookup table `dimStore`
dimStore_df = lookup_dim_store(inventory_df, dw_engine)
logging.info('dimStore_df=\n{}'.format(dimStore_df))
############################################
# TRANSFORM
############################################
# Join table `payment` with `dimCustomer`
dim_payment_df = join_payment_dimCustomer(payment_df, dimCustomer_df)
logging.info('result_transform_1=\n{}'.format(dim_payment_df))
# Join table `payment` with `rental`
dim_payment_df = join_payment_rental(dim_payment_df, rental_df)
logging.info('result_transform_2=\n{}'.format(dim_payment_df))
# Join table `payment` with `inventory`
dim_payment_df = join_payment_inventory(dim_payment_df, inventory_df)
logging.info('result_transform_3=\n{}'.format(dim_payment_df))
# Join table `payment` with `dimMovie`
dim_payment_df = join_payment_dimMovie(dim_payment_df, dimMovie_df)
logging.info('result_transform_4=\n{}'.format(dim_payment_df))
# Join table `payment` with `dimStore`
dim_payment_df = join_payment_dimStore(dim_payment_df, dimStore_df)
logging.info('result_transform_5=\n{}'.format(dim_payment_df))
# Add date_key smart key
dim_payment_df = add_date_key(dim_payment_df)
logging.info('result_transform_6=\n{}'.format(dim_payment_df))
# Rename and remove columns
dim_payment_df = rename_remove_columns(dim_payment_df)
logging.info('result_transform_7=\n{}'.format(dim_payment_df))
# Validate result
dim_payment_df = validate(payment_df, dim_payment_df)
logging.info('result_transform_8=\n{}'.format(dim_payment_df.dtypes))
# ############################################
# # LOAD
# ############################################
# Load dimension table `factSales`
load_dim_payment(dim_payment_df)
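# ------------------------------------------------------------------------------
# Minimal sketch (an assumption, not part of the original job) of how `run_job`
# could be wired into an Airflow DAG; the DAG id, schedule and start date below
# are invented for illustration only.
#
# from datetime import datetime
# from airflow import DAG
# from airflow.operators.python import PythonOperator
#
# with DAG(dag_id="load_fact_sales", start_date=datetime(2021, 1, 1),
#          schedule_interval="@daily", catchup=False) as dag:
#     PythonOperator(task_id="run_job", python_callable=run_job)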
|
the-stack_0_24801
|
"""
Deploy Single Shot Multibox Detector(SSD) model
===============================================
**Author**: `Yao Wang <https://github.com/kevinthesun>`_, \
`Leyuan Wang <https://github.com/Laurawly>`_
This article is an introductory tutorial to deploy SSD models with TVM.
We will use an MXNet pretrained SSD model with ResNet50 as the body network and
convert it to an NNVM graph.
"""
import os
import zipfile
import tvm
import mxnet as mx
import cv2
import numpy as np
from nnvm import compiler
from nnvm.frontend import from_mxnet
from tvm import relay
from tvm.contrib.download import download
from tvm.contrib import graph_runtime
from mxnet.model import load_checkpoint
######################################################################
# Preliminary and Set parameters
# ------------------------------
# We should build TVM with sort support, in TVM root directory
#
# .. code-block:: bash
#
# echo "set(USE_SORT ON)" > config.mk
# make -j8
#
model_name = "ssd_resnet50_512"
model_file = "%s.zip" % model_name
test_image = "dog.jpg"
dshape = (1, 3, 512, 512)
dtype = "float32"
# Target settings
# Use these commented settings to build for cuda.
#target = 'cuda'
#ctx = tvm.gpu(0)
# Use these commented settings to build for opencl.
#target = 'opencl'
#ctx = tvm.opencl(0)
target = "llvm"
ctx = tvm.cpu()
######################################################################
# Download MXNet SSD pre-trained model and demo image
# ---------------------------------------------------
# Pre-trained model available at
# https://github.com/apache/incubator-mxnet/tree/master/example/ssd
model_url = "https://github.com/zhreshold/mxnet-ssd/releases/download/v0.6/" \
"resnet50_ssd_512_voc0712_trainval.zip"
image_url = "https://cloud.githubusercontent.com/assets/3307514/20012567/" \
"cbb60336-a27d-11e6-93ff-cbc3f09f5c9e.jpg"
inference_symbol_folder = \
"c1904e900848df4548ce5dfb18c719c7-a28c4856c827fe766aa3da0e35bad41d44f0fb26"
inference_symbol_url = "https://gist.github.com/kevinthesun/c1904e900848df4548ce5dfb18c719c7/" \
"archive/a28c4856c827fe766aa3da0e35bad41d44f0fb26.zip"
dir = "ssd_model"
if not os.path.exists(dir):
os.makedirs(dir)
model_file_path = "%s/%s" % (dir, model_file)
test_image_path = "%s/%s" % (dir, test_image)
inference_symbol_path = "%s/inference_model.zip" % dir
download(model_url, model_file_path)
download(image_url, test_image_path)
download(inference_symbol_url, inference_symbol_path)
zip_ref = zipfile.ZipFile(model_file_path, 'r')
zip_ref.extractall(dir)
zip_ref.close()
zip_ref = zipfile.ZipFile(inference_symbol_path)
zip_ref.extractall(dir)
zip_ref.close()
######################################################################
# Convert and compile model with NNVM or Relay for CPU.
sym = mx.sym.load("%s/%s/ssd_resnet50_inference.json" % (dir, inference_symbol_folder))
_, arg_params, aux_params = load_checkpoint("%s/%s" % (dir, model_name), 0)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--frontend",
help="Frontend for compilation, nnvm or relay",
type=str,
default="nnvm")
args = parser.parse_args()
if args.frontend == "relay":
net, params = relay.frontend.from_mxnet(sym, {"data": dshape}, arg_params=arg_params, \
aux_params=aux_params)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(net, target, params=params)
elif args.frontend == "nnvm":
net, params = from_mxnet(sym, arg_params, aux_params)
with compiler.build_config(opt_level=3):
graph, lib, params = compiler.build(
net, target, {"data": dshape}, params=params)
else:
parser.print_help()
parser.exit()
######################################################################
# Create TVM runtime and do inference
# Preprocess image
image = cv2.imread(test_image_path)
img_data = cv2.resize(image, (dshape[2], dshape[3]))
img_data = img_data[:, :, (2, 1, 0)].astype(np.float32)
img_data -= np.array([123, 117, 104])
img_data = np.transpose(np.array(img_data), (2, 0, 1))
img_data = np.expand_dims(img_data, axis=0)
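# The steps above resize the image to the model input size, reorder OpenCV's BGR
# channels to RGB, subtract the per-channel training mean, move the data to NCHW
# layout and add a batch dimension to match the (1, 3, 512, 512) input shape.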
# Build TVM runtime
m = graph_runtime.create(graph, lib, ctx)
m.set_input('data', tvm.nd.array(img_data.astype(dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
######################################################################
# Display result
class_names = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair",
"cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant",
"sheep", "sofa", "train", "tvmonitor"]
def display(img, out, thresh=0.5):
import random
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = (10, 10)
pens = dict()
plt.clf()
plt.imshow(img)
for det in out:
cid = int(det[0])
if cid < 0:
continue
score = det[1]
if score < thresh:
continue
if cid not in pens:
pens[cid] = (random.random(), random.random(), random.random())
scales = [img.shape[1], img.shape[0]] * 2
xmin, ymin, xmax, ymax = [int(p * s) for p, s in zip(det[2:6].tolist(), scales)]
rect = plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False,
edgecolor=pens[cid], linewidth=3)
plt.gca().add_patch(rect)
text = class_names[cid]
plt.gca().text(xmin, ymin-2, '{:s} {:.3f}'.format(text, score),
bbox=dict(facecolor=pens[cid], alpha=0.5),
fontsize=12, color='white')
plt.show()
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
display(image, tvm_output.asnumpy()[0], thresh=0.45)
|
the-stack_0_24802
|
# -*- coding:utf-8 -*-
import time
from math import ceil
from typing import Any, Optional
from .console import Term
from ..common import Fx, get_width, run_cmd, confirm
class Widget(object):
_activation = False
def activate(self):
self._activation = True
def deactivate(self):
self._activation = False
def is_activation(self):
return self._activation
def _process_event(self):
raise NotImplementedError()
def _render(self, size):
raise NotImplementedError()
class SwitchWidget(Widget):
_activation = True
def __init__(self, sub_widgets: list = None, start_idx: int = 0):
self.idx = start_idx
if not sub_widgets:
self.sub_widgets: list[Widget] = []
self.sub_widgets_count = 0
else:
self.sub_widgets = sub_widgets
self.sub_widgets_count = len(sub_widgets)
def add(self, widget):
"""Add new sub widget."""
self.sub_widgets.append(widget)
self.sub_widgets_count += 1
def set_current(self, idx: int):
"""Set the top sub widget, if index is valid."""
if self.idx != idx and 0 <= idx < self.sub_widgets_count:
self.sub_widgets[self.idx].deactivate()
self.idx = idx
self.sub_widgets[self.idx].activate()
def process_keyevent(self, key: str) -> Optional[int]:
raise NotImplementedError()
def _process_event(self, key: str):
next_idx = self.process_keyevent(key)
if isinstance(next_idx, int):
self.set_current(next_idx)
else:
self.sub_widgets[self.idx]._process_event(key)
def _render(self, size):
"""
This widget cannot render any, call current sub widget ``_render``
"""
current_sub_widget = self.sub_widgets[self.idx]
if not current_sub_widget.is_activation():
current_sub_widget.activate()
current_sub_widget._render(size)
class RowPanelWidget(Widget):
def __init__(
self,
cursor: Optional[str] = None,
help_wait: float = 1.5,
widget: Widget = None,
**kwargs,
) -> None:
self.widget = widget
self.size = None
if not cursor or get_width(ord(cursor)) != 1:
self.cursor = "→"
else:
self.cursor = cursor
self.help_wait = help_wait
# Initialize.
self.cursor_row: int = 1
self.display_range: list = None # Allow display row range.
self.extra = 0 # Extra occupied row.
self.raw_data: list = None
self.show_data: list = None
self.update_raw: bool = False
for key, value in kwargs.items():
setattr(self, "_ex_{}".format(key), value)
def get_raw_data(self) -> list[Any]:
"""How to get the raw data."""
raise NotImplementedError()
def process_raw_data(self, raw_data: list[Any]) -> list[str]:
return raw_data
def generate_show_data(
self, raw_data: list[str], width: int
) -> list[tuple[str, int]]:
"""
Process the raw data, and indicate the number of additional
rows that need to be occupied when each piece of data is displayed.
"""
new_list = []
for line in raw_data:
text = Fx.uncolor(line)
count = 0
for ch in text:
count += get_width(ord(ch))
# ceil() counts how many terminal rows this line occupies; subtracting one
# leaves only the number of *extra* rows beyond the first.
new_list.append((line, ceil(count / width) - 1))
return new_list
def print_line(self, line: str, is_cursor_row: bool) -> None:
"""How to output one line.
The output may differ when the current line is the cursor line,
so the cursor line can be rendered specially.
"""
raise NotImplementedError()
def update(self):
self.display_range = [1, self.size[1] - 1]
self.raw_data: list[Any] = self.get_raw_data()
self.show_data = self.generate_show_data(
self.process_raw_data(self.raw_data), self.size[0]
)
def emit(self, name, cb=None):
if name == "update":
self.update()
def _render(self, size):
if self.widget and self.widget.is_activation():
self.widget._render(self.size)
return
if self.size != size:
self.size = size
self.update()
# Adjust display row range.
while self.cursor_row < self.display_range[0]:
self.display_range = [i - 1 for i in self.display_range]
while self.cursor_row + self.extra > self.display_range[1]:
self.display_range = [i + 1 for i in self.display_range]
# Every time refresh the output, need to recalculate the
# number of additional rows, so need to reset to zero.
self.extra = 0
# Print needed display part.
for index, item in enumerate(self.show_data, start=1):
line, each_extra = item
if self.display_range[0] <= index <= self.display_range[1] - self.extra:
self.print_line(line, index == self.cursor_row)
self.extra += each_extra
def _process_event(self, key: str):
if self.widget and self.widget.is_activation():
# If has sub widget and it's activation.
self.widget._process_event(key)
elif self.is_activation():
# Process key.
if key in ["j", "down"]:
# select next line.
self.cursor_row += 1
self.cursor_row = min(self.cursor_row, len(self.show_data))
elif key in ["k", "up"]:
# select previous line.
self.cursor_row -= 1
self.cursor_row = max(self.cursor_row, 1)
elif key in ["J"]:
# scroll down 5 lines.
self.cursor_row += 5
self.cursor_row = min(self.cursor_row, len(self.show_data))
elif key in ["K"]:
# scroll up 5 lines.
self.cursor_row -= 5
self.cursor_row = max(self.cursor_row, 1)
elif key in ["?", "h"]:
print(Term.clear_screen)
print(
(
"k / ↑: select previous line.\n"
"j / ↓: select next line.\n"
"J: Scroll down 5 lines.\n"
"K: Scroll down 5 lines.\n"
"? / h : show help, wait {}s and exit.\n" + self.keyevent_help()
).format(self.help_wait)
)
time.sleep(self.help_wait)
else:
self.process_keyevent(key, self.cursor_row)
def process_keyevent(self, input_key: str, cursor_row: int) -> bool:
"""Handles keyboard events other than movement.
Args:
input_key (str): keyboard string.
cursor_row (int): current line.
Returns:
bool: whether need refresh data.
"""
pass
def keyevent_help(self) -> str:
"""Get extra keyevent help message.
Returns:
str: help message string.
"""
pass
class ConfirmWidget:
def __init__(self, msg: str, default: bool = True) -> None:
self.msg = msg
self.default = default
def run(self):
print(Term.clear_screen, end="")
return confirm(self.msg, self.default)
class CmdRunner:
def __init__(self, cmd: str, auto_run: bool = True) -> None:
self.cmd = cmd
self.auto_run = auto_run
if self.auto_run:
self.run()
def run(self):
print(Term.normal_screen)
res_code = run_cmd(self.cmd)
print(Term.alt_screen)
return res_code
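# Illustrative sketch (an assumption, not part of this module): a concrete panel
# only needs to supply get_raw_data() and print_line(); cursor movement, paging
# and the built-in help screen are inherited from RowPanelWidget.
class _ExampleFileList(RowPanelWidget):
    def get_raw_data(self):
        # Static data purely for demonstration.
        return ["a.txt", "b.txt", "c.txt"]
    def print_line(self, line, is_cursor_row):
        # Mark the selected row with the cursor glyph, pad the others.
        prefix = self.cursor if is_cursor_row else " "
        print("{} {}".format(prefix, line))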
|
the-stack_0_24803
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum_ltc import Wallet, WalletStorage
from electrum_ltc.util import UserCancelled, InvalidPassword
from electrum_ltc.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET
from electrum_ltc.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
class GoBack(Exception):
pass
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
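# Each method decorated with @wizard_dialog builds its layout, blocks in
# exec_layout() until the user presses Back/Next, and then forwards its return
# value(s) to the supplied `run_next` continuation.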
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-LTC - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum-ltc.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self, get_wallet_from_daemon):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-LTC wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.storage = wallet_from_memory.storage
else:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif not wallet_from_memory:
if self.storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.storage.path)
if wallet_from_memory:
return wallet_from_memory
if self.storage.file_exists() and self.storage.is_encrypted():
if self.storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
elif self.storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.stack = []
return self.run_and_get_wallet(get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
if self.storage.is_past_initial_decryption():
break
else:
return
else:
raise Exception('Unexpected encryption version')
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(None, msg, kind, self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(msg)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=()):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMaximumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
the-stack_0_24804
|
import os
import tdml
import copy
import pandas as pd
from sklearn.linear_model import LinearRegression, ElasticNet
if __name__ == "__main__":
df = pd.read_csv('../data/GOOG.csv')
# Drop Date and Adj Close fields
df = df.drop(columns=['Date', 'Adj Close'])
# Predicting the Close price
ds = tdml.Dataset(df, label='Close')
ds.transform()
# Split the dataset into train and test sets with seed 0
ds.train_test_split(seed=0)
# Print a brief view of the dataset
print(ds)
X_train, y_train = ds.train_x, ds.train_y
X_test, y_test = ds.test_x, ds.test_y
log = "The coefficient of determination of {} data is {}"
for model in [LinearRegression, ElasticNet]:
print("Using {}:".format(model.__name__))
m = model().fit(X_train, y_train)
train_score = m.score(X_train, y_train)
print(log.format("train", round(train_score, 5)))
test_score = m.score(X_test, y_test)
print(log.format("test", round(test_score, 5)))
print()
# Split into train / val / test sets
ds.train_val_test_split(seed=0)
print(ds)
X_train, y_train = ds.train_x, ds.train_y
X_val, y_val = ds.val_x, ds.val_y
X_test, y_test = ds.test_x, ds.test_y
max_score = -1
best_model = None
for model in [LinearRegression, ElasticNet]:
print("Using {}:".format(model.__name__))
m = model().fit(X_train, y_train)
val_score = m.score(X_val, y_val)
print(log.format("val", round(val_score, 5)))
if val_score > max_score:
max_score = val_score
best_model = copy.deepcopy(m)
print("{} has the highest val score and it has test score: {}".\
format(best_model.__class__.__name__, round(best_model.score(X_test, y_test), 5)))
|
the-stack_0_24805
|
import os
from downloader import encode_filename
class Database:
def __init__(self, pictures: dict, mirror: bool) -> None:
self.pictures = pictures
self.mirror = mirror
self.mk_dtbdir()
def mk_dtbdir(self):
if not os.path.exists("./data"):
os.mkdir("./data")
for key in self.pictures.keys():
fname = f"./data/{encode_filename(key)}"
if not os.path.exists(fname):
os.mkdir(fname)
os.mkdir(f"{fname}/pixiv")
os.mkdir(f"{fname}/mirror")
def write_map(self):
f = open("./data/illust.csv", 'a+', encoding='utf-8')
current = Database.get_key("./data/illust.csv")
if "id" not in f.readline():
f.write("id, name\n")
for key in self.pictures.keys():
sep_loc = key.find(' ')
id = key[:sep_loc]
if id in current:
continue
name = key[sep_loc + 1:]
f.write(f"{id}, {name}\n")
f.close()
def write_url(self):
for key, values in self.pictures.items():
fname = f"./data/{encode_filename(key)}/mirror/url.csv" if self.mirror else f"./data/{encode_filename(key)}/pixiv/url.csv"
f = open(fname, 'a+', encoding='utf-8')
current = Database.get_key(fname)
if "url" not in f.readline():
f.write("url, r18, fmt\n")
for value in values:
url, r18 = value
if url in current:
continue
fmt = url[-3:]
start = url.find('original/') + 9 if self.mirror else url.find('img/') + 4
end = url.rfind('.')
url = url[start:end]
f.write(f"{url}, {r18}, {fmt}\n")
f.close()
@staticmethod
def get_key(fname):
f = open(fname, 'r', encoding='utf-8')
res = f.readlines()[1:]
for i, r in enumerate(res):
res[i] = r[:r.find(',')]
f.close()
return res
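# Illustrative usage (the key format and URL below are made up for this sketch):
#   db = Database({"123 title": [("https://example.invalid/img/2020/01/01/00/00/00/123_p0.jpg", 0)]},
#                 mirror=False)
#   db.write_map()
#   db.write_url()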
|
the-stack_0_24807
|
class OPT_TRANSLATE:
def __init__(self, models = ['experiments/checkpoints/USPTO-50K/transformer_16w-25w.pt'], src = 'data/USPTO-50K/src-test.txt', reverse = False):
self.alpha=0.0
self.attn_debug=False
self.batch_size=64
self.beam_size=50
self.beta=-0.0
self.block_ngram_repeat=0
self.coverage_penalty='none'
self.data_type='text'
self.dump_beam=''
self.dynamic_dict=False
self.fast=False
self.gpu=0
self.ignore_when_blocking=[]
self.image_channel_size=3
self.length_penalty='none'
self.log_file=''
self.log_probs=False
self.mask_from=''
self.max_length=200
self.max_sent_length=None
self.min_length=0
self.models=models
self.n_best=50
self.output='experiments/results/transformer.txt'
self.replace_unk=True
self.report_bleu=False
self.report_rouge=False
self.sample_rate=16000
self.share_vocab=False
self.src=src
self.src_dir=''
self.stepwise_penalty=False
self.tgt=None
self.verbose=False
self.window='hamming'
self.window_size=0.02
self.window_stride=0.01
if reverse:
self.models = ['experiments/checkpoints/USPTO-50K-reverse/transformer_16w-25w.pt']
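# Illustrative usage (assumption): this object stands in for the parsed argparse
# namespace that OpenNMT-py's translation entry point consumes, e.g.
#   opt = OPT_TRANSLATE(reverse=True)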
|
the-stack_0_24808
|
from typing import List, Dict
import csv
import argparse
import re
import datetime
import pickle
from flair.data_fetcher import NLPTaskDataFetcher, NLPTask
from flair.data import TaggedCorpus
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings, PooledFlairEmbeddings, CharacterEmbeddings, \
BertEmbeddings
data_folder = "/home/nelson/Data/auto_database_foundation/datasets/"
filenames_conll= {
"dev": data_folder+"CoNLL/valid.txt",
"test": data_folder+"CoNLL/test.txt",
"train": [
data_folder+"CoNLL/train.txt"
#"/work/wwang/corpora/CoNLL/original/train.txt",
#"/work/wwang/corpora/OntoNotes/training/ontonotes_full_scored_by_forward_lm.tagbymodel.scorebased.removeO.conll"
]
}
filenames_ontonotes= {
"dev": data_folder+"Ontonotes/ontonotes_dev.conll",
"test": data_folder+"Ontonotes/ontonotes_test.conll",
"train": [
data_folder+"Ontonotes/ontonotes_train.conll"
#"/work/wwang/corpora/CoNLL/original/train.txt",
#"/work/wwang/corpora/OntoNotes/training/ontonotes_full_scored_by_forward_lm.tagbymodel.scorebased.removeO.conll"
]
}
#class Token_desc:
def decoder(encoded_name: str, kb_type:str, comp_type: str) -> str:
if kb_type is 'Simple' or kb_type is 'Types':
if comp_type is 's' : # subject decoder:
name = encoded_name.replace('<','')
name = name.replace('>','')
name = name.replace('_',' ')
return name
if comp_type is 'p':
return encoded_name
if comp_type is 'o' : # object decoder:
name = encoded_name.replace('<','')
name = name.replace('>','')
"""
yagoSimpleTypes objects start with wikicat_ or wordnet_; anything else is treated as a yagoGeoEntity
"""
if name.startswith('wikicat_') :
name = name[len('wikicat_'):]
elif name.startswith('wordnet_'): # wordnet format: <wordnet_person_100007846>
name = name[len('wordnet_'):]
name = name[:-len('_100007846')]
#else: only yagoGeoEntity
#print('############### Found new object type: {}\n'.format(name))
name = name.replace('_',' ')
return name
elif kb_type is 'Facts':
if comp_type is 's' : # subject decoder:
name = encoded_name.replace('<','')
name = name.replace('>','')
name = name.replace('_',' ')
return name
if comp_type is 'p':
name = encoded_name.replace('<', '')
name = name.replace('>', '')
name = name.replace('_', ' ')
return name
if comp_type is 'o' : # object decoder:
name = encoded_name.replace('<', '')
name = name.replace('>', '')
name = name.replace('_', ' ')
return name
elif kb_type is 'Date':
"""
format: <1st_Light_Car_Patrol_(Australia)> <wasDestroyedOnDate> "1919-##-##"^^xsd:date 1919.0000
"""
if comp_type is 's' : # subject decoder:
name = encoded_name.replace('<','')
name = name.replace('>','')
name = name.replace('_',' ')
return name
if comp_type is 'p':
name = encoded_name.replace('<', '')
name = name.replace('>', '')
name = name.replace('_', ' ')
return name
if comp_type is 'o' : # object decoder:
return encoded_name[1:-11]
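# Illustrative examples of the decoding above:
#   decoder('<Albert_Einstein>', 'Simple', 's') -> 'Albert Einstein'
#   decoder('<wikicat_People_from_Berlin>', 'Simple', 'o') -> 'People from Berlin'
#   decoder('"1919-##-##"^^xsd:date', 'Date', 'o') -> '1919-##-##'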
def isNeedSubject(subject: str) -> bool:
# delete subjects from foreign language
if subject.startswith('/',2): # format: two characters abbr./subject
return False
elif '/' in subject:
#print('$$$$$$$$$$$$$ Exception of subject filtering: {}\n'.format(subject))
return True
else:
return True
class YAGO_data:
def __init__(self, params,kb_type):
#format:
self.kb : Dict[str,List[str]] = {}
self.kb_folder = params.folder
self.kb_type = kb_type
pass
def load_kb_tsv(self, kb_name) -> Dict:
"""
Format of different files:
- yagoFacts:
<id_J4g7!GNccC_5yH_seYbWPlbge> <Network_Rail> <owns> <Headstone_Lane_railway_station>
- yagoDateFacts:
<id_55R1FAawhE_8VX_N3WATqufJe> <Wedgewood_Village_Amusement_Park> <wasDestroyedOnDate> "1969-##-##"^^xsd:date 1969.0000
- yagoTypes:
<id_wGHfubCwBs_KCM_wTujFTpmfI> <Jean-Baptiste-Joseph_Gobel> rdf:type <wikicat_Roman_Catholic_archbishops_in_France>
- yagoTypes:
<es/Alberto_Ruiz_Largo> rdf:type <wikicat_Sporting_de_Gijón_B_players>
- yagoTransitiveType:
<id_QCt3Vm7wgc_KCM_D8FCd!kRW4> <Saccobolus_glaber> rdf:type <wikicat_Fungi>
only english mentions would be used so far (no data in form de/... or fr/...)
Upper cases & lower cases transformation
"""
#if kb_name == 'yagoSimpleTypes.tsv':
with open(self.kb_folder+kb_name) as file:
file.readline()
for line in file:
if self.kb_type == 'Simple':
shift_s = 0
shift_p = 0
shift_o = 0
else:
shift_s = 1
shift_p = 1
shift_o = 1
content = line.split()
f_subject = decoder(content[0+shift_s],self.kb_type,'s')
f_predicate = decoder(content[1+shift_p],self.kb_type,'p')
try:
f_object = decoder(content[2+shift_o],self.kb_type,'o')
                except Exception:
                    # skip malformed lines rather than crash later on an unbound f_object
                    print(content)
                    continue
if isNeedSubject(f_subject):
if f_subject in self.kb:
self.kb[f_subject].append(' '.join([f_predicate,f_object]))
else:
#print(f_predicate)
#print(f_object)
                        self.kb[f_subject] = []
self.kb[f_subject].append(' '.join([f_predicate,f_object]))
else: # add multilanguage
f_subject = f_subject[3:]
if f_subject in self.kb:
self.kb[f_subject].append(' '.join([f_predicate,f_object]))
else:
#print(f_predicate)
#print(f_object)
                        self.kb[f_subject] = []
self.kb[f_subject].append(' '.join([f_predicate,f_object]))
        return self.kb
def load_kb_pickle(self,kb_name:str):
with open(self.kb_folder+kb_name,'rb') as handle:
self.kb = pickle.load(handle)
return
def save_kb(self, type:List[str]):
if 'csv' in type:
with open(self.kb_folder+'{}_{}.csv'.format(self.kb_type,datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")),'w') as output_file:
for key in self.kb.keys():
output_file.write('{}: {}\n'.format(key,'\t'.join(s for s in self.kb[key])))
if 'pickle' in type:
with open(self.kb_folder+'{}_{}.pickle'.format(self.kb_type,datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")),'wb') as handle:
pickle.dump(self.kb, handle, protocol=pickle.HIGHEST_PROTOCOL)
pass
def get_lowercase_entity(self):
return {key.lower():'' for key in self.kb.keys()}
def match_entity(self, name: str ):
if name in self.kb:
return True
else:
return False
def get_desc(self,m):
return self.kb[m]
def get_embed(self, desc_sents: List[str]):
pass
#def calcu_token_completeness
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("--folder", default="/home/nelson/Data/auto_database_foundation/datasets/YAGO/")
args = argparser.parse_args()
    yago = YAGO_data(args, 'Simple')  # kb_type inferred from the yagoSimpleTypes load below
yago.load_kb_tsv('yagoSimpleTypes.tsv')
yago.save_kb(['csv','pickle'])
argparser = argparse.ArgumentParser()
argparser.add_argument("--folder", default="/home/nelson/Data/auto_database_foundation/datasets/YAGO/")
args = argparser.parse_args()
#yago_simple = YAGO_data(args,'Simple')
#yago_simple.load_kb_pickle('yago_simple_multi_lingual/KB_2019_04_16_14_06.pickle')
#keys_simple = yago_simple.get_lowercase_entity()
#yago.load_kb_tsv('yagoSimpleTypes.tsv')
#kb_keys = yago.get_lowercase_entity()
#yago.save_kb(['csv','pickle'])
yago_facts = YAGO_data(args,'Facts')
#yago_facts.load_kb_tsv('yagoFacts.tsv')
yago_facts.load_kb_pickle('yago_facts/Facts_2019_04_16_21_22.pickle')
keys_facts = yago_facts.get_lowercase_entity()
#yago_facts.save_kb(['csv','pickle'])
yago_types = YAGO_data(args,'Types')
#yago_types.load_kb_tsv('yagoTypes.tsv')
yago_types.load_kb_pickle('yago_types/Types_2019_04_16_14_41.pickle')
keys_types = yago_types.get_lowercase_entity()
#yago_types.save_kb(['csv','pickle'])
yago_date = YAGO_data(args,'Date')
#yago_date.load_kb_tsv('yagoDateFacts.tsv')
yago_date.load_kb_pickle('yago_date/Date_2019_04_16_21_44.pickle')
keys_date = yago_date.get_lowercase_entity()
#yago_date.save_kb(['csv','pickle'])
corpus = NLPTaskDataFetcher.load_corpus(task=NLPTask['TAC'], files=filenames_conll)
#corpus_dict = corpus.make_vocab_dictionary()
sentences = corpus.get_all_sentences()
m_found = 0
m_miss = 0
for sentence in sentences:
spans = [tag.text for tag in sentence.get_spans('ner')]
for span in spans:
if span.lower() in keys_types or span.lower() in keys_facts or span.lower() in keys_date:
m_found += 1
else:
m_miss +=1
print("m_found: {}\tm_miss: {}\tfound rate: {}".format(str(m_found),str(m_miss),str(m_found/(m_found+m_miss))))
|
the-stack_0_24809
|
import json
from typing import Any, Dict
import unittest
# from copy import deepcopy
import pystac
from pystac.asset import Asset
from pystac.errors import ExtensionTypeError, STACError
from pystac.extensions.pointcloud import (
AssetPointcloudExtension,
PointcloudExtension,
PointcloudSchema,
PointcloudStatistic,
)
from tests.utils import TestCases, assert_to_from_dict
class PointcloudTest(unittest.TestCase):
def setUp(self) -> None:
self.maxDiff = None
self.example_uri = TestCases.get_path("data-files/pointcloud/example-laz.json")
self.example_uri_no_statistics = TestCases.get_path(
"data-files/pointcloud/example-laz-no-statistics.json"
)
def test_to_from_dict(self) -> None:
with open(self.example_uri) as f:
d = json.load(f)
assert_to_from_dict(self, pystac.Item, d)
def test_apply(self) -> None:
item = next(iter(TestCases.test_case_2().get_all_items()))
self.assertFalse(PointcloudExtension.has_extension(item))
PointcloudExtension.add_to(item)
PointcloudExtension.ext(item).apply(
1000,
"lidar",
"laszip",
[PointcloudSchema({"name": "X", "size": 8, "type": "floating"})],
)
self.assertTrue(PointcloudExtension.has_extension(item))
def test_validate_pointcloud(self) -> None:
item = pystac.Item.from_file(self.example_uri)
item.validate()
def test_count(self) -> None:
pc_item = pystac.Item.from_file(self.example_uri)
# Get
self.assertIn("pc:count", pc_item.properties)
pc_count = PointcloudExtension.ext(pc_item).count
self.assertEqual(pc_count, pc_item.properties["pc:count"])
# Set
PointcloudExtension.ext(pc_item).count = pc_count + 100
self.assertEqual(pc_count + 100, pc_item.properties["pc:count"])
# Validate
pc_item.validate()
# Cannot test validation errors until the pointcloud schema.json syntax is fixed
# Ensure setting bad count fails validation
with self.assertRaises(pystac.STACValidationError):
PointcloudExtension.ext(pc_item).count = "not_an_int" # type:ignore
pc_item.validate()
def test_type(self) -> None:
pc_item = pystac.Item.from_file(self.example_uri)
# Get
self.assertIn("pc:type", pc_item.properties)
pc_type = PointcloudExtension.ext(pc_item).type
self.assertEqual(pc_type, pc_item.properties["pc:type"])
# Set
PointcloudExtension.ext(pc_item).type = "sonar"
self.assertEqual("sonar", pc_item.properties["pc:type"])
# Validate
pc_item.validate()
def test_encoding(self) -> None:
pc_item = pystac.Item.from_file(self.example_uri)
# Get
self.assertIn("pc:encoding", pc_item.properties)
pc_encoding = PointcloudExtension.ext(pc_item).encoding
self.assertEqual(pc_encoding, pc_item.properties["pc:encoding"])
# Set
PointcloudExtension.ext(pc_item).encoding = "binary"
self.assertEqual("binary", pc_item.properties["pc:encoding"])
# Validate
pc_item.validate()
def test_schemas(self) -> None:
pc_item = pystac.Item.from_file(self.example_uri)
# Get
self.assertIn("pc:schemas", pc_item.properties)
pc_schemas = [s.to_dict() for s in PointcloudExtension.ext(pc_item).schemas]
self.assertEqual(pc_schemas, pc_item.properties["pc:schemas"])
# Set
schema = [PointcloudSchema({"name": "X", "size": 8, "type": "floating"})]
PointcloudExtension.ext(pc_item).schemas = schema
self.assertEqual(
[s.to_dict() for s in schema], pc_item.properties["pc:schemas"]
)
# Validate
pc_item.validate()
def test_statistics(self) -> None:
pc_item = pystac.Item.from_file(self.example_uri)
# Get
self.assertIn("pc:statistics", pc_item.properties)
statistics = PointcloudExtension.ext(pc_item).statistics
assert statistics is not None
pc_statistics = [s.to_dict() for s in statistics]
self.assertEqual(pc_statistics, pc_item.properties["pc:statistics"])
# Set
stats = [
PointcloudStatistic(
{
"average": 1,
"count": 1,
"maximum": 1,
"minimum": 1,
"name": "Test",
"position": 1,
"stddev": 1,
"variance": 1,
}
)
]
PointcloudExtension.ext(pc_item).statistics = stats
self.assertEqual(
[s.to_dict() for s in stats], pc_item.properties["pc:statistics"]
)
# Validate
        pc_item.validate()
def test_density(self) -> None:
pc_item = pystac.Item.from_file(self.example_uri)
# Get
self.assertIn("pc:density", pc_item.properties)
pc_density = PointcloudExtension.ext(pc_item).density
self.assertEqual(pc_density, pc_item.properties["pc:density"])
# Set
density = 100
PointcloudExtension.ext(pc_item).density = density
self.assertEqual(density, pc_item.properties["pc:density"])
# Validate
pc_item.validate()
def test_pointcloud_schema(self) -> None:
props: Dict[str, Any] = {
"name": "test",
"size": 8,
"type": "floating",
}
schema = PointcloudSchema(props)
self.assertEqual(props, schema.properties)
# test all getters and setters
for k in props:
if isinstance(props[k], str):
val = props[k] + str(1)
else:
val = props[k] + 1
setattr(schema, k, val)
self.assertEqual(getattr(schema, k), val)
schema = PointcloudSchema.create("intensity", 16, "unsigned")
self.assertEqual(schema.name, "intensity")
self.assertEqual(schema.size, 16)
self.assertEqual(schema.type, "unsigned")
with self.assertRaises(STACError):
schema.size = 0.5 # type: ignore
empty_schema = PointcloudSchema({})
with self.assertRaises(STACError):
empty_schema.size
with self.assertRaises(STACError):
empty_schema.name
with self.assertRaises(STACError):
empty_schema.type
def test_pointcloud_statistics(self) -> None:
props: Dict[str, Any] = {
"average": 1,
"count": 1,
"maximum": 1,
"minimum": 1,
"name": "Test",
"position": 1,
"stddev": 1,
"variance": 1,
}
stat = PointcloudStatistic(props)
self.assertEqual(props, stat.properties)
# test all getters and setters
for k in props:
if isinstance(props[k], str):
val = props[k] + str(1)
else:
val = props[k] + 1
setattr(stat, k, val)
self.assertEqual(getattr(stat, k), val)
stat = PointcloudStatistic.create("foo", 1, 2, 3, 4, 5, 6, 7)
self.assertEqual(stat.name, "foo")
self.assertEqual(stat.position, 1)
self.assertEqual(stat.average, 2)
self.assertEqual(stat.count, 3)
self.assertEqual(stat.maximum, 4)
self.assertEqual(stat.minimum, 5)
self.assertEqual(stat.stddev, 6)
self.assertEqual(stat.variance, 7)
stat.name = None # type: ignore
self.assertNotIn("name", stat.properties)
stat.position = None
self.assertNotIn("position", stat.properties)
stat.average = None
self.assertNotIn("average", stat.properties)
stat.count = None
self.assertNotIn("count", stat.properties)
stat.maximum = None
self.assertNotIn("maximum", stat.properties)
stat.minimum = None
self.assertNotIn("minimum", stat.properties)
stat.stddev = None
self.assertNotIn("stddev", stat.properties)
stat.variance = None
self.assertNotIn("variance", stat.properties)
empty_stat = PointcloudStatistic({})
with self.assertRaises(STACError):
empty_stat.name
def test_statistics_accessor_when_no_stats(self) -> None:
pc_item = pystac.Item.from_file(self.example_uri_no_statistics)
self.assertEqual(PointcloudExtension.ext(pc_item).statistics, None)
def test_asset_extension(self) -> None:
asset = Asset(
"https://github.com/PDAL/PDAL/blob"
"/a6c986f68458e92414a66c664408bee4737bbb08/test/data/laz"
"/autzen_trim.laz",
"laz file",
"The laz data",
"application/octet-stream",
["data"],
{"foo": "bar"},
)
pc_item = pystac.Item.from_file(self.example_uri_no_statistics)
pc_item.add_asset("data", asset)
ext = AssetPointcloudExtension(asset)
self.assertEqual(ext.asset_href, asset.href)
self.assertEqual(ext.properties, asset.properties)
self.assertEqual(ext.additional_read_properties, [pc_item.properties])
def test_ext(self) -> None:
pc_item = pystac.Item.from_file(self.example_uri_no_statistics)
PointcloudExtension.ext(pc_item)
asset = Asset(
"https://github.com/PDAL/PDAL/blob"
"/a6c986f68458e92414a66c664408bee4737bbb08/test/data/laz"
"/autzen_trim.laz",
"laz file",
"The laz data",
"application/octet-stream",
["data"],
{"foo": "bar"},
)
PointcloudExtension.ext(asset)
class RandomObject:
pass
with self.assertRaises(ExtensionTypeError):
PointcloudExtension.ext(RandomObject()) # type: ignore
def test_extension_not_implemented(self) -> None:
# Should raise exception if Item does not include extension URI
plain_item_uri = TestCases.get_path("data-files/item/sample-item.json")
item = pystac.Item.from_file(plain_item_uri)
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = PointcloudExtension.ext(item)
# Should raise exception if owning Item does not include extension URI
asset = item.assets["thumbnail"]
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = PointcloudExtension.ext(asset)
# Should succeed if Asset has no owner
ownerless_asset = pystac.Asset.from_dict(asset.to_dict())
_ = PointcloudExtension.ext(ownerless_asset)
def test_item_ext_add_to(self) -> None:
plain_item_uri = TestCases.get_path("data-files/item/sample-item.json")
item = pystac.Item.from_file(plain_item_uri)
self.assertNotIn(PointcloudExtension.get_schema_uri(), item.stac_extensions)
_ = PointcloudExtension.ext(item, add_if_missing=True)
self.assertIn(PointcloudExtension.get_schema_uri(), item.stac_extensions)
def test_asset_ext_add_to(self) -> None:
plain_item_uri = TestCases.get_path("data-files/item/sample-item.json")
item = pystac.Item.from_file(plain_item_uri)
self.assertNotIn(PointcloudExtension.get_schema_uri(), item.stac_extensions)
asset = item.assets["thumbnail"]
_ = PointcloudExtension.ext(asset, add_if_missing=True)
self.assertIn(PointcloudExtension.get_schema_uri(), item.stac_extensions)
|
the-stack_0_24810
|
import csv
import os
from collections import defaultdict
def processor_for_task(name):
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"sentiment":SentimentProcessor,
}
return processors[name]()
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
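# Minimal construction sketch (the guid/text/label values are made up for
# illustration): a single-sentence example and a sentence-pair example.
# Never called here.
def _input_example_sketch():
    single = InputExample(guid="train-0", text_a="great service", label="positive")
    pair = InputExample(guid="dev-1", text_a="A man is cooking.",
                        text_b="Someone prepares food.", label="entailment")
    return single, pair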
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding='utf-8') as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class SentimentProcessor(DataProcessor):
"""Processor for the HTW Sentiment Dataset."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "model.train")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "model.valid")),"dev")
def get_test_examples(self, data_dir):
"""See base class."""
data = self._read_tsv(os.path.join(data_dir, "model.test"))
data = [line[1:] for line in data] # skip ds name
return self._create_examples(data,"test")
def get_text_data_by_dataset(self, data_dir):
print("loading testdata from " + os.path.join(data_dir, "model.test"))
data = self._read_tsv(os.path.join(data_dir, "model.test"))
#data = [line.replace("\n","").split("\t") for line in open(path, "r").readlines()]
#the data should look like this
#data = [["set-a","postivive","text"],["set-b","postivive","text"],["set-a","postivive","text"],["set-c","postivive","text"]]
# group by dataset
res = defaultdict(list)
for sample in data:
res[sample[0]].append(sample[1:])
#reformat data
data = [[k,v] for k,v in res.items()]
for dataset in data:
dataset[1] = self._create_examples(dataset[1],"test")
return data
def get_labels(self):
"""See base class."""
return ["positive", "negative", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
            #if len(line[1]) > 128:
            #    continue  # skip lines that are too long; otherwise they will be truncated
            # (count tokens, not chars, when doing so)
guid = "%s-%s" % (set_type, i)
text_a = line[1].replace("ä","ae").replace("ö","oe").replace("ü","ue").replace("ß","ss")
label = line[0].replace("__label__","")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
print("created {} examples".format(len(examples)))
return examples
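# Sketch of the row -> InputExample mapping above (the sample row is an
# assumption): the "__label__" prefix is stripped and umlauts are folded.
def _sentiment_example_sketch():
    processor = SentimentProcessor()
    (example,) = processor._create_examples([["__label__positive", "Das Essen war schön"]], "dev")
    assert example.label == "positive"
    assert example.text_a == "Das Essen war schoen"
    return example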
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
#logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
|
the-stack_0_24811
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
import HolidayDecorator
from toontown.toonbase import ToontownGlobals
from toontown.safezone import Playground
from toontown.town import Street
from toontown.estate import Estate
from pandac.PandaModules import Vec4, loadDNAFile, CSDefault, TransformState, NodePath, TransparencyAttrib
class HalloweenHolidayDecorator(HolidayDecorator.HolidayDecorator):
notify = DirectNotifyGlobal.directNotify.newCategory('HalloweenHolidayDecorator')
def __init__(self):
HolidayDecorator.HolidayDecorator.__init__(self)
def __checkStreetValidity(self):
        if (hasattr(base.cr.playGame, 'getPlace') and
            base.cr.playGame.getPlace() and
            isinstance(base.cr.playGame.getPlace(), Street.Street) and
            hasattr(base.cr.playGame.getPlace(), 'loader') and
            base.cr.playGame.getPlace().loader and
            hasattr(base.cr.playGame.getPlace().loader, 'geom') and
            base.cr.playGame.getPlace().loader.geom):
return True
else:
if hasattr(base.cr.playGame, 'getPlace') and base.cr.playGame.getPlace():
self.notify.debug('Failed Street Check %s' % base.cr.playGame.getPlace())
else:
self.notify.debug('Failed Street Check')
return False
def __checkHoodValidity(self):
if (hasattr(base.cr.playGame, 'getPlace') and
base.cr.playGame.getPlace() and
(isinstance(base.cr.playGame.getPlace(), Playground.Playground) or
isinstance(base.cr.playGame.getPlace(), Estate.Estate)) and
hasattr(base.cr.playGame.getPlace(), 'loader') and
base.cr.playGame.getPlace().loader and
hasattr(base.cr.playGame.getPlace().loader, 'hood') and
base.cr.playGame.getPlace().loader.hood and
hasattr(base.cr.playGame.getPlace().loader.hood, 'loader') and
base.cr.playGame.getPlace().loader.hood.loader and
hasattr(base.cr.playGame.getPlace().loader.hood.loader, 'geom') and
base.cr.playGame.getPlace().loader.hood.loader.geom):
return True
else:
if hasattr(base.cr.playGame, 'getPlace') and base.cr.playGame.getPlace():
self.notify.debug('Failed Hood Check %s' % base.cr.playGame.getPlace())
else:
self.notify.debug('Failed Hood Check')
return False
def __startSpookySky(self):
if (self.__checkHoodValidity() or self.__checkStreetValidity()) and hasattr(base.cr.playGame.hood, 'sky') and base.cr.playGame.hood.sky:
base.cr.playGame.hood.startSpookySky()
def __stopSpookySky(self):
if (self.__checkHoodValidity() or self.__checkStreetValidity()) and hasattr(base.cr.playGame.hood, 'sky') and base.cr.playGame.hood.sky:
base.cr.playGame.hood.endSpookySky()
def decorate(self):
self.updateHoodDNAStore()
self.swapIval = self.getSwapVisibleIval()
if self.swapIval:
self.swapIval.start()
def __lightDecorationOn__():
place = base.cr.playGame.getPlace()
if hasattr(place, 'halloweenLights'):
if not self.__checkStreetValidity():
return
else:
place.halloweenLights = place.loader.geom.findAllMatches('**/*light*')
place.halloweenLights += place.loader.geom.findAllMatches('**/*lamp*')
place.halloweenLights += place.loader.geom.findAllMatches('**/prop_snow_tree*')
for light in place.halloweenLights:
light.setColorScaleOff(0)
elif not self.__checkHoodValidity():
return
else:
place.loader.hood.halloweenLights = place.loader.hood.loader.geom.findAllMatches('**/*light*')
place.loader.hood.halloweenLights += place.loader.hood.loader.geom.findAllMatches('**/*lamp*')
place.loader.hood.halloweenLights += place.loader.hood.loader.geom.findAllMatches('**/prop_snow_tree*')
for light in place.loader.hood.halloweenLights:
light.setColorScaleOff(0)
holidayIds = base.cr.newsManager.getDecorationHolidayId()
if ToontownGlobals.HALLOWEEN_COSTUMES not in holidayIds and ToontownGlobals.SPOOKY_COSTUMES not in holidayIds:
return
if (self.__checkHoodValidity() or self.__checkStreetValidity()) and hasattr(base.cr.playGame, 'hood') and base.cr.playGame.hood and hasattr(base.cr.playGame.hood, 'sky') and base.cr.playGame.hood.sky:
preShow = Sequence(Parallel(LerpColorScaleInterval(base.cr.playGame.hood.sky, 1.5, Vec4(1, 1, 1, 0.25)), LerpColorScaleInterval(base.cr.playGame.hood.loader.geom, 2.5, Vec4(0.55, 0.55, 0.65, 1)), Func(__lightDecorationOn__)), Func(self.__startSpookySky))
preShow.start()
distributedEstate = base.cr.doFind('DistributedEstate')
if distributedEstate:
distributedEstate.loadWitch()
def undecorate(self):
if (self.__checkHoodValidity() or self.__checkStreetValidity()) and hasattr(base.cr.playGame.hood, 'sky') and base.cr.playGame.hood.sky:
postShow = Sequence(Parallel(LerpColorScaleInterval(base.cr.playGame.hood.sky, 1.5, Vec4(1, 1, 1, 1)), LerpColorScaleInterval(base.cr.playGame.hood.loader.geom, 2.5, Vec4(1, 1, 1, 1))), Func(self.__stopSpookySky))
postShow.start()
distributedEstate = base.cr.doFind('DistributedEstate')
if distributedEstate:
distributedEstate.unloadWitch()
holidayIds = base.cr.newsManager.getDecorationHolidayId()
if len(holidayIds) > 0:
self.decorate()
return
storageFile = base.cr.playGame.hood.storageDNAFile
if storageFile:
loadDNAFile(self.dnaStore, storageFile, CSDefault)
self.swapIval = self.getSwapVisibleIval()
if self.swapIval:
self.swapIval.start()
|
the-stack_0_24813
|
#!/usr/bin/env python
#
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Verify that when an option is specified with nargs > 1,
SCons consumes those correctly into the args.
"""
import TestSCons
test = TestSCons.TestSCons()
# First, test an option with nargs=2 and no others:
test.write(
'SConstruct',
"""\
env = Environment()
AddOption('--extras',
nargs=2,
dest='extras',
action='store',
type='string',
metavar='FILE1 FILE2',
default=(),
help='two extra files to install')
print(str(GetOption('extras')))
""",
)
# no args
test.run('-Q -q .', stdout="()\n")
# one arg, should fail
test.run(
'-Q -q . --extras A',
status=2,
stderr="""\
usage: scons [OPTIONS] [VARIABLES] [TARGETS]
SCons Error: --extras option requires 2 arguments
""",
)
# two args
test.run('-Q -q . --extras A B', status=1, stdout="('A', 'B')\n")
# -- means the rest are not processed as args
test.run('-Q -q . -- --extras A B', status=1, stdout="()\n")
# Now test what has been a bug: another option is
# also defined, this impacts the collection of args for the nargs>1 opt
test.write(
'SConstruct',
"""\
env = Environment()
AddOption(
'--prefix',
nargs=1,
dest='prefix',
action='store',
type='string',
metavar='DIR',
help='installation prefix',
)
AddOption(
'--extras',
nargs=2,
dest='extras',
action='store',
type='string',
metavar='FILE1 FILE2',
default=(),
help='two extra files to install',
)
print(str(GetOption('prefix')))
print(str(GetOption('extras')))
""",
)
# no options
test.run('-Q -q .', stdout="None\n()\n")
# one single-arg option
test.run('-Q -q . --prefix=/home/foo', stdout="/home/foo\n()\n")
# one two-arg option
test.run('-Q -q . --extras A B', status=1, stdout="None\n('A', 'B')\n")
# single-arg option followed by two-arg option
test.run(
'-Q -q . --prefix=/home/foo --extras A B',
status=1,
stdout="/home/foo\n('A', 'B')\n",
)
# two-arg option followed by single-arg option
test.run(
'-Q -q . --extras A B --prefix=/home/foo',
status=1,
stdout="/home/foo\n('A', 'B')\n",
)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_24814
|
#!/usr/bin/env py.test
"""Tests for manage.py."""
from __future__ import absolute_import, print_function, unicode_literals
from pathlib import Path
from unittest.mock import Mock
import py
import pytest
from pypiserver import manage
from pypiserver.core import PkgFile
from pypiserver.pkg_helpers import guess_pkgname_and_version, parse_version
from pypiserver.manage import (
PipCmd,
build_releases,
filter_stable_releases,
filter_latest_pkgs,
is_stable_version,
update_package,
update_all_packages,
)
def touch_files(root, files):
root = py.path.local(root) # pylint: disable=no-member
for f in files:
root.join(f).ensure()
def pkgfile_from_path(fn):
pkgname, version = guess_pkgname_and_version(fn)
return PkgFile(
pkgname=pkgname,
version=version,
root=py.path.local(fn)
.parts()[1]
.strpath, # noqa pylint: disable=no-member
fn=fn,
)
@pytest.mark.parametrize(
("version", "is_stable"),
[
("1.0", True),
("0.0.0", True),
("1.1beta1", False),
("1.2.10-123", True),
("5.5.0-DEV", False),
("1.2-rc1", False),
("1.0b1", False),
],
)
def test_is_stable_version(version, is_stable):
parsed_version = parse_version(version)
assert is_stable_version(parsed_version) == is_stable
def test_build_releases():
p = pkgfile_from_path("/home/ralf/pypiserver/d/greenlet-0.2.zip")
expected = dict(
parsed_version=("00000000", "00000003", "*final"),
pkgname="greenlet",
replaces=p,
version="0.3.0",
)
(res,) = list(build_releases(p, ["0.3.0"]))
for k, v in expected.items():
assert getattr(res, k) == v
def test_filter_stable_releases():
p = pkgfile_from_path("/home/ralf/pypiserver/d/greenlet-0.2.zip")
assert list(filter_stable_releases([p])) == [p]
p2 = pkgfile_from_path("/home/ralf/pypiserver/d/greenlet-0.5rc1.zip")
assert list(filter_stable_releases([p2])) == []
def test_filter_latest_pkgs():
paths = [
"/home/ralf/greenlet-0.2.zip",
"/home/ralf/foo/baz-1.0.zip" "/home/ralf/bar/greenlet-0.3.zip",
]
pkgs = [pkgfile_from_path(x) for x in paths]
assert frozenset(filter_latest_pkgs(pkgs)) == frozenset(pkgs[1:])
def test_filter_latest_pkgs_case_insensitive():
paths = [
"/home/ralf/greenlet-0.2.zip",
"/home/ralf/foo/baz-1.0.zip" "/home/ralf/bar/Greenlet-0.3.zip",
]
pkgs = [pkgfile_from_path(x) for x in paths]
assert frozenset(filter_latest_pkgs(pkgs)) == frozenset(pkgs[1:])
@pytest.mark.parametrize(
"pip_ver, cmd_type",
(
("10.0.0", "d"),
("10.0.0rc10", "d"),
("10.0.0b10", "d"),
("10.0.0a3", "d"),
("10.0.0.dev8", "d"),
("10.0.0.dev8", "d"),
("18.0", "d"),
("9.9.8", "i"),
("9.9.8rc10", "i"),
("9.9.8b10", "i"),
("9.9.8a10", "i"),
("9.9.8.dev10", "i"),
("9.9", "i"),
),
)
def test_pip_cmd_root(pip_ver, cmd_type):
"""Verify correct determination of the command root by pip version."""
exp_cmd = (
"pip",
"-q",
"install" if cmd_type == "i" else "download",
)
assert tuple(PipCmd.update_root(pip_ver)) == exp_cmd
def test_pip_cmd_update():
"""Verify the correct determination of a pip command."""
index = "https://pypi.org/simple"
destdir = "foo/bar"
pkg_name = "mypkg"
pkg_version = "12.0"
cmd_root = ("pip", "-q", "download")
exp_cmd = cmd_root + (
"--no-deps",
"-i",
index,
"-d",
destdir,
"{}=={}".format(pkg_name, pkg_version),
)
assert exp_cmd == tuple(
PipCmd.update(cmd_root, destdir, pkg_name, pkg_version)
)
def test_pip_cmd_update_index_overridden():
"""Verify the correct determination of a pip command."""
index = "https://pypi.org/complex"
destdir = "foo/bar"
pkg_name = "mypkg"
pkg_version = "12.0"
cmd_root = ("pip", "-q", "download")
exp_cmd = cmd_root + (
"--no-deps",
"-i",
index,
"-d",
destdir,
"{}=={}".format(pkg_name, pkg_version),
)
assert exp_cmd == tuple(
PipCmd.update(cmd_root, destdir, pkg_name, pkg_version, index=index)
)
def test_update_package(monkeypatch):
"""Test generating an update command for a package."""
monkeypatch.setattr(manage, "call", Mock())
pkg = PkgFile("mypkg", "1.0", replaces=PkgFile("mypkg", "0.9"))
update_package(pkg, ".")
manage.call.assert_called_once_with(
( # pylint: disable=no-member
"pip",
"-q",
"download",
"--no-deps",
"-i",
"https://pypi.org/simple",
"-d",
".",
"mypkg==1.0",
)
)
def test_update_package_dry_run(monkeypatch):
"""Test generating an update command for a package."""
monkeypatch.setattr(manage, "call", Mock())
pkg = PkgFile("mypkg", "1.0", replaces=PkgFile("mypkg", "0.9"))
update_package(pkg, ".", dry_run=True)
assert not manage.call.mock_calls # pylint: disable=no-member
def test_update_all_packages(monkeypatch):
"""Test calling update_all_packages()"""
public_pkg_1 = PkgFile("Flask", "1.0")
public_pkg_2 = PkgFile("requests", "1.0")
private_pkg_1 = PkgFile("my_private_pkg", "1.0")
private_pkg_2 = PkgFile("my_other_private_pkg", "1.0")
roots_mock = {
Path("/opt/pypi"): [
public_pkg_1,
private_pkg_1,
],
Path("/data/pypi"): [public_pkg_2, private_pkg_2],
}
def core_listdir_mock(path: Path):
return roots_mock.get(path, [])
monkeypatch.setattr(manage, "listdir", core_listdir_mock)
monkeypatch.setattr(manage, "update", Mock(return_value=None))
destdir = None
dry_run = False
stable_only = True
update_all_packages(
roots=list(roots_mock.keys()),
destdir=destdir,
dry_run=dry_run,
stable_only=stable_only,
ignorelist=None,
)
manage.update.assert_called_once_with( # pylint: disable=no-member
frozenset([public_pkg_1, public_pkg_2, private_pkg_1, private_pkg_2]),
destdir,
dry_run,
stable_only,
)
def test_update_all_packages_with_blacklist(monkeypatch):
"""Test calling update_all_packages()"""
public_pkg_1 = PkgFile("Flask", "1.0")
public_pkg_2 = PkgFile("requests", "1.0")
private_pkg_1 = PkgFile("my_private_pkg", "1.0")
private_pkg_2 = PkgFile("my_other_private_pkg", "1.0")
roots_mock = {
Path("/opt/pypi"): [
public_pkg_1,
private_pkg_1,
],
Path("/data/pypi"): [public_pkg_2, private_pkg_2],
}
def core_listdir_mock(path: Path):
return roots_mock.get(path, [])
monkeypatch.setattr(manage, "listdir", core_listdir_mock)
monkeypatch.setattr(manage, "update", Mock(return_value=None))
destdir = None
dry_run = False
stable_only = True
update_all_packages(
roots=list(roots_mock.keys()),
destdir=destdir,
dry_run=dry_run,
stable_only=stable_only,
ignorelist=["my_private_pkg", "my_other_private_pkg"],
)
manage.update.assert_called_once_with( # pylint: disable=no-member
frozenset([public_pkg_1, public_pkg_2]), destdir, dry_run, stable_only
)
|
the-stack_0_24815
|
import torch
import random
import numpy as np
from PIL import Image, ImageOps, ImageFilter
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation.
Args:
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""
def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
self.mean = mean
self.std = std
def __call__(self, sample):
src_img = sample['src_image']
tgt_img = sample['tgt_image']
mask = sample['src_label']
src_img = np.array(src_img).astype(np.float32)
tgt_img = np.array(tgt_img).astype(np.float32)
mask = np.array(mask).astype(np.float32)
src_img /= 255.0
src_img -= self.mean
src_img /= self.std
tgt_img /= 255.0
tgt_img -= self.mean
tgt_img /= self.std
return {'src_image': src_img,
'tgt_image':tgt_img,
'src_label': mask}
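# Rough usage sketch (the ImageNet-style mean/std values are assumptions):
# pixel values are scaled to [0, 1] and then shifted/scaled per channel.
# Never called at import time.
def _normalize_sketch():
    white = Image.new('RGB', (4, 4), color=(255, 255, 255))
    sample = {'src_image': white, 'tgt_image': white.copy(),
              'src_label': Image.new('L', (4, 4))}
    norm = Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    out = norm(sample)
    return out['src_image'].shape  # (4, 4, 3) float32 array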
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
src_img = sample['src_image']
tgt_img = sample['tgt_image']
mask = sample['src_label']
src_img = np.array(src_img).astype(np.float32).transpose((2, 0, 1))
tgt_img = np.array(tgt_img).astype(np.float32).transpose((2, 0, 1))
mask = np.array(mask).astype(np.float32)
src_img = torch.from_numpy(src_img).float()
tgt_img = torch.from_numpy(tgt_img).float()
mask = torch.from_numpy(mask).float()
return {'src_image': src_img,
'tgt_image': tgt_img,
'src_label': mask}
class RandomHorizontalFlip(object):
def __call__(self, sample):
src_img = sample['src_image']
tgt_img = sample['tgt_image']
mask = sample['src_label']
if random.random() < 0.5:
src_img = src_img.transpose(Image.FLIP_LEFT_RIGHT)
tgt_img = tgt_img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
return {'src_image': src_img,
'tgt_image': tgt_img,
'src_label': mask}
class RandomRotate(object):
def __init__(self, degree):
self.degree = degree
def __call__(self, sample):
src_img = sample['src_image']
tgt_img = sample['tgt_image']
mask = sample['src_label']
rotate_degree = random.uniform(-1*self.degree, self.degree)
src_img = src_img.rotate(rotate_degree, Image.BILINEAR)
tgt_img = tgt_img.rotate(rotate_degree, Image.BILINEAR)
mask = mask.rotate(rotate_degree, Image.NEAREST)
return {'src_image': src_img,
'tgt_image': tgt_img,
'src_label': mask}
class RandomGaussianBlur(object):
def __call__(self, sample):
src_img = sample['src_image']
tgt_img = sample['tgt_image']
mask = sample['src_label']
if random.random() < 0.5:
src_img = src_img.filter(ImageFilter.GaussianBlur(
radius=random.random()))
tgt_img = tgt_img.filter(ImageFilter.GaussianBlur(
radius=random.random()))
return {'src_image': src_img,
'tgt_image': tgt_img,
'src_label': mask}
class RandomScaleCrop(object):
def __init__(self, base_size, crop_size, fill=0):
self.base_size = base_size
self.crop_size = crop_size
self.fill = fill
def __call__(self, sample):
src_img = sample['src_image']
tgt_img = sample['tgt_image']
mask = sample['src_label']
# random scale (short edge)
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = src_img.size
if h > w:
ow = short_size
oh = int(1.0 * h * ow / w)
else:
oh = short_size
ow = int(1.0 * w * oh / h)
src_img = src_img.resize((ow, oh), Image.BILINEAR)
tgt_img = tgt_img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < self.crop_size:
padh = self.crop_size - oh if oh < self.crop_size else 0
padw = self.crop_size - ow if ow < self.crop_size else 0
src_img = ImageOps.expand(src_img, border=(0, 0, padw, padh), fill=0)
tgt_img = ImageOps.expand(tgt_img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)
# random crop crop_size
w, h = src_img.size
x1 = random.randint(0, w - self.crop_size)
y1 = random.randint(0, h - self.crop_size)
src_img = src_img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
tgt_img = tgt_img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
return {'src_image': src_img,
'tgt_image': tgt_img,
'src_label': mask}
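# Behaviour sketch (the image sizes are assumptions): whatever random scale is
# drawn, the returned images and mask come out exactly crop_size x crop_size.
def _random_scale_crop_sketch():
    aug = RandomScaleCrop(base_size=256, crop_size=224, fill=255)
    img = Image.new('RGB', (300, 200))
    sample = {'src_image': img, 'tgt_image': img.copy(),
              'src_label': Image.new('L', (300, 200))}
    out = aug(sample)
    return out['src_image'].size  # (224, 224)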
class FixScaleCrop(object):
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, sample):
src_img = sample['src_image']
tgt_img = sample['tgt_image']
mask = sample['src_label']
w, h = src_img.size
if w > h:
oh = self.crop_size
ow = int(1.0 * w * oh / h)
else:
ow = self.crop_size
oh = int(1.0 * h * ow / w)
src_img = src_img.resize((ow, oh), Image.BILINEAR)
tgt_img = tgt_img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# center crop
w, h = src_img.size
x1 = int(round((w - self.crop_size) / 2.))
y1 = int(round((h - self.crop_size) / 2.))
src_img = src_img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
tgt_img = tgt_img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
return {'src_image': src_img,
'tgt_image': tgt_img,
'src_label': mask}
class FixedResize(object):
def __init__(self, size):
self.size = (size, size) # size: (h, w)
def __call__(self, sample):
src_img = sample['src_image']
tgt_img = sample['tgt_image']
mask = sample['src_label']
assert src_img.size == mask.size
src_img = src_img.resize(self.size, Image.BILINEAR)
tgt_img = tgt_img.resize(self.size, Image.BILINEAR)
mask = mask.resize(self.size, Image.NEAREST)
return {'src_image': src_img,
'tgt_image': tgt_img,
'src_label': mask}
|
the-stack_0_24816
|
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/check_frontend_test_coverage.py."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import subprocess
import sys
from core.tests import test_utils
import python_utils
from . import check_frontend_test_coverage
class CheckFrontEndCoverageTests(test_utils.GenericTestBase):
def setUp(self):
super(CheckFrontEndCoverageTests, self).setUp()
self.lcov_items_list = None
self.check_function_calls = {
'open_file_is_called': False,
'exists_is_called': False,
}
self.expected_check_function_calls = {
'open_file_is_called': True,
'exists_is_called': True,
}
self.printed_messages = []
class MockFile(python_utils.OBJECT):
def __init__(self, lcov_items_list):
self.lcov_items_list = lcov_items_list
def read(self): # pylint: disable=missing-docstring
return self.lcov_items_list
def mock_open_file(file_name, option): # pylint: disable=unused-argument
self.check_function_calls['open_file_is_called'] = True
return MockFile(self.lcov_items_list)
def mock_exists(unused_path):
self.check_function_calls['exists_is_called'] = True
return True
def mock_print(message):
self.printed_messages.append(message)
def mock_check_call(command): # pylint: disable=unused-argument
self.check_function_calls['check_call_is_called'] = True
self.open_file_swap = self.swap(
python_utils, 'open_file', mock_open_file)
self.exists_swap = self.swap(os.path, 'exists', mock_exists)
self.print_swap = self.swap(python_utils, 'PRINT', mock_print)
self.check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
def test_get_stanzas_from_lcov_file(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:5\n'
'end_of_record\n'
'SF:/opensource/oppia/file2.ts\n'
'LF:10\n'
'LH:5\n'
'end_of_record\n'
'SF:/opensource/oppia/file3.ts\n'
'LF:10\n'
'LH:5\n'
'end_of_record\n'
)
with self.open_file_swap:
stanzas = check_frontend_test_coverage.get_stanzas_from_lcov_file()
self.assertEqual(stanzas[0].file_name, 'file.ts')
self.assertEqual(stanzas[0].total_lines, 10)
self.assertEqual(stanzas[0].covered_lines, 5)
self.assertEqual(stanzas[1].file_name, 'file2.ts')
self.assertEqual(stanzas[1].total_lines, 10)
self.assertEqual(stanzas[1].covered_lines, 5)
self.assertEqual(stanzas[2].file_name, 'file3.ts')
self.assertEqual(stanzas[2].total_lines, 10)
self.assertEqual(stanzas[2].covered_lines, 5)
def test_get_stanzas_from_lcov_file_file_name_exception(self):
self.lcov_items_list = (
'SF:\n'
'LF:10\n'
'LH:5\n'
'end_of_record\n'
)
with self.open_file_swap:
with self.assertRaisesRegexp(
Exception,
'The test path is empty or null. '
'It\'s not possible to diff the test coverage correctly.'):
check_frontend_test_coverage.get_stanzas_from_lcov_file()
def test_get_stanzas_from_lcov_file_total_lines_exception(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:\n'
'LH:5\n'
'end_of_record\n'
)
with self.open_file_swap:
with self.assertRaisesRegexp(
Exception,
'It wasn\'t possible to get the total lines of file.ts file.'
'It\'s not possible to diff the test coverage correctly.'):
check_frontend_test_coverage.get_stanzas_from_lcov_file()
def test_get_stanzas_from_lcov_file_covered_lines_exception(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:\n'
'end_of_record\n'
)
with self.open_file_swap:
with self.assertRaisesRegexp(
Exception,
'It wasn\'t possible to get the covered lines of file.ts file.'
'It\'s not possible to diff the test coverage correctly.'):
check_frontend_test_coverage.get_stanzas_from_lcov_file()
def test_check_coverage_changes(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:/opensource/oppia/file2.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES', [
'file.ts',
'file2.ts'
]
)
check_function_calls = {
'sys_exit_is_called': False,
}
expected_check_function_calls = {
'sys_exit_is_called': False,
}
def mock_sys_exit(error_message): # pylint: disable=unused-argument
check_function_calls['sys_exit_is_called'] = True
sys_exit_swap = self.swap(sys, 'exit', mock_sys_exit)
with sys_exit_swap, self.exists_swap, self.open_file_swap, self.print_swap: # pylint: disable=line-too-long
with not_fully_covered_files_swap:
check_frontend_test_coverage.check_coverage_changes()
self.assertEqual(
check_function_calls,
expected_check_function_calls)
def test_check_coverage_changes_error(self):
def mock_exists(unused_path):
return False
exists_swap = self.swap(os.path, 'exists', mock_exists)
with exists_swap:
with self.assertRaisesRegexp(
Exception,
'Expected lcov file to be'
r' available at [A-Za-z\._/]+, but the file does not exist.'):
check_frontend_test_coverage.check_coverage_changes()
def test_check_coverage_changes_for_covered_files(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:node_modules/oppia/anotherfile.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES', []
)
with self.exists_swap, self.open_file_swap, self.print_swap:
with not_fully_covered_files_swap:
with self.assertRaisesRegexp(
SystemExit,
re.escape(
'\033[1mfile.ts\033[0m seems to be not completely '
'tested. Make sure it\'s fully covered.\n')):
check_frontend_test_coverage.check_coverage_changes()
def test_check_coverage_changes_remove_file(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:10\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES', [
'file.ts'
]
)
with self.exists_swap, self.open_file_swap, self.print_swap:
with not_fully_covered_files_swap:
with self.assertRaisesRegexp(
SystemExit,
re.escape(
'\033[1mfile.ts\033[0m seems to be fully covered! '
'Before removing it manually from the denylist '
'in the file '
'scripts/check_frontend_test_coverage.py, please '
'make sure you\'ve followed the unit tests rules '
'correctly on: '
'https://github.com/oppia/oppia/wiki/Frontend-unit'
'-tests-guide#rules\n')):
check_frontend_test_coverage.check_coverage_changes()
def test_check_coverage_changes_when_renaming_file(self):
self.lcov_items_list = (
'SF:/opensource/oppia/newfilename.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES', [
'file.ts'
]
)
with self.exists_swap, self.open_file_swap, self.print_swap:
with not_fully_covered_files_swap:
with self.assertRaisesRegexp(
SystemExit,
re.escape(
'\033[1mnewfilename.ts\033[0m seems to be not '
'completely tested. Make sure it\'s fully covered.\n'
'\033[1mfile.ts\033[0m is in the frontend test '
'coverage denylist but it doesn\'t exist anymore. If '
'you have renamed it, please make sure to remove the '
'old file name and add the new file name in the '
'denylist in the file scripts/'
'check_frontend_test_coverage.py.\n')):
check_frontend_test_coverage.check_coverage_changes()
def test_fully_covered_filenames_is_sorted(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:/opensource/oppia/anotherfile.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:node_modules/oppia/thirdfile.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES', [
'anotherfile.ts'
'file.ts',
]
)
check_function_calls = {
'sys_exit_is_called': False
}
expected_check_function_calls = {
'sys_exit_is_called': False
}
def mock_sys_exit(error_message): # pylint: disable=unused-argument
check_function_calls['sys_exit_is_called'] = True
sys_exit_swap = self.swap(sys, 'exit', mock_sys_exit)
with sys_exit_swap, self.exists_swap, self.open_file_swap, self.print_swap: # pylint: disable=line-too-long
with not_fully_covered_files_swap:
(
check_frontend_test_coverage
.check_not_fully_covered_filenames_list_is_sorted())
self.assertEqual(
check_function_calls,
expected_check_function_calls)
def test_fully_covered_filenames_is_not_sorted(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:/opensource/oppia/anotherfile.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES', [
'file.ts',
'anotherfile.ts'
]
)
with self.exists_swap, self.open_file_swap, self.print_swap:
with not_fully_covered_files_swap:
with self.assertRaisesRegexp(
SystemExit,
re.escape(
'The \033[1mNOT_FULLY_COVERED_FILENAMES\033[0m list '
'must be kept in alphabetical order.')):
(
check_frontend_test_coverage
.check_not_fully_covered_filenames_list_is_sorted())
def test_function_calls(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES', [
'file.ts'
])
with self.check_call_swap, self.exists_swap, self.open_file_swap:
with not_fully_covered_files_swap:
check_frontend_test_coverage.main()
self.assertEqual(
self.check_function_calls, self.expected_check_function_calls)
|
the-stack_0_24817
|
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
from torch.nn import functional as F
models_urls = {
'101_voc': 'https://cloudstor.aarnet.edu.au/plus/s/Owmttk9bdPROwc6/download',
'18_imagenet': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'34_imagenet': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'50_imagenet': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'152_imagenet': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'101_imagenet': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
}
def maybe_download(model_name, model_url, model_dir=None, map_location=None):
import os, sys
from six.moves import urllib
if model_dir is None:
torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
filename = '{}.pth.tar'.format(model_name)
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
url = model_url
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
urllib.request.urlretrieve(url, cached_file)
return torch.load(cached_file, map_location=map_location)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def conv3x3_bn(in_channel, out_channel):
return nn.Sequential(nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False),
nn.ReLU(inplace=True))
class MultiResolutionFuse(nn.Module):
def __init__(self, in_size, out_size):
super(MultiResolutionFuse, self).__init__()
self.in_size=in_size
self.out_size=out_size
self.conv = nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, bias=False)
def forward(self, input_low, input_high):
high_size = input_high.size()[2:]
# low channel usually > high channel
if self.in_size != self.out_size:
input_low = self.conv(input_low)
upsample_low = F.upsample(input_low, high_size, mode='bilinear')
cat = torch.cat([upsample_low, input_high], dim=1)
return cat
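# Shape sketch (the feature-map sizes are assumptions): the coarse input is
# projected to out_size channels, upsampled to the fine spatial size, and
# concatenated with the fine input along the channel axis.
def _fuse_shape_sketch():
    fuse = MultiResolutionFuse(in_size=1024, out_size=512)
    low = torch.randn(1, 1024, 8, 8)    # coarse, many channels
    high = torch.randn(1, 512, 16, 16)  # fine, fewer channels
    return fuse(low, high).shape        # torch.Size([1, 1024, 16, 16])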
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class RefineBlock(nn.Module):
def __init__(self, in_channel):
super(RefineBlock, self).__init__()
self.c1 = nn.Conv2d(in_channel, 512,kernel_size=1, stride=1, padding=0, bias=False)
self.c3_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.bn = nn.BatchNorm2d(512)
self.relu = nn.ReLU(inplace=True)
self.c3_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, x):
x1 = self.c1(x)
x = self.c3_1(x1)
x = self.bn(x)
x = self.relu(x)
x = self.c3_2(x)
out = x1 + x
return out
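# Channel sketch (the input size is an assumption): RefineBlock squeezes the
# input to 512 channels with a 1x1 conv and adds a conv-bn-relu-conv residual.
def _refine_block_sketch():
    rb = RefineBlock(in_channel=256)
    feat = torch.randn(1, 256, 32, 32)
    return rb(feat).shape  # torch.Size([1, 512, 32, 32])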
# only refine block, no FPA
class MV2_base_0_ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
super(MV2_base_0_ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.rb1_1 = RefineBlock(256)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.rb2_1 = RefineBlock(512)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.rb3_1 = RefineBlock(1024)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.rb4_1 = RefineBlock(2048)
# self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes)
# only for >=res50
# self.fpa=FPA(2048,512)
# self.fpa = FPA(512, 512)
# self.rb4_2 = RefineBlock(512 * 4)
self.fuse43 = MultiResolutionFuse(512, 512)
# self.post_proc43 = conv3x3_bn(512*2,512)
self.rb3_2 = RefineBlock(512 * 2)
self.fuse32 = MultiResolutionFuse(512, 512)
self.rb2_2 = RefineBlock(512 * 2)
# self.post_proc32 = conv3x3_bn(512)
self.fuse21 = MultiResolutionFuse(512, 512)
self.rb1_2 = RefineBlock(512 * 2)
# self.post_proc21 = conv3x3_bn(512)
self.class_conv = nn.Conv2d(512, num_classes, kernel_size=3, stride=1,
padding=1, bias=True)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
ori_size = x.size()[2:]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
l1 = self.layer1(x)
l2 = self.layer2(l1)
l3 = self.layer3(l2)
l4 = self.layer4(l3)
l1 = self.rb1_1(l1)
l2 = self.rb2_1(l2)
l3 = self.rb3_1(l3)
l4 = self.rb4_1(l4)
# l4 = self.fpa(l4)
# l4=self.rb4_2(l4)
x_fuse43 = self.fuse43(l4, l3)
x_fuse43=self.rb3_2(x_fuse43)
x_fuse32 = self.fuse32(x_fuse43, l2)
x_fuse32=self.rb2_2(x_fuse32)
x_fuse21 = self.fuse21(x_fuse32, l1)
x_fuse21=self.rb1_2(x_fuse21)
x = self.class_conv(x_fuse21)
x = F.upsample(x, ori_size, mode='bilinear')
return x
def MV2_base_0_ResNet18(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(BasicBlock, [2, 2, 2, 2], **kwargs, num_classes=num_classes)
if pretrained:
key = '18_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def MV2_base_0_ResNet34(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(BasicBlock, [3, 4, 6, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '34_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def MV2_base_0_ResNet50(num_classes, pretrained=True, **kwargs):
"""Constructs a MV1_ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(Bottleneck, [3, 4, 6, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '50_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
print("load imagenet res50")
return model
def MV2_base_0_ResNet101(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(Bottleneck, [3, 4, 23, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '101_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def MV2_base_0_ResNet152(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(Bottleneck, [3, 8, 36, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '152_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
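# A minimal usage sketch (not part of the original module): it assumes the
# helpers referenced above (RefineBlock, MultiResolutionFuse, conv1x1,
# Bottleneck, models_urls, maybe_download) are defined earlier in this file.
if __name__ == '__main__':
    import torch
    net = MV2_base_0_ResNet50(num_classes=21, pretrained=False)
    dummy = torch.randn(2, 3, 256, 256)   # NCHW input batch
    logits = net(dummy)                   # upsampled back to the input resolution
    print(logits.shape)                   # expected: torch.Size([2, 21, 256, 256])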
|
the-stack_0_24818
|
import numpy as np
from dezero import Variable
import dezero.functions as F
import dezero.layers as L
np.random.seed(0)
x = np.random.rand(100, 1)
y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)
x, y = Variable(x), Variable(y)
I, H, O = 1, 10, 1
l1 = L.Linear(H) # or L.Linear(I, H)
l2 = L.Linear(O) # or L.Linear(H, O)
def predict(x):
y = l1(x)
y = F.sigmoid(y)
y = l2(y)
return y
lr = 0.2
iters = 10000
for i in range(iters):
y_pred = predict(x)
loss = F.mean_squared_error(y, y_pred)
l1.cleargrads()
l2.cleargrads()
loss.backward()
for l in [l1, l2]:
for p in l.params():
p.data -= lr * p.grad.data
    if i % 1000 == 0:  # report progress occasionally instead of every iteration
        print(loss)
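# Optional visualization sketch (not in the original script): plot the fitted
# curve. Assumes matplotlib is installed and reuses predict(), x and y above.
import matplotlib.pyplot as plt
t = np.arange(0, 1, 0.01)[:, np.newaxis]   # evenly spaced test inputs
y_fit = predict(t)                          # dezero wraps the ndarray automatically
plt.scatter(x.data, y.data, s=10, label='training data')
plt.plot(t, y_fit.data, color='r', label='model prediction')
plt.legend()
plt.show()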
|
the-stack_0_24819
|
# -*- coding: utf-8 -*-
import sys
import os
import time
from PyQt5.QtWidgets import QApplication, QDesktopWidget, QWidget, QFrame
from PyQt5.QtCore import Qt, QEventLoop, QTimer
def sleep(s):
loop = QEventLoop()
QTimer.singleShot(int(s * 1000), loop.quit)
loop.exec()
class App(QWidget):
def __init__(self):
super().__init__()
self.left = 0
self.top = 0
        screenRect = QApplication.desktop().screenGeometry()
self.width = screenRect.width()
self.height = screenRect.height()
if os.name == 'nt':
self.setWindowFlags(Qt.FramelessWindowHint | Qt.Tool | Qt.WindowStaysOnTopHint)
else:
self.setWindowFlags(Qt.FramelessWindowHint | Qt.SubWindow | Qt.WindowStaysOnTopHint)
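        # the two attributes below make the window translucent and click-through,
        # so the overlay never intercepts mouse input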
self.setAttribute(Qt.WA_TranslucentBackground)
self.setAttribute(Qt.WA_TransparentForMouseEvents, True)
self.setGeometry(self.left, self.top, self.width, self.height)
self.show()
def getDisplayArea(self):
return self.width, self.height
class Marquee(QFrame):
def __init__(self, window):
super().__init__(window)
self.screenWidth, self.screenHeight = window.getDisplayArea()
self.setStyleSheet("border:30px solid red;")
self.setGeometry(0,
0,
self.screenWidth,
self.screenHeight)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
MyMainWindow = App()
MyMarquee = Marquee(MyMainWindow)
    while True:
        # let Qt process events on every pass and, on Windows, keep the overlay
        # above newly focused windows; the QEventLoop-based sleep() services the
        # GUI since app.exec_() is never called, while time.sleep() throttles the loop
        if os.name == 'nt':
            MyMainWindow.raise_()
        sleep(0.1)
        time.sleep(1)
|
the-stack_0_24821
|
from base64 import b64encode
from django.shortcuts import render, redirect, reverse
from .models import *
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
import json
import hashlib
import telebot
import time
from .forms import *
import requests
bot = telebot.TeleBot('1756376023:AAFNHBzVvdcx2rh1f_Xsc8lKbz0-pzFFqP0')
ids = ['344548620', '412228067']
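# NOTE: the bot token and Telegram chat ids above are hard-coded; in a real
# deployment they would normally come from Django settings or environment variables.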
def myajaxtestview(request):
device = request.COOKIES['device']
    order_id = request.POST['order_id']
    customer, created = Customer.objects.get_or_create(device=device)
    order, created = Order.objects.get_or_create(customer=customer, complete=False)
    orderdict = {
        'phone': request.POST['phone'],
        'name': request.POST['name'],
        'adress': request.POST['adress'],
    }
    ord_type = 'card'
    send_info(order, orderdict, ord_type, order_id)
    order.delete()
    # a Django view must return an HttpResponse
    return JsonResponse('Order received', safe=False)
def send_info(order, orderdict, ord_type, ord_id=None):
    for chat_id in ids:
        order_str = ''
        items = list(order.orderitem_set.all())
        print(items)
        for item in items:
            order_str += f'Product: {item.product.name}\nQuantity: {item.quantity}\nDescription: {item.product.description}\n_____________________\n'
        for key in orderdict.keys():
            order_str += '\n' + key + ': ' + orderdict[key]
        if ord_type == 'card':
            order_str += f'\nPaid by card\nOrder number: {ord_id}'
        else:
            order_str += '\nRegular cash order, no order id needed'
        bot.send_message(chat_id, order_str)
        time.sleep(1)
def send_form(mess_dict):
    message = ''
    if 'date' in mess_dict:
        message += ('We received a table reservation request:\n'
                    f"Name: {mess_dict['name']}\n"
                    f"Message: {mess_dict['message']}\n"
                    f"Phone: {mess_dict['phone']}\n"
                    f"Date: {mess_dict['date']}\n"
                    f"Time: {mess_dict['time']}\n"
                    f"Number of people: {mess_dict['people']}")
    else:
        message += ('We received a new message:\n'
                    f"Name: {mess_dict['name']}\n"
                    f"Message: {mess_dict['message']}\n"
                    f"Phone: {mess_dict['phone']}\n"
                    f"Email: {mess_dict['email']}")
    for chat_id in ids:
        bot.send_message(chat_id, message)
def store(request):
if request.method == 'POST':
        mess_dict = {
            'name': request.POST['name'],
            'message': request.POST['message'],
            'phone': request.POST['phone'],
        }
        try:
            # the table-reservation form also posts date/time/people
            mess_dict['date'] = request.POST['date']
            mess_dict['time'] = request.POST['time']
            mess_dict['people'] = request.POST['people']
        except KeyError:
            # the plain contact form posts an email instead
            mess_dict['email'] = request.POST['email']
        send_form(mess_dict)
    products = Product.objects.all()
    categories = Category.objects.all()
    context = {'products': products, 'categories': categories}
return render(request, 'home/index.html', context)
def delivery(request):
    return render(request, 'home/delivery.html')
def privacy_policy(request):
    return render(request, 'home/privacypolicy.html')
def cart(request):
device = request.COOKIES['device']
customer, created = Customer.objects.get_or_create(device=device)
    order_id = device + str(time.time())
    order, created = Order.objects.get_or_create(customer=customer, complete=False)
    amount = order.get_cart_total
    checkout = {
        'public_key': 'i67910001878',
        'private_key': 'b8sDbUFGnZMTGXV5gy7z1tb687cqn5CqBsFoDw4f',
        'version': 3,
        'action': 'pay',
        'amount': amount,
        'currency': 'UAH',
        'description': 'test',
        'order_id': order_id,
        'result_url': 'https://vk.com/morgenshtern?z=audio_playlist-2000618831_11618831%2F77ef1d65a8a66736ff'
    }
    # LiqPay-style checkout: data is the base64-encoded JSON above and the
    # signature is base64(sha1(private_key + data + private_key)); note that
    # LiqPay normally expects the private key only in the signature, not
    # inside the data payload itself
    enc_checkout = b64encode(json.dumps(checkout).encode("utf-8")).decode("ascii")
    sign_string = checkout['private_key'] + enc_checkout + checkout['private_key']
    sign_enc = b64encode(hashlib.sha1(sign_string.encode('ascii')).digest()).decode("ascii")
    CHOICES = [('M', 'Cash on delivery'), ('F', 'Pay by card')]
    payment_field = forms.CharField(label='Payment method', widget=forms.RadioSelect(choices=CHOICES))
    context = {'order': order, 'checkout_data': enc_checkout, 'signature': sign_enc,
               'form': payment_field, 'order_id': order_id}
    if request.method == 'POST':
        orderdict = {
            'name': request.POST['name'],
            'adress': request.POST['adress'],
            'phone': request.POST['phone'],
        }
        ord_type = 'cash'
        send_info(order, orderdict, ord_type, order_id)
        order.delete()
        opt = request.POST['option']  # selected payment option; currently unused
        return render(request, 'home/index.html')
return render(request, 'home/cart.html', context)
def updateItem(request):
data = json.loads(request.body)
productId = data['productId']
action = data['action']
device = request.COOKIES['device']
customer, created = Customer.objects.get_or_create(device=device)
product = Product.objects.get(id=productId)
order, created = Order.objects.get_or_create(customer=customer, complete=False)
orderItem, created = OrderItem.objects.get_or_create(order=order, product=product)
    if action == 'add':
        orderItem.quantity += 1
    elif action == 'remove':
        orderItem.quantity -= 1
    orderItem.save()
    if orderItem.quantity <= 0:
        orderItem.delete()
    if action == 'delete':
        orderItem.delete()
return JsonResponse('Item was added',safe=False)
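# Hypothetical URL wiring for the views above (a sketch, not taken from this
# project; the route paths and names are assumptions), kept commented out so
# this module stays importable as-is:
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.store, name='store'),
#     path('cart/', views.cart, name='cart'),
#     path('delivery/', views.delivery, name='delivery'),
#     path('privacy/', views.privacy_policy, name='privacy_policy'),
#     path('update_item/', views.updateItem, name='update_item'),
#     path('order/', views.myajaxtestview, name='order_ajax'),
# ]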
|